├── net
│   ├── __init__.py
│   └── model.py
├── .gitignore
├── resultOfCycleGAN.png
├── resultOfUNet-VGG.png
├── data
│   ├── src
│   │   ├── 1004_966_512_2_1_0_0_.png
│   │   ├── 1007_821_512_2_1_0_0_.png
│   │   ├── 1008_784_549_2_2_0_0_.png
│   │   ├── 1008_784_549_2_2_0_1_.png
│   │   ├── 1008_784_549_2_2_1_0_.png
│   │   ├── 1008_784_549_2_2_1_1_.png
│   │   ├── 1009_798_540_2_2_0_0_.png
│   │   └── 1009_798_540_2_2_0_1_.png
│   ├── target
│   │   ├── 1004_966_512_2_1_0_0_.png
│   │   ├── 1007_821_512_2_1_0_0_.png
│   │   ├── 1008_784_549_2_2_0_0_.png
│   │   ├── 1008_784_549_2_2_0_1_.png
│   │   ├── 1008_784_549_2_2_1_0_.png
│   │   ├── 1008_784_549_2_2_1_1_.png
│   │   ├── 1009_798_540_2_2_0_0_.png
│   │   └── 1009_798_540_2_2_0_1_.png
│   ├── renameCutedImageOfSrcAndTarget.py
│   ├── verifySizeInSrcAndTargetWhereNameIsSame.py
│   └── cutImageToEqualSize.py
├── README.md
├── utils
│   └── dataset.py
└── train.py

--------------------------------------------------------------------------------
/net/__init__.py:
--------------------------------------------------------------------------------
from .model import VGG16UNet

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.pyc
__pycache__/
net/__pycache__/
utils/__pycache__/

--------------------------------------------------------------------------------
/resultOfCycleGAN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/resultOfCycleGAN.png

--------------------------------------------------------------------------------
/resultOfUNet-VGG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/resultOfUNet-VGG.png

--------------------------------------------------------------------------------
/data/src/1004_966_512_2_1_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1004_966_512_2_1_0_0_.png

--------------------------------------------------------------------------------
/data/src/1007_821_512_2_1_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1007_821_512_2_1_0_0_.png

--------------------------------------------------------------------------------
/data/src/1008_784_549_2_2_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1008_784_549_2_2_0_0_.png

--------------------------------------------------------------------------------
/data/src/1008_784_549_2_2_0_1_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1008_784_549_2_2_0_1_.png

--------------------------------------------------------------------------------
/data/src/1008_784_549_2_2_1_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1008_784_549_2_2_1_0_.png

--------------------------------------------------------------------------------
/data/src/1008_784_549_2_2_1_1_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1008_784_549_2_2_1_1_.png
--------------------------------------------------------------------------------
/data/src/1009_798_540_2_2_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1009_798_540_2_2_0_0_.png

--------------------------------------------------------------------------------
/data/src/1009_798_540_2_2_0_1_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/src/1009_798_540_2_2_0_1_.png

--------------------------------------------------------------------------------
/data/target/1004_966_512_2_1_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1004_966_512_2_1_0_0_.png

--------------------------------------------------------------------------------
/data/target/1007_821_512_2_1_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1007_821_512_2_1_0_0_.png

--------------------------------------------------------------------------------
/data/target/1008_784_549_2_2_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1008_784_549_2_2_0_0_.png

--------------------------------------------------------------------------------
/data/target/1008_784_549_2_2_0_1_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1008_784_549_2_2_0_1_.png

--------------------------------------------------------------------------------
/data/target/1008_784_549_2_2_1_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1008_784_549_2_2_1_0_.png

--------------------------------------------------------------------------------
/data/target/1008_784_549_2_2_1_1_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1008_784_549_2_2_1_1_.png

--------------------------------------------------------------------------------
/data/target/1009_798_540_2_2_0_0_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1009_798_540_2_2_0_0_.png

--------------------------------------------------------------------------------
/data/target/1009_798_540_2_2_0_1_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gvfjyy/imageDenoise-UNet-VGG/HEAD/data/target/1009_798_540_2_2_0_1_.png

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# imageDenoiseUsingUNet-VGG
A denoising network for rubbing images, built on a UNet backbone with a VGG16-based encoder.


## Environment:
Python 3.5.3<br>
PyTorch 1.2.0<br>
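
net/model.py (and therefore the actual `VGG16UNet` class exported by net/__init__.py) is not reproduced in this excerpt. Purely as a hypothetical sketch of the idea described above, a UNet whose encoder reuses VGG16 convolutional blocks and whose decoder upsamples with transposed convolutions and skip connections, such a model could be organised roughly like this; the block splits, channel sizes, and class name are assumptions, not the repository's implementation:

```python
# Illustrative only: a small UNet-style decoder on top of torchvision's VGG16
# features. This is NOT the repository's net/model.py.
import torch
import torch.nn as nn
from torchvision.models import vgg16


class SketchVGG16UNet(nn.Module):
    def __init__(self, out_channels=1):
        super().__init__()
        feats = vgg16(pretrained=True).features
        # VGG16 feature blocks, split at the max-pooling boundaries.
        self.enc1 = feats[:4]     # 64 ch,  full resolution
        self.enc2 = feats[4:9]    # 128 ch, 1/2 resolution
        self.enc3 = feats[9:16]   # 256 ch, 1/4 resolution
        self.enc4 = feats[16:23]  # 512 ch, 1/8 resolution
        self.up3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.dec3 = nn.Sequential(nn.Conv2d(512, 256, 3, padding=1), nn.ReLU(inplace=True))
        self.up2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.dec2 = nn.Sequential(nn.Conv2d(256, 128, 3, padding=1), nn.ReLU(inplace=True))
        self.up1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.dec1 = nn.Sequential(nn.Conv2d(128, 64, 3, padding=1), nn.ReLU(inplace=True))
        self.out = nn.Conv2d(64, out_channels, kernel_size=1)

    def forward(self, x):
        # VGG16 expects 3-channel input; repeat a single grayscale channel if needed.
        if x.size(1) == 1:
            x = x.repeat(1, 3, 1, 1)
        e1 = self.enc1(x)
        e2 = self.enc2(e1)
        e3 = self.enc3(e2)
        e4 = self.enc4(e3)
        d3 = self.dec3(torch.cat([self.up3(e4), e3], dim=1))
        d2 = self.dec2(torch.cat([self.up2(d3), e2], dim=1))
        d1 = self.dec1(torch.cat([self.up1(d2), e1], dim=1))
        return torch.sigmoid(self.out(d1))
```

In this sketch the 512x512 patches produced by data/cutImageToEqualSize.py keep every feature-map dimension divisible by 8, so all skip connections line up.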


## Usage:
1. Prepare a paired dataset: put source and target images with identical filenames under data/src and data/target respectively.<br>
2. If the images are very large and memory is limited, use the scripts under the data/ folder to cut them into smaller patches.<br>
3. Train with train.py (a sketch of how such a paired dataset can be loaded follows this list).<br>
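
utils/dataset.py is not included in this excerpt. Purely as an illustration of the pairing convention from step 1 (identical filenames under data/src and data/target, grayscale images), a paired dataset could be loaded roughly like this; the class name, paths, and normalisation are assumptions, not the repository's actual loader:

```python
# Minimal sketch of a paired-image dataset: noisy inputs in data/src,
# clean targets with the same filenames in data/target. Illustrative only.
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image


class PairedDenoiseDataset(Dataset):
    def __init__(self, src_dir='data/src', target_dir='data/target'):
        self.src_dir = src_dir
        self.target_dir = target_dir
        # Keep only filenames that exist in both folders.
        self.names = sorted(
            n for n in os.listdir(src_dir)
            if os.path.exists(os.path.join(target_dir, n))
        )

    def __len__(self):
        return len(self.names)

    def _to_tensor(self, img):
        # Grayscale image -> float tensor in [0, 1] with shape (1, H, W).
        return torch.from_numpy(np.array(img, dtype=np.float32) / 255.0).unsqueeze(0)

    def __getitem__(self, idx):
        name = self.names[idx]
        src = Image.open(os.path.join(self.src_dir, name)).convert('L')
        target = Image.open(os.path.join(self.target_dir, name)).convert('L')
        return self._to_tensor(src), self._to_tensor(target)
```

A standard DataLoader over such a dataset would then feed (noisy, clean) pairs to the training loop.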


## Experimental results:
Dataset: an oracle-bone rubbing dataset.<br>
Baseline for comparison: CycleGAN<br>
UNet-VGG performs better than CycleGAN: CycleGAN visibly thins and weakens the character strokes, and it handles fine detail less well than UNet-VGG.<br>
### CycleGAN results
![image](https://github.com/libai-github/imageDenoiseUsingUNet-VGG/blob/master/resultOfCycleGAN.png)
### UNet-VGG results
![image](https://github.com/libai-github/imageDenoiseUsingUNet-VGG/blob/master/resultOfUNet-VGG.png)


--------------------------------------------------------------------------------
/data/renameCutedImageOfSrcAndTarget.py:
--------------------------------------------------------------------------------
import os
from PIL import Image

if __name__ == "__main__":
    a = 'src'
    b = 'target'
    id = 1000
    for i in os.listdir(a):
        # Convert each src/target pair to grayscale and rename both files
        # to the same numeric id so the pairing is preserved.
        img = Image.open(os.path.join(a, i)).convert('L')
        img.save(a + '/' + str(id) + '.png')
        os.remove(os.path.join(a, i))

        img = Image.open(os.path.join(b, i)).convert('L')
        img.save(b + '/' + str(id) + '.png')
        os.remove(os.path.join(b, i))

        print(id)
        id += 1

--------------------------------------------------------------------------------
/data/verifySizeInSrcAndTargetWhereNameIsSame.py:
--------------------------------------------------------------------------------
import os
from PIL import Image
import numpy as np
import shutil

if __name__ == "__main__":

    a = 'src'
    b = 'target'

    # Report every pair whose src and target images differ in size.
    for i in os.listdir(a):
        imga = Image.open(os.path.join(a, i))
        imgb = Image.open(os.path.join(b, i))
        if imga.size != imgb.size:
            print(imga.size, imgb.size)
            #imgb = imgb.resize(imga.size)
            #imgb.save(os.path.join(b, i))
            #os.remove(os.path.join(a, i))
            #os.remove(os.path.join(b, i))
            #shutil.move(os.path.join(a, i), os.path.join(c, i))
            #shutil.move(os.path.join(b, i), os.path.join(d, i))
            print(i)
    '''
    # Alternative check: report pairs smaller than 300 px on either side.
    a = 'src'
    b = 'target'
    num = 0
    for i in os.listdir(a):

        imga = Image.open(os.path.join(a, i))
        imgb = Image.open(os.path.join(b, i))
        if imga.size[0] < 300 or imga.size[1] < 300:
            print(imga.size, imgb.size)
            print(i)
            num += 1
    print(num)

    '''

--------------------------------------------------------------------------------
/data/cutImageToEqualSize.py:
--------------------------------------------------------------------------------
import os
from PIL import Image
import numpy as np
import math

if __name__ == "__main__":
    root = 'src'
    size = 512
    for i in os.listdir(root):
        path = os.path.join(root, i)
        img = Image.open(path).convert('L')
        # Upscale images narrower or shorter than 512 px so that at least one
        # full 512x512 patch can be cut.
        if img.size[0] < 512:
            a, b = img.size
            img = img.resize((512, int(b * (512 / a))))
            print('chuxian')
        if img.size[1] < 512:
            a, b = img.size
            img = img.resize((int(a * (512 / b)), 512))
            print('chuxian')
        img = np.array(img)
        print(img.shape)

        h, w = img.shape
        y_max = math.ceil(h / size)
        x_max = math.ceil(w / size)
        for y in range(y_max):
            for x in range(x_max):
                if (y + 1 == y_max) and (x + 1

--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
...
            # modules up to this index hold pretrained VGG16 parameters and are not re-initialized
            torch.nn.init.xavier_normal_(m.weight.data)  # Xavier normal initialization
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.ReLU):
            continue
        elif isinstance(m, nn.MaxPool2d):
            continue
        elif isinstance(m, nn.ConvTranspose2d):
            torch.nn.init.xavier_normal_(m.weight.data)  # Xavier normal initialization
            if m.bias is not None:
                m.bias.data.zero_()
        num += 1

    # load the parameters saved from the previous training run
    net.load_state_dict(torch.load('Checkpoint_epoch100l1.pth'))


    # move the network to the CPU/GPU
    net.to(device=device, dtype=torch.float32)
    # summary(net, (1, 224, 224))

    try:
        train_net(net=net,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  lr=args.lr,
                  device=device,
                  img_scale=args.scale,
                  out_channel=args.out_channel)
    except KeyboardInterrupt:
        torch.save(net.state_dict(), 'INTERRUPTED.pth')
        print('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)

--------------------------------------------------------------------------------
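
The fragment of train.py above ends with the training call and the interrupt checkpoint; this excerpt does not show how a trained checkpoint is applied to new images. Purely as a hypothetical illustration (the real constructor arguments of `VGG16UNet` and its expected input range are not visible here), denoising a single rubbing patch could look roughly like this:

```python
# Illustrative inference sketch, not part of the repository. Assumes the model
# takes and returns single-channel images scaled to [0, 1].
import numpy as np
import torch
from PIL import Image
from net import VGG16UNet  # exported by net/__init__.py

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = VGG16UNet()  # constructor arguments of the real model are not shown in this excerpt
net.load_state_dict(torch.load('Checkpoint_epoch100l1.pth', map_location=device))
net.to(device=device, dtype=torch.float32)
net.eval()

# Load one noisy patch from data/src, run it through the network, and save the result.
img = Image.open('data/src/1004_966_512_2_1_0_0_.png').convert('L')
x = torch.from_numpy(np.array(img, dtype=np.float32) / 255.0)[None, None].to(device)
with torch.no_grad():
    y = net(x)
out = (y[0, 0].cpu().numpy() * 255).clip(0, 255).astype(np.uint8)
Image.fromarray(out).save('denoised.png')
```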