├── README.md ├── creat_pathtxt.py ├── sort.py ├── ssim.py └── train.py /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### 1.introduction 4 | 5 | Based on the convolutional recurrent neural network, make an image sequence prediction. For example, input 16 consecutive image frames, the image size (3,128,128). Encoder feature extraction is performed on each consecutive 16 images using a convolutional network, and then the extracted feature sequence is input into the recurrent neural network (LSTM), and then the decoder is deconvolved into troch (3,128,128) of the original image size. It can also be regarded as the 17th frame image generated based on the first 16 frames, and the 17th frame image of the original sequence is used as the label to calculate the loss.... 6 | 7 | ### 2.data 8 | 9 | Sort the data in the following way: 10 | 11 | 12 | 13 | 14 | 15 | Use creat_pathtxt.py to save the image path into a txt file for easy extraction during training. A sequence extracted from each line in txt. During training, each line of pictures is iterated in a loop, encoded and sent to LSTM training. The generated txt file is as follows: 16 | 17 | ![](https://raw.githubusercontent.com/wangyifan2018/cloudimg/master/data20200806181105.png) 18 | 19 | The parameter seqsize can be changed in creat_pathtxt.py, the last picture of each sequence is the label by default, and the loss is calculated. 20 | 21 | ### 3.train 22 | 23 | Set the training parameters in train.py, the main parameters are described as follows: 24 | 25 | BATCH_SIZE: the number of pictures in each batch of training; 26 | 27 | SEQ_SIZE: The number of pictures used for prediction in the sequence, which is 1 less than the number of pictures in each line of txt (note); 28 | 29 | learning_rate: Trying this parameter several times works well; 30 | 31 | epochs: The number of iterations, one hundred is enough. 32 | 33 | Using GeForce RTX 2060 to train 100 rounds of 2000 images, it takes about 20 minutes. 34 | 35 | 36 | 37 | ### 4.prediction 38 | 39 | Run train.py, the decoded pictures and original pictures will be saved every 5 epochs of training, and stored in the folder conv_autoencoder. The following is a set of decoded pictures and original pictures: 40 | 41 | 42 | 43 | ![](https://raw.githubusercontent.com/wangyifan2018/cloudimg/master/data20200806181651.png) 44 | 45 | decode_image 46 | 47 | ![](https://raw.githubusercontent.com/wangyifan2018/cloudimg/master/data20200806181727.png) 48 | 49 | raw_image 50 | -------------------------------------------------------------------------------- /creat_pathtxt.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | dir='./imgsrc' 4 | fp = open('./img_path.txt','w+') 5 | imgfile_list = os.listdir('./imgsrc') 6 | imgfile_list.sort(key= lambda x:(x[:])) 7 | #print(img_list) 8 | seqsize = 8 9 | skip = 2 ################ 等间隔循环 10 | for imgfile in imgfile_list: 11 | filepath = os.path.join(dir,imgfile) 12 | img_list = os.listdir(filepath) 13 | img_list.sort(key=lambda x: (x[:-4])) 14 | #滑窗取序列,步长为8 15 | for i in range(0, len(img_list)-seqsize, 8): 16 | for j in range(i, i+seqsize): 17 | img = img_list[j] 18 | path = os.path.join(filepath, img) 19 | if j == i+seqsize-1: 20 | fp.write(path+'\n') 21 | #elif j != (i + seqsize - skip): #将label跳过skip wyf 22 | else: 23 | fp.write(path+' ') 24 | 25 | fp.close() -------------------------------------------------------------------------------- /sort.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf8 -*- 2 | 3 | import os 4 | import shutil 5 | 6 | class BatchRename(): 7 | ''' 8 | 批量重命名文件夹中的图片文件 9 | ''' 10 | def __init__(self): 11 | self.path = './imgsrc/01' 12 | self.newpath = './new_pic' 13 | 14 | def tiqu(self): 15 | filelist = os.listdir(self.path) 16 | # print(filelist) 17 | # total_num = len(filelist) 18 | for file in filelist: 19 | # print file 20 | filedir = os.path.join(self.path, file) 21 | # print filedir 22 | (shotname, extension) = os.path.splitext(file) 23 | # a = os.path.splitext(file) 24 | # b = shotname.split('.')[0] 25 | b = shotname 26 | # print(shotname) 27 | 28 | if (int(b) % 10 == 1): 29 | print (shotname) 30 | # print(str(filedir)) 31 | shutil.copy(str(filedir), os.path.join(self.newpath, b+'.png')) 32 | 33 | if __name__ == '__main__': 34 | demo = BatchRename() 35 | demo.tiqu() 36 | 37 | -------------------------------------------------------------------------------- /ssim.py: -------------------------------------------------------------------------------- 1 | from skimage.measure import compare_ssim 2 | from scipy.misc import imread 3 | import numpy as np 4 | 5 | img1 = imread('decode_image_100.png') 6 | img2 = imread('raw_image_100.png') 7 | 8 | img2 = np.resize(img2, (img1.shape[0], img1.shape[1], img1.shape[2])) 9 | 10 | print(img2.shape) 11 | print(img1.shape) 12 | ssim = compare_ssim(img1, img2, multichannel=True) 13 | 14 | print(ssim) -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import torch.nn as nn 4 | import numpy as np 5 | import torch.optim as optim 6 | from PIL import Image 7 | from torch.utils.data import Dataset, DataLoader 8 | from torchvision import transforms 9 | from torch.autograd import Variable 10 | from torchvision.utils import save_image 11 | 12 | BATCH_SIZE = 8 13 | SEQ_SIZE = 7 14 | learning_rate = 0.0001 15 | epochs = 100 16 | PATH_SAVE = './model/lstm_model.t7' 17 | os.environ["CUDA_VISIBLE_DEVICES"] = "0,1" 18 | 19 | transform_list = [ 20 | transforms.ToTensor() 21 | ] 22 | 23 | data_transforms = transforms.Compose( transform_list ) 24 | 25 | 26 | def default_loader(path): 27 | return Image.open(path).convert('RGB') 28 | 29 | def to_img(x): 30 | x = 0.5 * (x + 1.) # 将-1~1转成0-1 31 | x = x.clamp(0, 1) 32 | x = x.view(x.shape[0], 3, 128, 128) 33 | return x 34 | 35 | class SeqDataset(Dataset): 36 | def __init__(self, txt, transform=None, target_transform=None, loader=default_loader): 37 | fh = open(txt, 'r') 38 | imgseqs = [] 39 | for line in fh: 40 | line = line.strip('\n') 41 | line = line.rstrip() 42 | imgseqs.append(line) 43 | self.num_samples = len(imgseqs) 44 | self.imgseqs = imgseqs 45 | self.transform = transform 46 | self.target_transform = target_transform 47 | self.loader = loader 48 | 49 | def __getitem__(self, index): 50 | current_index = np.random.choice(range(0, self.num_samples)) 51 | imgs_path = self.imgseqs[current_index].split() 52 | current_imgs = [] 53 | current_imgs_path = imgs_path[:len(imgs_path)-1] 54 | current_label_path = imgs_path[len(imgs_path)-1] 55 | current_label = self.loader(current_label_path) 56 | 57 | 58 | 59 | for frame in current_imgs_path: 60 | img = self.loader(frame) 61 | if self.transform is not None: 62 | img = self.transform(img) 63 | current_imgs.append(img) 64 | current_label = self.transform(current_label) 65 | #print(current_label.shape) 66 | batch_cur_imgs = np.stack(current_imgs, axis=0) 67 | return batch_cur_imgs, current_label 68 | 69 | def __len__(self): 70 | return len(self.imgseqs) 71 | 72 | class EncoderMUG2d_LSTM(nn.Module): 73 | def __init__(self, input_nc=3, encode_dim=1024, lstm_hidden_size=1024, seq_len=SEQ_SIZE, num_lstm_layers=1, bidirectional=False): 74 | super(EncoderMUG2d_LSTM, self).__init__() 75 | self.seq_len = seq_len 76 | self.num_directions = 2 if bidirectional else 1 77 | self.num_lstm_layers = num_lstm_layers 78 | self.lstm_hidden_size = lstm_hidden_size 79 | #3*128*128 80 | self.encoder = nn.Sequential( 81 | nn.Conv2d(input_nc, 32, 4,2,1), # 32*64*64 82 | nn.BatchNorm2d(32), 83 | nn.LeakyReLU(0.2, inplace=True), 84 | #32*63*63 85 | nn.Conv2d(32, 64, 4, 2, 1), # 64*32*32 86 | nn.BatchNorm2d(64), 87 | nn.LeakyReLU(0.2, inplace=True), 88 | #64*31*31 89 | nn.Conv2d(64, 128, 4, 2, 1), # 128*16*16 90 | nn.BatchNorm2d(128), 91 | nn.LeakyReLU(0.2, inplace=True), 92 | 93 | nn.Conv2d(128, 256, 4, 2, 1), # 256*8*8 94 | nn.BatchNorm2d(256), 95 | nn.LeakyReLU(0.2, inplace=True), 96 | 97 | nn.Conv2d(256, 512, 4, 2, 1), # 512*4*4 98 | nn.BatchNorm2d(512), 99 | nn.LeakyReLU(0.2, inplace=True), 100 | 101 | nn.Conv2d(512, 512, 4, 2, 1), # 512*2*2 102 | nn.BatchNorm2d(512), 103 | nn.LeakyReLU(0.2, inplace=True), 104 | 105 | nn.Conv2d(512, 1024, 4, 2, 1), # 1024*1*1 106 | nn.BatchNorm2d(1024), 107 | nn.LeakyReLU(0.2, inplace=True), 108 | ) 109 | 110 | self.fc = nn.Linear(1024, encode_dim) 111 | self.lstm = nn.LSTM(encode_dim, encode_dim, batch_first=True) 112 | 113 | def init_hidden(self, x): 114 | batch_size = x.size(0) 115 | h = x.data.new( 116 | self.num_directions * self.num_lstm_layers, batch_size, self.lstm_hidden_size).zero_() 117 | c = x.data.new( 118 | self.num_directions * self.num_lstm_layers, batch_size, self.lstm_hidden_size).zero_() 119 | return Variable(h), Variable(c) 120 | 121 | 122 | def forward(self, x): 123 | #x.shape [batchsize,seqsize,3,128,128] 124 | B = x.size(0) 125 | x = x.view(B * SEQ_SIZE, 3, 128, 128) #x.shape[batchsize*seqsize,3,128,128] 126 | # [batchsize*seqsize, 3, 128, 128] -> [batchsize*seqsize, 1024,1,1] 127 | x = self.encoder(x) 128 | #[batchsize * seqsize, 1024, 1, 1]-> [batchsize*seqsize, 1024] 129 | x = x.view(-1, 1024) 130 | # [batchsize * seqsize, 1024] 131 | x = self.fc(x) 132 | # [batchsize , seqsize ,1024] 133 | x = x.view(-1, SEQ_SIZE, x.size(1)) 134 | h0, c0 = self.init_hidden(x) 135 | output, (hn,cn) = self.lstm(x,(h0,c0)) 136 | return hn 137 | 138 | class DecoderMUG2d(nn.Module): 139 | def __init__(self, output_nc=3, encode_dim=1024): #output size: 64x64 140 | super(DecoderMUG2d, self).__init__() 141 | 142 | self.project = nn.Sequential( 143 | nn.Linear(encode_dim, 1024*1*1), 144 | nn.ReLU(inplace=True) 145 | ) 146 | self.decoder = nn.Sequential( 147 | nn.ConvTranspose2d(1024, 512, 4), # 512*4*4 148 | nn.BatchNorm2d(512), 149 | nn.ReLU(True), 150 | 151 | nn.ConvTranspose2d(512, 256, 4, stride=2), # 256*10*10 152 | nn.BatchNorm2d(256), 153 | nn.ReLU(True), 154 | 155 | nn.ConvTranspose2d(256, 128, 4), # 128*13*13 156 | nn.BatchNorm2d(128), 157 | nn.ReLU(True), 158 | 159 | nn.ConvTranspose2d(128, 64, 4,stride=2), # 64*28*28 160 | nn.BatchNorm2d(64), 161 | nn.ReLU(True), 162 | 163 | nn.ConvTranspose2d(64, 32, 4), # 32*31*31 164 | nn.BatchNorm2d(32), 165 | nn.ReLU(True), 166 | 167 | nn.ConvTranspose2d(32, 16, 4,stride=2), # 16*64*64 168 | nn.BatchNorm2d(16), 169 | nn.ReLU(True), 170 | 171 | nn.ConvTranspose2d(16, output_nc, 4, stride=2, padding=1), # 3*128*128 172 | nn.Sigmoid(), 173 | ) 174 | def forward(self, x): 175 | x = self.project(x) 176 | x = x.view(-1, 1024, 1, 1) 177 | decode = self.decoder(x) 178 | return decode 179 | 180 | class net(nn.Module): 181 | def __init__(self): 182 | super(net,self).__init__() 183 | self.n1 = EncoderMUG2d_LSTM() 184 | self.n2 = DecoderMUG2d() 185 | 186 | def forward(self, x): 187 | output = self.n1(x) 188 | output = self.n2(output) #B*3*128*128 189 | return output 190 | 191 | 192 | if __name__ == '__main__': 193 | train_data = SeqDataset(txt='./img_path.txt',transform=data_transforms) 194 | train_loader = DataLoader(train_data, shuffle=True, num_workers=0,batch_size=BATCH_SIZE) 195 | 196 | 197 | model = net() 198 | if torch.cuda.is_available(): 199 | model.cuda() 200 | optimizer = optim.Adam(model.parameters(), lr=learning_rate) 201 | loss_func = nn.MSELoss() 202 | 203 | inputs, label = next(iter(train_loader)) ######## 取label 迭代 204 | 205 | for epoch in range(epochs): 206 | print('epoch {}'.format(epoch + 1)) 207 | train_loss = 0. 208 | train_acc = 0. 209 | for batch_x, batch_y in train_loader: 210 | inputs, label = Variable(batch_x).cuda(), Variable(batch_y).cuda() 211 | output = model(inputs) 212 | loss = loss_func(output, label)/label.shape[0] 213 | optimizer.zero_grad() 214 | loss.backward() 215 | optimizer.step() 216 | 217 | print('epoch: {}, Loss: {:.4f}'.format(epoch + 1, loss.data.cpu().numpy())) 218 | 219 | if (epoch + 1) % 5 == 0: # 每 5 次,保存一下解码的图片和原图片 220 | pic = to_img(output.cpu().data) 221 | img = to_img(label.cpu().data) 222 | if not os.path.exists('./conv_autoencoder'): 223 | os.mkdir('./conv_autoencoder') 224 | save_image(pic, './conv_autoencoder/decode_image_{}.png'.format(epoch + 1)) 225 | save_image(img, './conv_autoencoder/raw_image_{}.png'.format(epoch + 1)) 226 | 227 | torch.save(model.state_dict(), PATH_SAVE) 228 | --------------------------------------------------------------------------------