├── images
│   ├── arch2.png
│   ├── steg_loss.png
│   └── architecture1.png
├── README.md
├── trainer.py
└── model.py
/images/arch2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krishvishal/DeepSteganography/HEAD/images/arch2.png
--------------------------------------------------------------------------------
/images/steg_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krishvishal/DeepSteganography/HEAD/images/steg_loss.png
--------------------------------------------------------------------------------
/images/architecture1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krishvishal/DeepSteganography/HEAD/images/architecture1.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# DeepSteganography
Hiding Images in Plain Sight: Deep Steganography

### Unofficial PyTorch implementation of the NIPS 2017 paper "Hiding Images in Plain Sight: Deep Steganography".

[link to paper](https://papers.nips.cc/paper/6802-hiding-images-in-plain-sight-deep-steganography)


### Abstract

> Steganography is the practice of concealing a secret message within another,
> ordinary, message. Commonly, steganography is used to unobtrusively hide a small
> message within the noisy regions of a larger image. In this study, we attempt
> to place a full size color image within another image of the same size. Deep
> neural networks are simultaneously trained to create the hiding and revealing
> processes and are designed to specifically work as a pair. The system is trained on
> images drawn randomly from the ImageNet database, and works well on natural
> images from a wide variety of sources. Beyond demonstrating the successful
> application of deep learning to hiding images, we carefully examine how the result
> is achieved and explore extensions. Unlike many popular steganographic methods
> that encode the secret message within the least significant bits of the carrier image,
> our approach compresses and distributes the secret image’s representation across
> all of the available bits.

### Architecture

The architecture consists of a preparation network, a hiding network, and a reveal network.

![Architecture](https://github.com/krishnavishalv/DeepSteganography/blob/master/images/architecture1.png)

### Error equation

![Error](https://github.com/krishnavishalv/DeepSteganography/blob/master/images/arch2.png)


![Error equation](https://github.com/krishnavishalv/DeepSteganography/blob/master/images/steg_loss.png)


The first error term backpropagates only through the preparation and hiding networks, while the second error term backpropagates through all three networks.
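
In the notation of the figures above (cover image c, secret image s, generated container c', revealed secret s'), the loss that `steg_loss` in `trainer.py` computes with mean-squared-error terms is:

`L(c, c', s, s') = ||c - c'||² + β·||s - s'||²`

where β controls how strongly errors in the recovered secret are penalized relative to distortions of the cover.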
--------------------------------------------------------------------------------
/trainer.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
from torchvision import datasets
import torchvision.transforms as transforms

from model import StegNet

TRAIN_PATH = '/home/vkv/Downloads/tiny_imagenet/train'
TEST_PATH = '/home/vkv/Downloads/test'
MODEL_PATH = '/home/vkv/Downloads/DeepSteg/checkpoint'
epochs = 100

# Channel statistics for normalization/denormalization (standard ImageNet values, assumed).
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]


def denormalize(image, std, mean):
    # Undo the channel-wise normalization so images can be visualized.
    for t in range(3):
        image[t, :, :] = (image[t, :, :] * std[t]) + mean[t]
    return image


def steg_loss(S_prime, C_prime, S, C, beta):
    # Cover loss trains the preparation and hiding networks;
    # the beta-weighted secret loss trains all three networks.
    loss_cover = F.mse_loss(C_prime, C)
    loss_secret = F.mse_loss(S_prime, S)
    loss = loss_cover + beta * loss_secret
    return loss, loss_cover, loss_secret


train_loader = DataLoader(
    datasets.ImageFolder(
        TRAIN_PATH,
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)])),
    batch_size=10, pin_memory=True, num_workers=1,
    shuffle=True, drop_last=True)

test_loader = DataLoader(
    datasets.ImageFolder(
        TEST_PATH,
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)])),
    batch_size=5, pin_memory=True, num_workers=1,
    shuffle=True, drop_last=True)

model = StegNet()


def train(train_loader, beta, lr):
    optimizer = optim.Adam(model.parameters(), lr=lr)

    losses = []

    for epoch in range(epochs):
        model.train()
        train_loss = []

        for i, data in enumerate(train_loader):

            images, _ = data

            # Split each batch in half: one half serves as covers, the other as secrets.
            covers = images[:len(images) // 2]
            secrets = images[len(images) // 2:]
            covers = Variable(covers, requires_grad=False)
            secrets = Variable(secrets, requires_grad=False)

            optimizer.zero_grad()
            hidden, output = model(secrets, covers)

            loss, loss_cover, loss_secret = steg_loss(output, hidden, secrets, covers, beta)
            loss.backward()
            optimizer.step()

            train_loss.append(loss.item())
            losses.append(loss.item())

        torch.save(model.state_dict(), MODEL_PATH + '.pkl')
        avg_train_loss = np.mean(train_loss)
        print('Train Loss {0:.4f}, cover_error {1:.4f}, secret_error {2:.4f}'.
              format(loss.item(), loss_cover.item(), loss_secret.item()))
        print('Epoch [{0}/{1}], Average_loss: {2:.4f}'.format(
            epoch + 1, epochs, avg_train_loss))

    return model, avg_train_loss, losses
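

# Illustrative entry point (not part of the original script). The beta and lr
# values below are assumed placeholders, not values taken from the paper or
# from this repository.
if __name__ == '__main__':
    trained_model, final_avg_loss, loss_history = train(train_loader, beta=1.0, lr=0.001)

    # Plot the per-batch loss curve; matplotlib is already imported above.
    plt.plot(loss_history)
    plt.xlabel('iteration')
    plt.ylabel('combined steganography loss')
    plt.savefig('training_loss.png')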
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F


def gaussian_noise(tensor, mean=0, stddev=0.1):
    # Additive Gaussian noise, used to corrupt the container before it is revealed.
    noise = torch.randn_like(tensor) * stddev + mean
    return tensor + noise


class PrepNet(nn.Module):
    """Preparation network: transforms the secret image into features to be hidden."""

    def __init__(self):
        super().__init__()

        # Three parallel branches with 3x3, 4x4 and 5x5 kernels; their outputs are concatenated.
        self.p1 = nn.Sequential(
            nn.Conv2d(3, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU())

        self.p2 = nn.Sequential(
            nn.Conv2d(3, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU())

        self.p3 = nn.Sequential(
            nn.Conv2d(3, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU())

        self.p4 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=3, padding=1),
            nn.ReLU())

        self.p5 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU())

        self.p6 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=5, padding=2),
            nn.ReLU())

    def forward(self, x):
        p1 = self.p1(x)
        p2 = self.p2(x)
        p3 = self.p3(x)

        x = torch.cat((p1, p2, p3), 1)

        p4 = self.p4(x)
        p5 = self.p5(x)
        p6 = self.p6(x)

        x = torch.cat((p4, p5, p6), 1)

        return x


class HidingNet(nn.Module):
    """Hiding network: embeds the prepared secret features into the cover image."""

    def __init__(self):
        super().__init__()

        # Input is the 150-channel PrepNet output concatenated with the 3-channel cover.
        self.h1 = nn.Sequential(
            nn.Conv2d(153, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU())

        self.h2 = nn.Sequential(
            nn.Conv2d(153, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU())

        self.h3 = nn.Sequential(
            nn.Conv2d(153, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU())

        self.h4 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=3, padding=1),
            nn.ReLU())

        self.h5 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU())
        self.h6 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=5, padding=2),
            nn.ReLU())

        self.h7 = nn.Sequential(
            nn.Conv2d(150, 3, kernel_size=1, padding=0))

    def forward(self, x):
        h1 = self.h1(x)
        h2 = self.h2(x)
        h3 = self.h3(x)

        x = torch.cat((h1, h2, h3), 1)

        h4 = self.h4(x)
        h5 = self.h5(x)
        h6 = self.h6(x)

        x = torch.cat((h4, h5, h6), 1)
        x = self.h7(x)
        # Noisy copy of the container; keeping it in the graph lets the secret loss
        # backpropagate through all three networks, as described in the README.
        x_n = gaussian_noise(x, 0, 0.1)
        return x, x_n


class RevealNet(nn.Module):
    """Reveal network: recovers the secret image from the (noisy) container."""

    def __init__(self):
        super().__init__()
        self.r1 = nn.Sequential(
            nn.Conv2d(3, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=3, padding=1),
            nn.ReLU())
        self.r2 = nn.Sequential(
            nn.Conv2d(3, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU())
        self.r3 = nn.Sequential(
            nn.Conv2d(3, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=5, padding=2),
            nn.ReLU())
        self.r4 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=3, padding=1),
            nn.ReLU())
        self.r5 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(50, 50, kernel_size=4, padding=2),
            nn.ReLU())
        self.r6 = nn.Sequential(
            nn.Conv2d(150, 50, kernel_size=5, padding=2),
            nn.ReLU())
        self.r7 = nn.Sequential(
            nn.Conv2d(150, 3, kernel_size=1, padding=0))

    def forward(self, x):
        r1 = self.r1(x)
        r2 = self.r2(x)
        r3 = self.r3(x)
        x = torch.cat((r1, r2, r3), 1)
        r4 = self.r4(x)
        r5 = self.r5(x)
        r6 = self.r6(x)
        x = torch.cat((r4, r5, r6), 1)
        x = self.r7(x)
        return x


class StegNet(nn.Module):
    """Full pipeline: prepare the secret, hide it in the cover, reveal it again."""

    def __init__(self):
        super().__init__()
        self.s1 = PrepNet()
        self.s2 = HidingNet()
        self.s3 = RevealNet()

    def forward(self, secret, cover):
        x1 = self.s1(secret)
        x = torch.cat((x1, cover), 1)
        x2, x2_n = self.s2(x)   # x2: container image, x2_n: noisy container
        x3 = self.s3(x2_n)      # x3: revealed secret

        return x2, x3
--------------------------------------------------------------------------------
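
The snippet below is not part of the repository; it is a minimal sketch of how the pieces above fit together, using random 224x224 RGB tensors (the crop size used in `trainer.py`) and an assumed placeholder value for beta.

import torch
import torch.nn.functional as F

from model import StegNet

model = StegNet()
covers = torch.randn(2, 3, 224, 224)   # stand-in cover images
secrets = torch.randn(2, 3, 224, 224)  # stand-in secret images

# PrepNet encodes the secrets, HidingNet embeds them into the covers,
# RevealNet recovers them from the noisy containers.
containers, revealed = model(secrets, covers)
print(containers.shape, revealed.shape)  # both torch.Size([2, 3, 224, 224])

beta = 1.0  # assumed placeholder weight
loss = F.mse_loss(containers, covers) + beta * F.mse_loss(revealed, secrets)
print(loss.item())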