├── src
    ├── __init__.py
    ├── Models
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-36.pyc
    │   │   └── SingleSizeModel.cpython-36.pyc
    │   └── SingleSizeModel.py
    ├── __pycache__
    │   ├── Utils.cpython-36.pyc
    │   └── __init__.cpython-36.pyc
    └── Utils.py
├── _images
    ├── README.md
    ├── current_state.png
    ├── loss_beta_.75.png
    ├── loss_beta_0.25.png
    └── network_diagram.png
├── requirements.txt
├── LICENSE
├── README.md
└── Steganography.ipynb

/src/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/src/Models/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/_images/README.md:
--------------------------------------------------------------------------------
1 | This folder contains images for the Github Readme and notebooks
2 | 
--------------------------------------------------------------------------------
/_images/current_state.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/_images/current_state.png
--------------------------------------------------------------------------------
/_images/loss_beta_.75.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/_images/loss_beta_.75.png
--------------------------------------------------------------------------------
/_images/loss_beta_0.25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/_images/loss_beta_0.25.png
--------------------------------------------------------------------------------
/_images/network_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/_images/network_diagram.png
--------------------------------------------------------------------------------
/src/__pycache__/Utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/src/__pycache__/Utils.cpython-36.pyc
--------------------------------------------------------------------------------
/src/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/src/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/src/Models/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/src/Models/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/src/Models/__pycache__/SingleSizeModel.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harveyslash/Deep-Steganography/HEAD/src/Models/__pycache__/SingleSizeModel.cpython-36.pyc
--------------------------------------------------------------------------------
/requirements.txt:
-------------------------------------------------------------------------------- 1 | bleach==1.5.0 2 | enum34==1.1.6 3 | html5lib==0.9999999 4 | Markdown==2.6.11 5 | numpy==1.14.0 6 | Pillow==5.0.0 7 | protobuf==3.5.1 8 | six==1.11.0 9 | tensorboard==1.0.0a6 10 | tensorflow==1.4.1 11 | tensorflow-tensorboard==0.4.0rc3 12 | Werkzeug==0.14.1 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Harshvardhan Gupta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/Utils.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | from PIL import Image,ImageOps 4 | import random 5 | import tensorflow as tf 6 | import time 7 | from datetime import datetime 8 | from os.path import join 9 | import numpy as np 10 | 11 | def normalize_batch(imgs): 12 | return (imgs - np.array([0.485, 0.456, 0.406])) /np.array([0.229, 0.224, 0.225]) 13 | 14 | def denormalize_batch(imgs,should_clip=True): 15 | imgs= (imgs * np.array([0.229, 0.224, 0.225])) + np.array([0.485, 0.456, 0.406]) 16 | 17 | if should_clip: 18 | imgs= np.clip(imgs,0,1) 19 | return imgs 20 | 21 | def get_img_batch(files_list,batch_size=32,size=(224,224),should_normalise=True): 22 | 23 | batch_cover = [] 24 | batch_secret = [] 25 | 26 | for i in range(batch_size): 27 | img_secret_path = random.choice(files_list) 28 | img_cover_path = random.choice(files_list) 29 | 30 | img_secret = Image.open(img_secret_path).convert("RGB") 31 | img_cover = Image.open(img_cover_path).convert("RGB") 32 | 33 | img_secret = np.array(ImageOps.fit(img_secret,size),dtype=np.float32) 34 | img_cover = np.array(ImageOps.fit(img_cover,size),dtype=np.float32) 35 | 36 | img_secret /= 255. 37 | img_cover /= 255. 
38 | 
39 |         batch_cover.append(img_cover)
40 |         batch_secret.append(img_secret)
41 | 
42 |     batch_cover,batch_secret = np.array(batch_cover) , np.array(batch_secret)
43 | 
44 |     if should_normalise:
45 |         batch_cover = normalize_batch(batch_cover)
46 |         batch_secret = normalize_batch(batch_secret)
47 | 
48 |     return batch_cover,batch_secret
49 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep-Steganography
2 | 
3 | TensorFlow implementation of [Hiding Images in Plain Sight: Deep Steganography](https://papers.nips.cc/paper/6802-hiding-images-in-plain-sight-deep-steganography) (unofficial)
4 | 
5 | [Steganography](https://en.wikipedia.org/wiki/Steganography) is the science of hiding a message in another message. In this case, a picture is hidden inside another picture using deep learning.
6 | 
7 | A blog post about this project can be found [here](https://buzzrobot.com/hiding-images-using-ai-deep-steganography-b7726bd58b06).
8 | 
9 | ## Dependencies Installation
10 | The dependencies can be installed by running
11 | ```
12 | pip install -r requirements.txt
13 | ```
14 | This will install the CPU version of TensorFlow by default.
15 | If you would like to use your GPU, you can run
16 | ```
17 | pip install --force-reinstall tensorflow-gpu
18 | ```
19 | This reinstalls the GPU version of TensorFlow for your system.
20 | 
21 | 
22 | 
23 | ## Framework
24 | The framework takes in two images. One is the secret image (extreme right) and the other is the cover image (extreme left).
25 | 
26 | The goal is to 'hide' the secret image in the cover image such that only the cover image is visible. This is the covered image (center left).
27 | 
28 | Then, this covered image can be passed to the Reveal network, which recovers the secret image (center right).
29 | 
30 | 
31 | 
32 | ![Current System State](_images/current_state.png)
33 | As you can see, it is visually very hard to tell that the covered image has been tampered with. Yet the Reveal network can recover almost all of the important parts of the secret image.
34 | 
35 | 
36 | ### Architecture
37 | ![network_design](_images/network_diagram.png)
38 | The Prep, Hide, and Reveal networks have the same convolutional block structure. Therefore, only the Reveal network is shown in full in the diagram, and the Prep/Hide networks are collapsed (to make the image fit).
39 | 
40 | ### Loss Curves
41 | Two networks were trained with different beta values (0.25 and 0.75), both with a batch size of 8. Their loss curves are shown below:
42 | Beta = 0.25
43 | ![beta .25](_images/loss_beta_0.25.png)
44 | 
45 | Beta = 0.75
46 | ![beta .75](_images/loss_beta_.75.png)
47 | 
48 | 
49 | ## Demo
50 | 
51 | To test the trained networks, we have written a browser implementation using Keras.js.
52 | You can find it here:
53 | 
54 | https://harveyslash.github.io/Deep-Steg-JS/
55 | Due to the heavy computation it requires, it takes about 3 minutes to set up.
56 | 
--------------------------------------------------------------------------------
/src/Models/SingleSizeModel.py:
--------------------------------------------------------------------------------
1 | """
2 | """
3 | import tensorflow as tf
4 | from tensorflow.python.layers.convolutional import conv2d
5 | from src.Utils import get_img_batch
6 | import glob
7 | class SingleSizeModel():
8 |     """ A convolutional model that handles only same-size cover
9 |     and secret images.
10 | """ 11 | def get_prep_network_op(self,secret_tensor): 12 | 13 | with tf.variable_scope('prep_net'): 14 | 15 | with tf.variable_scope("3x3_conv_branch"): 16 | conv_3x3 = conv2d(inputs=secret_tensor,filters=50,kernel_size=3,padding='same',name="1",activation=tf.nn.relu) 17 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="2",activation=tf.nn.relu) 18 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="3",activation=tf.nn.relu) 19 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="4",activation=tf.nn.relu) 20 | 21 | with tf.variable_scope("4x4_conv_branch"): 22 | conv_4x4 = conv2d(inputs=secret_tensor,filters=50,kernel_size=4,padding='same',name="1",activation=tf.nn.relu) 23 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="2",activation=tf.nn.relu) 24 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="3",activation=tf.nn.relu) 25 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="4",activation=tf.nn.relu) 26 | 27 | with tf.variable_scope("5x5_conv_branch"): 28 | conv_5x5 = conv2d(inputs=secret_tensor,filters=50,kernel_size=5,padding='same',name="1",activation=tf.nn.relu) 29 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="2",activation=tf.nn.relu) 30 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="3",activation=tf.nn.relu) 31 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="4",activation=tf.nn.relu) 32 | 33 | concat_1 = tf.concat([conv_3x3,conv_4x4,conv_5x5],axis=3,name='concat_1') 34 | 35 | conv_5x5 = conv2d(inputs=concat_1,filters=50,kernel_size=5,padding='same',name="final_5x5",activation=tf.nn.relu) 36 | conv_4x4 = conv2d(inputs=concat_1,filters=50,kernel_size=4,padding='same',name="final_4x4",activation=tf.nn.relu) 37 | conv_3x3 = conv2d(inputs=concat_1,filters=50,kernel_size=3,padding='same',name="final_3x3",activation=tf.nn.relu) 38 | 39 | concat_final = tf.concat([conv_5x5,conv_4x4,conv_3x3],axis=3,name='concat_final') 40 | 41 | return concat_final 42 | 43 | 44 | def get_hiding_network_op(self,cover_tensor,prep_output): 45 | 46 | with tf.variable_scope('hide_net'): 47 | concat_input = tf.concat([cover_tensor,prep_output],axis=3,name='images_features_concat') 48 | 49 | with tf.variable_scope("3x3_conv_branch"): 50 | conv_3x3 = conv2d(inputs=concat_input,filters=50,kernel_size=3,padding='same',name="1",activation=tf.nn.relu) 51 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="2",activation=tf.nn.relu) 52 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="3",activation=tf.nn.relu) 53 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="4",activation=tf.nn.relu) 54 | 55 | with tf.variable_scope("4x4_conv_branch"): 56 | conv_4x4 = conv2d(inputs=concat_input,filters=50,kernel_size=4,padding='same',name="1",activation=tf.nn.relu) 57 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="2",activation=tf.nn.relu) 58 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="3",activation=tf.nn.relu) 59 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="4",activation=tf.nn.relu) 60 | 61 | with tf.variable_scope("5x5_conv_branch"): 62 | conv_5x5 = 
conv2d(inputs=concat_input,filters=50,kernel_size=5,padding='same',name="1",activation=tf.nn.relu) 63 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="2",activation=tf.nn.relu) 64 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="3",activation=tf.nn.relu) 65 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="4",activation=tf.nn.relu) 66 | 67 | concat_1 = tf.concat([conv_3x3,conv_4x4,conv_5x5],axis=3,name='concat_1') 68 | 69 | conv_5x5 = conv2d(inputs=concat_1,filters=50,kernel_size=5,padding='same',name="final_5x5",activation=tf.nn.relu) 70 | conv_4x4 = conv2d(inputs=concat_1,filters=50,kernel_size=4,padding='same',name="final_4x4",activation=tf.nn.relu) 71 | conv_3x3 = conv2d(inputs=concat_1,filters=50,kernel_size=3,padding='same',name="final_3x3",activation=tf.nn.relu) 72 | 73 | concat_final = tf.concat([conv_5x5,conv_4x4,conv_3x3],axis=3,name='concat_final') 74 | output = tf.layers.conv2d(inputs=concat_final,filters=3,kernel_size=1,padding='same',name='output') 75 | 76 | return output 77 | 78 | 79 | 80 | def get_reveal_network_op(self,container_tensor): 81 | 82 | with tf.variable_scope('reveal_net'): 83 | 84 | with tf.variable_scope("3x3_conv_branch"): 85 | conv_3x3 = conv2d(inputs=container_tensor,filters=50,kernel_size=3,padding='same',name="1",activation=tf.nn.relu) 86 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="2",activation=tf.nn.relu) 87 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="3",activation=tf.nn.relu) 88 | conv_3x3 = conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name="4",activation=tf.nn.relu) 89 | 90 | with tf.variable_scope("4x4_conv_branch"): 91 | conv_4x4 = conv2d(inputs=container_tensor,filters=50,kernel_size=4,padding='same',name="1",activation=tf.nn.relu) 92 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="2",activation=tf.nn.relu) 93 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="3",activation=tf.nn.relu) 94 | conv_4x4 = conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name="4",activation=tf.nn.relu) 95 | 96 | with tf.variable_scope("5x5_conv_branch"): 97 | conv_5x5 = conv2d(inputs=container_tensor,filters=50,kernel_size=5,padding='same',name="1",activation=tf.nn.relu) 98 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="2",activation=tf.nn.relu) 99 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="3",activation=tf.nn.relu) 100 | conv_5x5 = conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name="4",activation=tf.nn.relu) 101 | 102 | concat_1 = tf.concat([conv_3x3,conv_4x4,conv_5x5],axis=3,name='concat_1') 103 | 104 | conv_5x5 = conv2d(inputs=concat_1,filters=50,kernel_size=5,padding='same',name="final_5x5",activation=tf.nn.relu) 105 | conv_4x4 = conv2d(inputs=concat_1,filters=50,kernel_size=4,padding='same',name="final_4x4",activation=tf.nn.relu) 106 | conv_3x3 = conv2d(inputs=concat_1,filters=50,kernel_size=3,padding='same',name="final_3x3",activation=tf.nn.relu) 107 | 108 | concat_final = tf.concat([conv_5x5,conv_4x4,conv_3x3],axis=3,name='concat_final') 109 | 110 | output = tf.layers.conv2d(inputs=concat_final,filters=3,kernel_size=1,padding='same',name='output') 111 | 112 | return output 113 | 114 | def get_noise_layer_op(self,tensor,std=.1): 115 | with tf.variable_scope("noise_layer"): 116 | return tensor + 
tf.random_normal(shape=tf.shape(tensor), mean=0.0, stddev=std, dtype=tf.float32) 117 | 118 | def get_loss_op(self,secret_true,secret_pred,cover_true,cover_pred,beta=.5): 119 | 120 | with tf.variable_scope("losses"): 121 | beta = tf.constant(beta,name="beta") 122 | secret_mse = tf.losses.mean_squared_error(secret_true,secret_pred) 123 | cover_mse = tf.losses.mean_squared_error(cover_true,cover_pred) 124 | final_loss = cover_mse + beta*secret_mse 125 | return final_loss , secret_mse , cover_mse 126 | 127 | def get_tensor_to_img_op(self,tensor): 128 | with tf.variable_scope("",reuse=True): 129 | t = tensor*tf.convert_to_tensor([0.229, 0.224, 0.225]) + tf.convert_to_tensor([0.485, 0.456, 0.406]) 130 | return tf.clip_by_value(t,0,1) 131 | 132 | def prepare_training_graph(self,secret_tensor,cover_tensor,global_step_tensor): 133 | 134 | prep_output_op = self.get_prep_network_op(secret_tensor) 135 | hiding_output_op = self.get_hiding_network_op(cover_tensor=cover_tensor,prep_output=prep_output_op) 136 | noise_add_op = self.get_noise_layer_op(hiding_output_op) 137 | reveal_output_op = self.get_reveal_network_op(noise_add_op) 138 | 139 | loss_op,secret_loss_op,cover_loss_op = self.get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hiding_output_op,beta=self.beta) 140 | 141 | minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss_op,global_step=global_step_tensor) 142 | 143 | tf.summary.scalar('loss', loss_op,family='train') 144 | tf.summary.scalar('reveal_net_loss', secret_loss_op,family='train') 145 | tf.summary.scalar('cover_net_loss', cover_loss_op,family='train') 146 | 147 | tf.summary.image('secret',self.get_tensor_to_img_op(secret_tensor),max_outputs=1,family='train') 148 | tf.summary.image('cover',self.get_tensor_to_img_op(cover_tensor),max_outputs=1,family='train') 149 | tf.summary.image('hidden',self.get_tensor_to_img_op(hiding_output_op),max_outputs=1,family='train') 150 | tf.summary.image('hidden_noisy',self.get_tensor_to_img_op(noise_add_op),max_outputs=1,family='train') 151 | tf.summary.image('revealed',self.get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='train') 152 | 153 | merged_summary_op = tf.summary.merge_all() 154 | 155 | return minimize_op, merged_summary_op 156 | 157 | def prepare_test_graph(self,secret_tensor,cover_tensor): 158 | with tf.variable_scope("",reuse=True): 159 | 160 | prep_output_op = self.get_prep_network_op(secret_tensor) 161 | hiding_output_op = self.get_hiding_network_op(cover_tensor=cover_tensor,prep_output=prep_output_op) 162 | reveal_output_op = self.get_reveal_network_op(hiding_output_op) 163 | 164 | loss_op,secret_loss_op,cover_loss_op = self.get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hiding_output_op) 165 | 166 | tf.summary.scalar('loss', loss_op,family='test') 167 | tf.summary.scalar('reveal_net_loss', secret_loss_op,family='test') 168 | tf.summary.scalar('cover_net_loss', cover_loss_op,family='test') 169 | 170 | tf.summary.image('secret',self.get_tensor_to_img_op(secret_tensor),max_outputs=1,family='test') 171 | tf.summary.image('cover',self.get_tensor_to_img_op(cover_tensor),max_outputs=1,family='test') 172 | tf.summary.image('hidden',self.get_tensor_to_img_op(hiding_output_op),max_outputs=1,family='test') 173 | tf.summary.image('revealed',self.get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='test') 174 | 175 | merged_summary_op = tf.summary.merge_all() 176 | 177 | return merged_summary_op 178 | 179 | def prepare_deployment_graph(self,secret_tensor,cover_tensor,covered_tensor): 180 | with 
tf.variable_scope("",reuse=True): 181 | 182 | prep_output_op = self.get_prep_network_op(secret_tensor) 183 | hiding_output_op = self.get_hiding_network_op(cover_tensor=cover_tensor,prep_output=prep_output_op) 184 | 185 | reveal_output_op = self.get_reveal_network_op(covered_tensor) 186 | 187 | return hiding_output_op , reveal_output_op 188 | 189 | def get_tensor_to_img_op(self,tensor): 190 | with tf.variable_scope("",reuse=True): 191 | t = tensor*tf.convert_to_tensor([0.229, 0.224, 0.225]) + tf.convert_to_tensor([0.485, 0.456, 0.406]) 192 | return tf.clip_by_value(t,0,1) 193 | 194 | 195 | def __init__(self, beta,log_path, input_shape=(None,224, 224, 3) ): 196 | 197 | self.beta = beta 198 | self.learning_rate = 0.0001 199 | self.sess = tf.InteractiveSession() 200 | 201 | self.secret_tensor = tf.placeholder(shape=input_shape,dtype=tf.float32,name="input_prep") 202 | self.cover_tensor = tf.placeholder(shape=input_shape,dtype=tf.float32,name="input_hide") 203 | self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step') 204 | 205 | self.train_op , self.summary_op = self.prepare_training_graph(self.secret_tensor,self.cover_tensor,self.global_step_tensor) 206 | 207 | self.writer = tf.summary.FileWriter(log_path,self.sess.graph) 208 | 209 | self.test_op = self.prepare_test_graph(self.secret_tensor,self.cover_tensor) 210 | 211 | self.covered_tensor = tf.placeholder(shape=input_shape,dtype=tf.float32,name="deploy_covered") 212 | self.deploy_hide_image_op , self.deploy_reveal_image_op = self.prepare_deployment_graph(self.secret_tensor,self.cover_tensor,self.covered_tensor) 213 | self.sess.run(tf.global_variables_initializer()) 214 | 215 | print("OK") 216 | 217 | def make_chkp(self,path): 218 | saver = tf.train.Saver(max_to_keep=1) 219 | global_step = self.sess.run(self.global_step_tensor) 220 | saver.save(self.sess,path,global_step) 221 | 222 | def load_chkp(self,path): 223 | saver = tf.train.Saver(max_to_keep=1) 224 | global_step = self.sess.run(self.global_step_tensor) 225 | print("LOADED") 226 | saver.restore(self.sess,path) 227 | 228 | 229 | def train(self,steps,files_list,batch_size): 230 | 231 | 232 | for step in range(steps): 233 | saver = tf.train.Saver(max_to_keep=1) 234 | covers,secrets = get_img_batch(files_list=files_list,batch_size=batch_size) 235 | self.sess.run([self.train_op],feed_dict={"input_prep:0":secrets,"input_hide:0":covers}) 236 | 237 | if step %10 == 0: 238 | summary,global_step = self.sess.run([self.summary_op,self.global_step_tensor],feed_dict={"input_prep:0":secrets,"input_hide:0":covers}) 239 | self.writer.add_summary(summary,global_step) 240 | 241 | 242 | 243 | 244 | 245 | m = SingleSizeModel(beta=.75,log_path="/tmp/lol/exp_2") 246 | # m.load_chkp("/home/harsh/ml/Stegano/checkpoints/beta_0.75.chkp-102192") 247 | files_list = glob.glob("/home/harsh/ml/Stegano/data/train/"+"**/*") 248 | m.train(100,files_list,8) 249 | # print(files_list) 250 | 251 | -------------------------------------------------------------------------------- /Steganography.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Hiding Images in Plain Sight: Deep Steganography \n", 8 | "\n", 9 | "#### An Unofficial Tensorflow Implementation" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [ 17 | { 18 | "name": "stdout", 19 | "output_type": "stream", 20 | "text": [ 21 | "Populating the 
interactive namespace from numpy and matplotlib\n" 22 | ] 23 | } 24 | ], 25 | "source": [ 26 | "%pylab inline" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": {}, 32 | "source": [ 33 | "## Imports" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 2, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "import glob\n", 43 | "import os\n", 44 | "from PIL import Image,ImageOps\n", 45 | "import random\n", 46 | "import tensorflow as tf\n", 47 | "import time\n", 48 | "from datetime import datetime\n", 49 | "from os.path import join" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": {}, 55 | "source": [ 56 | "## Configuration\n", 57 | "All Configuration related information is represented in CAPS" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 3, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "TRAIN_PATH = './data/train/'\n", 67 | "LOGS_Path = \"./logs/\"\n", 68 | "CHECKPOINTS_PATH = './checkpoints/'\n", 69 | "\n", 70 | "\n", 71 | "BATCH_SIZE = 8\n", 72 | "LEARNING_RATE = .0001\n", 73 | "BETA = .75\n", 74 | "\n", 75 | "EXP_NAME = f\"beta_{BETA}\"" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "metadata": {}, 81 | "source": [ 82 | "## Helper Methods to Handle images \n", 83 | "The images are first converted to float values between 0 and 1. \n", 84 | "\n", 85 | "Then they are normalised using the Mean and STD from ImageNet. \n", 86 | "\n", 87 | "To convert these normalised values back to images, a helper function to undo this normalisation is also written." 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 4, 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "files_list = glob.glob(join(TRAIN_PATH,\"**/*\"))\n", 97 | "\n", 98 | "def normalize_batch(imgs):\n", 99 | " return (imgs - np.array([0.485, 0.456, 0.406])) /np.array([0.229, 0.224, 0.225])\n", 100 | " \n", 101 | "def denormalize_batch(imgs,should_clip=True):\n", 102 | " imgs= (imgs * np.array([0.229, 0.224, 0.225])) + np.array([0.485, 0.456, 0.406])\n", 103 | " \n", 104 | " if should_clip:\n", 105 | " imgs= np.clip(imgs,0,1)\n", 106 | " return imgs\n", 107 | "\n", 108 | "def get_img_batch(files_list,batch_size=32,size=(224,224),should_normalise=True):\n", 109 | " \n", 110 | " batch_cover = []\n", 111 | " batch_secret = []\n", 112 | "\n", 113 | " for i in range(batch_size):\n", 114 | " img_secret_path = random.choice(files_list)\n", 115 | " img_cover_path = random.choice(files_list)\n", 116 | " \n", 117 | " img_secret = Image.open(img_secret_path).convert(\"RGB\")\n", 118 | " img_cover = Image.open(img_cover_path).convert(\"RGB\")\n", 119 | "\n", 120 | " img_secret = np.array(ImageOps.fit(img_secret,size),dtype=np.float32)\n", 121 | " img_cover = np.array(ImageOps.fit(img_cover,size),dtype=np.float32)\n", 122 | " \n", 123 | " img_secret /= 255.\n", 124 | " img_cover /= 255.\n", 125 | " \n", 126 | " batch_cover.append(img_cover)\n", 127 | " batch_secret.append(img_secret)\n", 128 | " \n", 129 | " batch_cover,batch_secret = np.array(batch_cover) , np.array(batch_secret)\n", 130 | " \n", 131 | " if should_normalise:\n", 132 | " batch_cover = normalize_batch(batch_cover)\n", 133 | " batch_secret = normalize_batch(batch_secret)\n", 134 | "\n", 135 | " return batch_cover,batch_secret\n", 136 | " " 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "## Network Definitions\n", 144 | "The three networks are identical in terms of structure. 
\n", 145 | "\n", 146 | "1. The Prepare network takes in the **Secret Image** and outputs a (BATCH_SIZE,INPUT_HEIGHT,INPUT_WEIGHT,150) tensor. \n", 147 | "\n", 148 | "2. The Cover network takes in the output from 1. , and a *Cover Image*. It concatenates these two tensors , giving a (BATCH_SIZE,INPUT_HEIGHT,INPUT_WEIGHT,153) tensor. Then it performs Convolutions , and outputs a (BATCH_SIZE,INPUT_HEIGHT,INPUT_WEIGHT,3) image.\n", 149 | "\n", 150 | "3. The Reveal Network Takes in the output image from Cover Network , and outputs the Revealed Image (which is supposed to look like the **Secret Image**\n" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 5, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "def get_prep_network_op(secret_tensor):\n", 160 | " \n", 161 | " with tf.variable_scope('prep_net'):\n", 162 | " \n", 163 | " with tf.variable_scope(\"3x3_conv_branch\"):\n", 164 | " conv_3x3 = tf.layers.conv2d(inputs=secret_tensor,filters=50,kernel_size=3,padding='same',name=\"1\",activation=tf.nn.relu)\n", 165 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"2\",activation=tf.nn.relu)\n", 166 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"3\",activation=tf.nn.relu)\n", 167 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"4\",activation=tf.nn.relu)\n", 168 | " \n", 169 | " with tf.variable_scope(\"4x4_conv_branch\"):\n", 170 | " conv_4x4 = tf.layers.conv2d(inputs=secret_tensor,filters=50,kernel_size=4,padding='same',name=\"1\",activation=tf.nn.relu)\n", 171 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"2\",activation=tf.nn.relu) \n", 172 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"3\",activation=tf.nn.relu)\n", 173 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"4\",activation=tf.nn.relu)\n", 174 | "\n", 175 | " with tf.variable_scope(\"5x5_conv_branch\"):\n", 176 | " conv_5x5 = tf.layers.conv2d(inputs=secret_tensor,filters=50,kernel_size=5,padding='same',name=\"1\",activation=tf.nn.relu)\n", 177 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"2\",activation=tf.nn.relu) \n", 178 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"3\",activation=tf.nn.relu)\n", 179 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"4\",activation=tf.nn.relu)\n", 180 | " \n", 181 | " concat_1 = tf.concat([conv_3x3,conv_4x4,conv_5x5],axis=3,name='concat_1')\n", 182 | " \n", 183 | " conv_5x5 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=5,padding='same',name=\"final_5x5\",activation=tf.nn.relu)\n", 184 | " conv_4x4 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=4,padding='same',name=\"final_4x4\",activation=tf.nn.relu)\n", 185 | " conv_3x3 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=3,padding='same',name=\"final_3x3\",activation=tf.nn.relu)\n", 186 | " \n", 187 | " concat_final = tf.concat([conv_5x5,conv_4x4,conv_3x3],axis=3,name='concat_final')\n", 188 | "\n", 189 | " return concat_final\n", 190 | "\n", 191 | " \n", 192 | "def get_hiding_network_op(cover_tensor,prep_output):\n", 193 | " \n", 194 | " with tf.variable_scope('hide_net'):\n", 195 | " concat_input = 
tf.concat([cover_tensor,prep_output],axis=3,name='images_features_concat')\n", 196 | " \n", 197 | " with tf.variable_scope(\"3x3_conv_branch\"):\n", 198 | " conv_3x3 = tf.layers.conv2d(inputs=concat_input,filters=50,kernel_size=3,padding='same',name=\"1\",activation=tf.nn.relu)\n", 199 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"2\",activation=tf.nn.relu)\n", 200 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"3\",activation=tf.nn.relu)\n", 201 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"4\",activation=tf.nn.relu)\n", 202 | " \n", 203 | " with tf.variable_scope(\"4x4_conv_branch\"):\n", 204 | " conv_4x4 = tf.layers.conv2d(inputs=concat_input,filters=50,kernel_size=4,padding='same',name=\"1\",activation=tf.nn.relu)\n", 205 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"2\",activation=tf.nn.relu) \n", 206 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"3\",activation=tf.nn.relu)\n", 207 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"4\",activation=tf.nn.relu)\n", 208 | "\n", 209 | " with tf.variable_scope(\"5x5_conv_branch\"):\n", 210 | " conv_5x5 = tf.layers.conv2d(inputs=concat_input,filters=50,kernel_size=5,padding='same',name=\"1\",activation=tf.nn.relu)\n", 211 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"2\",activation=tf.nn.relu) \n", 212 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"3\",activation=tf.nn.relu)\n", 213 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"4\",activation=tf.nn.relu)\n", 214 | " \n", 215 | " concat_1 = tf.concat([conv_3x3,conv_4x4,conv_5x5],axis=3,name='concat_1')\n", 216 | " \n", 217 | " conv_5x5 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=5,padding='same',name=\"final_5x5\",activation=tf.nn.relu)\n", 218 | " conv_4x4 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=4,padding='same',name=\"final_4x4\",activation=tf.nn.relu)\n", 219 | " conv_3x3 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=3,padding='same',name=\"final_3x3\",activation=tf.nn.relu)\n", 220 | " \n", 221 | " concat_final = tf.concat([conv_5x5,conv_4x4,conv_3x3],axis=3,name='concat_final')\n", 222 | " output = tf.layers.conv2d(inputs=concat_final,filters=3,kernel_size=1,padding='same',name='output')\n", 223 | " \n", 224 | " return output\n", 225 | " \n", 226 | " \n", 227 | " \n", 228 | "def get_reveal_network_op(container_tensor):\n", 229 | " \n", 230 | " with tf.variable_scope('reveal_net'):\n", 231 | " \n", 232 | " with tf.variable_scope(\"3x3_conv_branch\"):\n", 233 | " conv_3x3 = tf.layers.conv2d(inputs=container_tensor,filters=50,kernel_size=3,padding='same',name=\"1\",activation=tf.nn.relu)\n", 234 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"2\",activation=tf.nn.relu)\n", 235 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"3\",activation=tf.nn.relu)\n", 236 | " conv_3x3 = tf.layers.conv2d(inputs=conv_3x3,filters=50,kernel_size=3,padding='same',name=\"4\",activation=tf.nn.relu)\n", 237 | " \n", 238 | " with tf.variable_scope(\"4x4_conv_branch\"):\n", 239 | " conv_4x4 = 
tf.layers.conv2d(inputs=container_tensor,filters=50,kernel_size=4,padding='same',name=\"1\",activation=tf.nn.relu)\n", 240 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"2\",activation=tf.nn.relu) \n", 241 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"3\",activation=tf.nn.relu)\n", 242 | " conv_4x4 = tf.layers.conv2d(inputs=conv_4x4,filters=50,kernel_size=4,padding='same',name=\"4\",activation=tf.nn.relu)\n", 243 | "\n", 244 | " with tf.variable_scope(\"5x5_conv_branch\"):\n", 245 | " conv_5x5 = tf.layers.conv2d(inputs=container_tensor,filters=50,kernel_size=5,padding='same',name=\"1\",activation=tf.nn.relu)\n", 246 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"2\",activation=tf.nn.relu) \n", 247 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"3\",activation=tf.nn.relu)\n", 248 | " conv_5x5 = tf.layers.conv2d(inputs=conv_5x5,filters=50,kernel_size=5,padding='same',name=\"4\",activation=tf.nn.relu)\n", 249 | " \n", 250 | " concat_1 = tf.concat([conv_3x3,conv_4x4,conv_5x5],axis=3,name='concat_1')\n", 251 | " \n", 252 | " conv_5x5 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=5,padding='same',name=\"final_5x5\",activation=tf.nn.relu)\n", 253 | " conv_4x4 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=4,padding='same',name=\"final_4x4\",activation=tf.nn.relu)\n", 254 | " conv_3x3 = tf.layers.conv2d(inputs=concat_1,filters=50,kernel_size=3,padding='same',name=\"final_3x3\",activation=tf.nn.relu)\n", 255 | " \n", 256 | " concat_final = tf.concat([conv_5x5,conv_4x4,conv_3x3],axis=3,name='concat_final')\n", 257 | " \n", 258 | " output = tf.layers.conv2d(inputs=concat_final,filters=3,kernel_size=1,padding='same',name='output')\n", 259 | "\n", 260 | " return output\n", 261 | "\n", 262 | "def get_noise_layer_op(tensor,std=.1):\n", 263 | " with tf.variable_scope(\"noise_layer\"):\n", 264 | " return tensor + tf.random_normal(shape=tf.shape(tensor), mean=0.0, stddev=std, dtype=tf.float32) \n", 265 | " \n", 266 | "def get_loss_op(secret_true,secret_pred,cover_true,cover_pred,beta=.5):\n", 267 | " \n", 268 | " with tf.variable_scope(\"losses\"):\n", 269 | " beta = tf.constant(beta,name=\"beta\")\n", 270 | " secret_mse = tf.losses.mean_squared_error(secret_true,secret_pred)\n", 271 | " cover_mse = tf.losses.mean_squared_error(cover_true,cover_pred)\n", 272 | " final_loss = cover_mse + beta*secret_mse\n", 273 | " return final_loss , secret_mse , cover_mse \n", 274 | "\n", 275 | "def get_tensor_to_img_op(tensor):\n", 276 | " with tf.variable_scope(\"\",reuse=True):\n", 277 | " t = tensor*tf.convert_to_tensor([0.229, 0.224, 0.225]) + tf.convert_to_tensor([0.485, 0.456, 0.406])\n", 278 | " return tf.clip_by_value(t,0,1)" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": 6, 284 | "metadata": {}, 285 | "outputs": [], 286 | "source": [ 287 | "def prepare_training_graph(secret_tensor,cover_tensor,global_step_tensor):\n", 288 | " \n", 289 | " prep_output_op = get_prep_network_op(secret_tensor)\n", 290 | " hiding_output_op = get_hiding_network_op(cover_tensor=cover_tensor,prep_output=prep_output_op)\n", 291 | " noise_add_op = get_noise_layer_op(hiding_output_op)\n", 292 | " reveal_output_op = get_reveal_network_op(noise_add_op)\n", 293 | " \n", 294 | " loss_op,secret_loss_op,cover_loss_op = get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hiding_output_op,beta=BETA)\n", 
295 | "\n", 296 | " minimize_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss_op,global_step=global_step_tensor)\n", 297 | " \n", 298 | " tf.summary.scalar('loss', loss_op,family='train')\n", 299 | " tf.summary.scalar('reveal_net_loss', secret_loss_op,family='train')\n", 300 | " tf.summary.scalar('cover_net_loss', cover_loss_op,family='train')\n", 301 | "\n", 302 | " tf.summary.image('secret',get_tensor_to_img_op(secret_tensor),max_outputs=1,family='train')\n", 303 | " tf.summary.image('cover',get_tensor_to_img_op(cover_tensor),max_outputs=1,family='train')\n", 304 | " tf.summary.image('hidden',get_tensor_to_img_op(hiding_output_op),max_outputs=1,family='train')\n", 305 | " tf.summary.image('hidden_noisy',get_tensor_to_img_op(noise_add_op),max_outputs=1,family='train')\n", 306 | " tf.summary.image('revealed',get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='train')\n", 307 | "\n", 308 | " merged_summary_op = tf.summary.merge_all()\n", 309 | " \n", 310 | " return minimize_op, merged_summary_op " 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 7, 316 | "metadata": {}, 317 | "outputs": [], 318 | "source": [ 319 | "def prepare_test_graph(secret_tensor,cover_tensor):\n", 320 | " with tf.variable_scope(\"\",reuse=True):\n", 321 | " \n", 322 | " prep_output_op = get_prep_network_op(secret_tensor)\n", 323 | " hiding_output_op = get_hiding_network_op(cover_tensor=cover_tensor,prep_output=prep_output_op)\n", 324 | " reveal_output_op = get_reveal_network_op(hiding_output_op)\n", 325 | " \n", 326 | " loss_op,secret_loss_op,cover_loss_op = get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hiding_output_op)\n", 327 | "\n", 328 | " tf.summary.scalar('loss', loss_op,family='test')\n", 329 | " tf.summary.scalar('reveal_net_loss', secret_loss_op,family='test')\n", 330 | " tf.summary.scalar('cover_net_loss', cover_loss_op,family='test')\n", 331 | "\n", 332 | " tf.summary.image('secret',get_tensor_to_img_op(secret_tensor),max_outputs=1,family='test')\n", 333 | " tf.summary.image('cover',get_tensor_to_img_op(cover_tensor),max_outputs=1,family='test')\n", 334 | " tf.summary.image('hidden',get_tensor_to_img_op(hiding_output_op),max_outputs=1,family='test')\n", 335 | " tf.summary.image('revealed',get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='test')\n", 336 | "\n", 337 | " merged_summary_op = tf.summary.merge_all()\n", 338 | "\n", 339 | " return merged_summary_op " 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": 8, 345 | "metadata": {}, 346 | "outputs": [], 347 | "source": [ 348 | "def prepare_deployment_graph(secret_tensor,cover_tensor,covered_tensor):\n", 349 | " with tf.variable_scope(\"\",reuse=True):\n", 350 | "\n", 351 | " prep_output_op = get_prep_network_op(secret_tensor)\n", 352 | " hiding_output_op = get_hiding_network_op(cover_tensor=cover_tensor,prep_output=prep_output_op)\n", 353 | "\n", 354 | " reveal_output_op = get_reveal_network_op(covered_tensor)\n", 355 | "\n", 356 | " return hiding_output_op , reveal_output_op" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": 9, 362 | "metadata": {}, 363 | "outputs": [], 364 | "source": [ 365 | "sess = tf.InteractiveSession(graph=tf.Graph())" 366 | ] 367 | }, 368 | { 369 | "cell_type": "code", 370 | "execution_count": 10, 371 | "metadata": {}, 372 | "outputs": [], 373 | "source": [ 374 | "secret_tensor = tf.placeholder(shape=[None,224,224,3],dtype=tf.float32,name=\"input_prep\")\n", 375 | "cover_tensor = 
tf.placeholder(shape=[None,224,224,3],dtype=tf.float32,name=\"input_hide\")\n", 376 | "global_step_tensor = tf.Variable(0, trainable=False, name='global_step')\n", 377 | "\n", 378 | "train_op , summary_op = prepare_training_graph(secret_tensor,cover_tensor,global_step_tensor)\n", 379 | "\n", 380 | "writer = tf.summary.FileWriter(join(LOGS_Path,EXP_NAME),sess.graph)\n", 381 | "\n", 382 | "test_op = prepare_test_graph(secret_tensor,cover_tensor)\n", 383 | "\n", 384 | "covered_tensor = tf.placeholder(shape=[None,224,224,3],dtype=tf.float32,name=\"deploy_covered\")\n", 385 | "deploy_hide_image_op , deploy_reveal_image_op = prepare_deployment_graph(secret_tensor,cover_tensor,covered_tensor)" 386 | ] 387 | }, 388 | { 389 | "cell_type": "code", 390 | "execution_count": 11, 391 | "metadata": {}, 392 | "outputs": [], 393 | "source": [ 394 | "saver = tf.train.Saver(max_to_keep=1)\n", 395 | "sess.run(tf.global_variables_initializer())\n", 396 | "# saver.restore(sess,join(CHECKPOINTS_PATH,EXP_NAME))" 397 | ] 398 | }, 399 | { 400 | "cell_type": "code", 401 | "execution_count": null, 402 | "metadata": {}, 403 | "outputs": [], 404 | "source": [ 405 | "total_steps = len(files_list)//BATCH_SIZE + 1" 406 | ] 407 | }, 408 | { 409 | "cell_type": "code", 410 | "execution_count": null, 411 | "metadata": {}, 412 | "outputs": [], 413 | "source": [ 414 | "for ep in range(100):\n", 415 | " for step in range(total_steps):\n", 416 | " covers,secrets = get_img_batch(files_list=files_list,batch_size=BATCH_SIZE)\n", 417 | " sess.run([train_op],feed_dict={\"input_prep:0\":secrets,\"input_hide:0\":covers})\n", 418 | " \n", 419 | " if step % 10 ==0 :\n", 420 | " \n", 421 | " summary,global_step = sess.run([summary_op,global_step_tensor],feed_dict={\"input_prep:0\":secrets,\"input_hide:0\":covers})\n", 422 | " writer.add_summary(summary,global_step)\n", 423 | " \n", 424 | " if step % 100 ==0 :\n", 425 | " \n", 426 | " covers,secrets = get_img_batch(files_list=files_list,batch_size=1)\n", 427 | " summary,global_step = sess.run([test_op,global_step_tensor],feed_dict={\"input_prep:0\":secrets,\"input_hide:0\":covers})\n", 428 | " writer.add_summary(summary,global_step)\n", 429 | "\n", 430 | " \n", 431 | " save_path = saver.save(sess, join(CHECKPOINTS_PATH,EXP_NAME+\".chkp\"),global_step=global_step)\n" 432 | ] 433 | }, 434 | { 435 | "cell_type": "code", 436 | "execution_count": null, 437 | "metadata": {}, 438 | "outputs": [], 439 | "source": [ 440 | "# sess.close()\n", 441 | "\n", 442 | "writer.close()" 443 | ] 444 | }, 445 | { 446 | "cell_type": "code", 447 | "execution_count": null, 448 | "metadata": {}, 449 | "outputs": [], 450 | "source": [ 451 | "covers,secrets = get_img_batch(files_list=files_list,batch_size=1)\n", 452 | "\n", 453 | "cover = covers.squeeze()\n", 454 | "secret = secrets.squeeze()\n", 455 | "plt.imshow(denormalize_batch(cover))\n", 456 | "plt.show()\n", 457 | "plt.imshow(denormalize_batch(secret))\n", 458 | "plt.show()" 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": null, 464 | "metadata": {}, 465 | "outputs": [], 466 | "source": [ 467 | "hidden = sess.run(deploy_hide_image_op,feed_dict={'input_prep:0':secrets,'input_hide:0':covers})\n", 468 | "\n", 469 | "plt.imshow(denormalize_batch(hidden.squeeze()))" 470 | ] 471 | }, 472 | { 473 | "cell_type": "code", 474 | "execution_count": null, 475 | "metadata": {}, 476 | "outputs": [], 477 | "source": [ 478 | "revealed = sess.run(deploy_reveal_image_op,feed_dict={'deploy_covered:0':hidden})\n", 479 | "\n", 480 | 
"plt.imshow(denormalize_batch(revealed.squeeze()))" 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": null, 486 | "metadata": {}, 487 | "outputs": [], 488 | "source": [ 489 | "plt.imshow(np.clip(hiding_output.squeeze(),0,1))" 490 | ] 491 | }, 492 | { 493 | "cell_type": "code", 494 | "execution_count": null, 495 | "metadata": {}, 496 | "outputs": [], 497 | "source": [ 498 | "hiding_network_output = sess.run([hiding_output_op],\n", 499 | " feed_dict={secret_tensor:secrets,cover_tensor:covers})[0]" 500 | ] 501 | }, 502 | { 503 | "cell_type": "code", 504 | "execution_count": null, 505 | "metadata": {}, 506 | "outputs": [], 507 | "source": [ 508 | "# plt.imshow(np.clip(hiding_network_output[0],0,1))" 509 | ] 510 | }, 511 | { 512 | "cell_type": "code", 513 | "execution_count": null, 514 | "metadata": {}, 515 | "outputs": [], 516 | "source": [ 517 | "# join(\"OK\",\".OK\",\".OK\")" 518 | ] 519 | }, 520 | { 521 | "cell_type": "code", 522 | "execution_count": null, 523 | "metadata": {}, 524 | "outputs": [], 525 | "source": [ 526 | "# covers,secrets = get_img_batch(files_list=files_list,batch_size=BATCH_SIZE)\n", 527 | "\n", 528 | "# type(secrets)" 529 | ] 530 | }, 531 | { 532 | "cell_type": "code", 533 | "execution_count": null, 534 | "metadata": {}, 535 | "outputs": [], 536 | "source": [ 537 | "# files_list" 538 | ] 539 | }, 540 | { 541 | "cell_type": "code", 542 | "execution_count": null, 543 | "metadata": {}, 544 | "outputs": [], 545 | "source": [ 546 | "# image_str = tf.placeholder(tf.string)\n", 547 | "# im_tf = tf.image.decode_image(image_str)" 548 | ] 549 | }, 550 | { 551 | "cell_type": "code", 552 | "execution_count": null, 553 | "metadata": {}, 554 | "outputs": [], 555 | "source": [ 556 | "# cover_imgs = []\n", 557 | "# hidden_imgs = []\n", 558 | "# hidden_noisy = []\n", 559 | "# reveal_imgs = []\n", 560 | "# secret_imgs = []\n", 561 | "\n", 562 | "\n", 563 | "# count = 0\n", 564 | "# for e in tf.train.summary_iterator(join(LOGS_Path,'beta_0.25','events.out.tfevents.1516061354.pcvirus')):\n", 565 | "# for v in e.summary.value:\n", 566 | "# if v.tag == 'train/train/cover/image':\n", 567 | "# output = im_tf.eval(feed_dict={image_str:v.image.encoded_image_string})\n", 568 | "# cover_imgs.append(output)\n", 569 | " \n", 570 | "# if v.tag == 'train/train/hidden/image':\n", 571 | "# output = im_tf.eval(feed_dict={image_str:v.image.encoded_image_string})\n", 572 | "# hidden_imgs.append(output)\n", 573 | " \n", 574 | "# if v.tag == 'train/train/hidden_noisy/image':\n", 575 | "# output = im_tf.eval(feed_dict={image_str:v.image.encoded_image_string})\n", 576 | "# hidden_noisy.append(output)\n", 577 | " \n", 578 | "# if v.tag == 'train/train/revealed/image':\n", 579 | "# output = im_tf.eval(feed_dict={image_str:v.image.encoded_image_string})\n", 580 | "# reveal_imgs.append(output)\n", 581 | " \n", 582 | "# if v.tag == 'train/train/secret/image':\n", 583 | "# output = im_tf.eval(feed_dict={image_str:v.image.encoded_image_string})\n", 584 | "# secret_imgs.append(output)\n", 585 | " " 586 | ] 587 | }, 588 | { 589 | "cell_type": "code", 590 | "execution_count": null, 591 | "metadata": {}, 592 | "outputs": [], 593 | "source": [] 594 | } 595 | ], 596 | "metadata": { 597 | "kernelspec": { 598 | "display_name": "Python 3", 599 | "language": "python", 600 | "name": "python3" 601 | }, 602 | "language_info": { 603 | "codemirror_mode": { 604 | "name": "ipython", 605 | "version": 3 606 | }, 607 | "file_extension": ".py", 608 | "mimetype": "text/x-python", 609 | "name": "python", 610 | 
"nbconvert_exporter": "python", 611 | "pygments_lexer": "ipython3", 612 | "version": "3.6.3" 613 | } 614 | }, 615 | "nbformat": 4, 616 | "nbformat_minor": 2 617 | } 618 | --------------------------------------------------------------------------------