├── .gitignore ├── .ipynb_checkpoints └── retrieval_example-checkpoint.ipynb ├── README.md ├── data_loader.py ├── data_loader.pyc ├── main.py ├── models.py ├── models.pyc ├── retrieval_example.ipynb ├── trainer.py └── trainer.pyc /.gitignore: -------------------------------------------------------------------------------- 1 | # data 2 | data 3 | dataset 4 | *.tgz 5 | *.zip 6 | *.tar.gz 7 | *.pkl 8 | *.npy 9 | 10 | # e.t.c 11 | checkpoints 12 | *.swp 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DeepFashion-Tensorflow 2 | 3 | This repository contains implementation of DeepFashion[]. But tensorflow doesn't support ROI pooling/Landmark pooling which is most important part of this paper. 4 | 5 | If updated version support this pooling layer, will be updated repository also. 6 | before the supporting, This repo only compute category and attribute loss. 7 | To compute triplet loss, we need relevant and non-relevant set but no exists here. Relevant/Non-relevant set arbitrarily extracted random samples from the same/other class. 8 | 9 | Currently this repo only predict categories range in 1 to 50. Top1 acc is about 25%, top5 is about 58% on validation(perform on test is poor, why?). I will found some way to improve and to reproduce original one. 10 | After applying triplet loss, still no improvement yet. 11 | 12 | ----------------------------------------------------------------------------------- 13 | 14 | There is data imbalance problem, so the results are not expected outputs. 15 | Currently i am working to fix this problem. Some categories dominates over few sample categories. 16 | -------------------------------------------------------------------------------- /data_loader.py: -------------------------------------------------------------------------------- 1 | import os, random 2 | import numpy as np 3 | import tensorflow as tf 4 | import pickle 5 | from collections import defaultdict 6 | 7 | def read_and_decode(root, input_queue): 8 | image = tf.read_file(input_queue[0]) 9 | example = tf.image.decode_png(image, channels = 3) 10 | category = input_queue[1] 11 | attr = input_queue[2] 12 | return example, category, attr 13 | 14 | def queue_ready(files, categories, attrs, pos, neg, nepochs = 10): 15 | f = tf.convert_to_tensor(files, dtype=tf.string) 16 | c = tf.convert_to_tensor(categories, dtype=tf.int32) 17 | a = tf.convert_to_tensor(attrs, dtype=tf.int32) 18 | p = tf.convert_to_tensor(pos, dtype=tf.string) 19 | n = tf.convert_to_tensor(neg, dtype=tf.string) 20 | return tf.train.slice_input_producer([f, c, a, p, n], num_epochs = nepochs) 21 | 22 | class Data_loader(object): 23 | def __init__(self, root, cate_path, attr_path, partition_path, 24 | batch_size, scale_size, pkl_path = './data/fashion.pkl'): 25 | 26 | self.batch_size = batch_size 27 | self.scale_size = scale_size 28 | 29 | self.get_data(root, cate_path, attr_path, partition_path, 30 | batch_size, pkl_path) 31 | 32 | def get_queue(self, min_queue_examples, split = 'train', nepochs = 10, shuffle = True): 33 | split = split.lower() 34 | _queue_ready = queue_ready(self._d[split + '_files'], 35 | self._d[split + '_category'], self._d[split + '_attr'], self._d['pos'], self._d['neg'], nepochs = nepochs) 36 | 37 | content = tf.read_file(_queue_ready[0]) 38 | image = tf.image.decode_png(content, channels = 3) 39 | category = _queue_ready[1] 40 | attr = _queue_ready[2] 41 | pos_ex = tf.read_file(_queue_ready[3]) 42 | neg_ex = tf.read_file(_queue_ready[4]) 43 | pos_image = tf.image.decode_png(pos_ex, channels = 3) 44 | neg_image = tf.image.decode_png(neg_ex, channels = 3) 45 | 46 | resized_image = tf.image.resize_images(image, [self.scale_size, self.scale_size]) 47 | pos_resized = tf.image.resize_images(pos_image, [self.scale_size, self.scale_size]) 48 | neg_resized = tf.image.resize_images(neg_image, [self.scale_size, self.scale_size]) 49 | # convert from [0, 255] to [-0.5, 0.5] float. 50 | resized_image = tf.cast(resized_image, tf.float32) * (1. / 255) - 0.5 51 | 52 | num_preprocess_threads = 4 53 | 54 | if not shuffle: 55 | feature, cate, att = tf.train.batch([resized_image, category, attr], 56 | batch_size = self.batch_size, 57 | num_threads = num_preprocess_threads, 58 | capacity = min_queue_examples + 10 * self.batch_size, 59 | allow_smaller_final_batch = True) 60 | return feature, cate, att 61 | else: 62 | feature, pos_feature, neg_feature, cate, att = tf.train.shuffle_batch([resized_image, pos_resized, neg_resized, category, attr], 63 | batch_size = self.batch_size, 64 | num_threads = num_preprocess_threads, 65 | capacity = min_queue_examples + 10 * self.batch_size, 66 | min_after_dequeue = min_queue_examples) 67 | return feature, pos_feature, neg_feature, cate, att 68 | 69 | def get_data(self, root, cate_path, attr_path, partition_path, 70 | batch_size, scale_size, pkl_path = './data/fashion.pkl'): 71 | 72 | if pkl_path != None and os.path.exists(pkl_path): 73 | f = open(pkl_path, 'rb') 74 | self._d = pickle.load(f) 75 | f.close() 76 | print(" [*] Number of train files: %d"%len(self._d['train_files'])) 77 | print(" [*] Number of val files: %d"%len(self._d['val_files'])) 78 | print(" [*] Number of test files: %d"%len(self._d['test_files'])) 79 | print(" [*] file loaded at %s"%pkl_path) 80 | else: 81 | print(" [*] No saved data: %s"%pkl_path) 82 | list_category_img = open(root + cate_path).readlines() 83 | list_attr_img = open(root + attr_path).readlines() 84 | 85 | train_val_test_idx = open(root + partition_path).readlines() 86 | 87 | train_files, val_files, test_files = [], [], [] 88 | train_category, val_category, test_category = [], [], [] 89 | train_attr, val_attr, test_attr = [], [], [] 90 | 91 | category_files = defaultdict(list) 92 | 93 | for idx in range(2, len(train_val_test_idx)): 94 | path, split = train_val_test_idx[idx].split() 95 | category = list_category_img[idx].split()[1] 96 | attr = list_attr_img[idx].split()[1:] 97 | path = root + path[0].upper() + path[1:] 98 | attr = [n if n != -1 else 0 for n in attr] 99 | if split.lower() == 'train': 100 | train_files.append(path) 101 | train_category.append(category) 102 | category_files[int(category)].append(path) 103 | train_attr.append(attr) 104 | elif split.lower() == 'val': 105 | val_files.append(path) 106 | val_category.append(category) 107 | val_attr.append(attr) 108 | else: 109 | test_files.append(path) 110 | test_category.append(category) 111 | test_attr.append(attr) 112 | 113 | train_N, val_N, test_N = 50000, 10000, 10000 114 | train_indices, val_indices, test_indices = np.arange(len(train_files)), np.arange(len(val_files)), np.arange(len(test_files)) 115 | random.shuffle(train_indices) 116 | random.shuffle(val_indices) 117 | random.shuffle(test_indices) 118 | train_indices = train_indices[:train_N] 119 | val_indices = val_indices[:val_N] 120 | test_indices = test_indices[:test_N] 121 | 122 | train_files = np.array(train_files)[train_indices] 123 | val_files = np.array(val_files)[val_indices] 124 | test_files = np.array(test_files)[test_indices] 125 | train_category = np.array(train_category, dtype = np.int32)[train_indices] 126 | train_pos, train_neg = [], [] 127 | for idx in range(train_N): 128 | pos_idx = np.random.randint(len(np.where(train_category == train_category[idx]))) 129 | neg_idx = np.random.randint(len(np.where(train_category != train_category[idx]))) 130 | train_pos.append(train_files[pos_idx]) 131 | train_neg.append(train_files[neg_idx]) 132 | 133 | val_category = np.array(val_category, dtype = np.int32)[val_indices] 134 | test_category = np.array(test_category, dtype = np.int32)[test_indices] 135 | train_attr = np.array(train_attr, dtype = np.int32)[train_indices] 136 | val_attr = np.array(val_attr, dtype = np.int32)[val_indices] 137 | test_attr = np.array(test_attr, dtype = np.int32)[test_indices] 138 | 139 | train_pos, train_neg = np.array(train_pos), np.array(train_neg) 140 | 141 | print(" [*] Number of train files: %d"%len(train_files)) 142 | print(" [*] Number of val files: %d"%len(val_files)) 143 | print(" [*] Number of test files: %d"%len(test_files)) 144 | 145 | self._d = {'train_files': train_files, 146 | 'val_files': val_files, 147 | 'test_files': test_files, 148 | 'train_category': train_category, 149 | 'val_category': val_category, 150 | 'test_category': test_category, 151 | 'train_attr': train_attr, 152 | 'val_attr': val_attr, 153 | 'test_attr': test_attr, 154 | 'pos': train_pos, 155 | 'neg': train_neg 156 | } 157 | 158 | f = open(pkl_path, 'wb') 159 | pickle.dump(self._d, f) 160 | f.close() 161 | print(" [*] filed saved at %s"%pkl_path) 162 | -------------------------------------------------------------------------------- /data_loader.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jeong-tae/DeepFashion-Tensorflow/bcc66ef3143b20fa2019147e8275d56a52bf4d36/data_loader.pyc -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from trainer import Trainer 2 | from models import closest_l2_distance 3 | import tensorflow as tf 4 | import pickle, os 5 | import numpy as np 6 | 7 | batch_size = 25 8 | image_size = 224 9 | lr = 0.00001 10 | epoch = 5 11 | checkpoint_dir = './data/' 12 | 13 | flags = tf.app.flags 14 | flags.DEFINE_boolean("feature_learning", True, "True, if you want to train feature extractor, otherwise False") 15 | flags.DEFINE_boolean("retrieval", False, "True, if you want to search similar images by query image") 16 | flags.DEFINE_boolean("fine_tune", True, "True, if you want to train from existing model") 17 | FLAGS = flags.FLAGS 18 | 19 | def main(_): 20 | 21 | if FLAGS.feature_learning == True: 22 | train_module = Trainer(batch_size, image_size, lr, epoch) 23 | 24 | train_module.build_model() 25 | 26 | train_module.saver = tf.train.Saver() 27 | ckpt = tf.train.get_checkpoint_state(checkpoint_dir) 28 | if ckpt and FLAGS.fine_tune: 29 | train_module.saver.restore(train_module.sess, ckpt) 30 | else: 31 | print(" [!] Not found checkpoint") 32 | 33 | train_module.sess.run([tf.global_variables_initializer(), 34 | tf.local_variables_initializer()]) 35 | coord = tf.train.Coordinator() 36 | tf.train.start_queue_runners(sess = train_module.sess, coord = coord) 37 | 38 | train_module.train() 39 | 40 | train_module.test(epoch) 41 | tf.reset_default_graph() 42 | train_module.sess.close() 43 | 44 | if FLAGS.retrieval == True: 45 | 46 | demo_module = Trainer(batch_size, image_size, 0., 1) 47 | demo_module.build_model() 48 | 49 | demo_module.saver = tf.train.Saver() 50 | ckpt = tf.train.latest_checkpoint(checkpoint_dir) 51 | if ckpt: 52 | demo_module.saver.restore(demo_module.sess, ckpt) 53 | print(" [*] Parameter restored from %s"%ckpt) 54 | else: 55 | print(" [!] Not found checkpoint, process terminates") 56 | return -1 57 | 58 | demo_module.sess.run([tf.global_variables_initializer(), 59 | tf.local_variables_initializer()]) 60 | 61 | coord = tf.train.Coordinator() 62 | tf.train.start_queue_runners(sess = demo_module.sess, coord = coord) 63 | from scipy import misc 64 | 65 | if os.path.exists("./data/retrieval.pkl"): 66 | fin = open('./data/retrieval.pkl', 'rb') 67 | feature_and_paths = pickle.load(fin) 68 | fin.close() 69 | else: 70 | test_files = demo_module.d_loader._d['test_files'] 71 | 72 | feature_and_paths = [] 73 | for f in test_files: 74 | img = misc.imread(f) 75 | img = misc.imresize(img, (image_size, image_size)) * (1. / 255) - 0.5 76 | 77 | feature = demo_module.demo(test_img = img) 78 | feature_and_paths.append( (feature, f) ) 79 | 80 | fout = open("./data/retrieval.pkl", "wb") 81 | pickle.dump(feature_and_paths, fout) 82 | fout.close() 83 | print(" [*] retrieval data saved at ./data/retrieval.pkl") 84 | print(" [*] retrieval ready") 85 | 86 | val_files = demo_module.d_loader._d['test_files'] 87 | index = np.random.randint(10000, size = 1) 88 | 89 | f = val_files[index[0]] 90 | print(" example: %s"%f) 91 | img = misc.imread(f) 92 | img = misc.imresize(img, (image_size, image_size)) * (1. / 255) - 0.5 93 | feature = demo_module.demo(test_img = img) 94 | 95 | paths = closest_l2_distance(feature_and_paths, feature) 96 | print(paths) 97 | 98 | 99 | if __name__ == '__main__': 100 | tf.app.run() 101 | -------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from numpy import linalg as LA 4 | 5 | slim = tf.contrib.slim 6 | 7 | def vgg_16(inputs, num_cate = 50, num_attr = 1000, dropout_keep_prob = 0.5, 8 | spatial_squeeze = True, scope = 'vgg_16', padding = 'VALID'): 9 | 10 | """ 11 | inputs: a tensor of size [batch_size, height, width, channels]. 12 | dropout_keep_prob: 13 | """ 14 | 15 | with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc: 16 | end_points_collection = sc.name + '_end_points' 17 | with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], 18 | outputs_collections = end_points_collection): 19 | net = slim.repeat(inputs, 2, slim.conv2d, 16, [3, 3], scope = 'conv1') 20 | net = slim.max_pool2d(net, [2, 2], scope = 'pool1') 21 | net = slim.repeat(net, 2, slim.conv2d, 32, [3, 3], scope = 'conv2') 22 | net = slim.max_pool2d(net, [2, 2], scope = 'pool2') 23 | net = slim.repeat(net, 3, slim.conv2d, 64, [3, 3], scope = 'conv3') 24 | net = slim.max_pool2d(net, [2, 2], scope = 'pool3') 25 | net = slim.repeat(net, 3, slim.conv2d, 128, [3, 3], scope = 'conv4') 26 | net = slim.max_pool2d(net, [2, 2], scope = 'pool4') 27 | net = slim.repeat(net, 3, slim.conv2d, 128, [3, 3], scope = 'conv5') 28 | net = slim.max_pool2d(net, [2, 2], scope = 'pool5') 29 | 30 | net = slim.conv2d(net, 256, [7, 7], padding = padding, scope = 'fc6') 31 | net = slim.dropout(net, dropout_keep_prob, scope='dropout6') 32 | net = slim.conv2d(net, 128, [1, 1], scope='fc7') 33 | net = slim.dropout(net, dropout_keep_prob, scope='dropout7') 34 | net1 = slim.conv2d(net, num_cate, [1, 1], 35 | activation_fn = None, normalizer_fn = None, scope = 'fc8-c') 36 | net2 = slim.conv2d(net, num_attr, [1, 1], 37 | activation_fn = None, normalizer_fn = None, scope = 'fc8-a') 38 | 39 | end_points = slim.utils.convert_collection_to_dict(end_points_collection) 40 | if spatial_squeeze: 41 | net1 = tf.squeeze(net1, [1, 2], name = 'fc8-c/squeezed') 42 | net2 = tf.squeeze(net2, [1, 2], name = 'fc8-a/squeezed') 43 | 44 | end_points[sc.name + '/fc8-c'] = net1 45 | end_points[sc.name + '/fc8-a'] = net2 46 | return net1, net2, end_points 47 | vgg_16.default_image_size = 224 48 | 49 | def closest_l2_distance(feature_and_path, query): 50 | 51 | _dists = [] 52 | for _tuple in feature_and_path: 53 | feature, path = _tuple 54 | dist = LA.norm(feature - query) 55 | _dists.append(dist) 56 | 57 | closests = np.argsort(_dists)[:5] 58 | 59 | paths = [feature_and_path[idx][1] for idx in closests] 60 | return paths 61 | -------------------------------------------------------------------------------- /models.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jeong-tae/DeepFashion-Tensorflow/bcc66ef3143b20fa2019147e8275d56a52bf4d36/models.pyc -------------------------------------------------------------------------------- /trainer.py: -------------------------------------------------------------------------------- 1 | from models import vgg_16 2 | from data_loader import Data_loader 3 | import tensorflow as tf 4 | import numpy as np 5 | import os 6 | 7 | root = './dataset/DeepFashion/' 8 | cate_path = 'Anno/list_category_img.txt' 9 | attr_path = 'Anno/list_attr_img.txt' 10 | partition_path = 'Eval/list_eval_partition.txt' 11 | 12 | class Trainer(object): 13 | def __init__(self, batch_size, image_size, lr, epoch): 14 | 15 | self.input_images = tf.placeholder(tf.float32, [None, image_size, image_size, 3]) 16 | self.pos_images = tf.placeholder(tf.float32, [None, image_size, image_size, 3]) 17 | self.neg_images = tf.placeholder(tf.float32, [None, image_size, image_size, 3]) 18 | # sparse label 19 | self.input_cate = tf.placeholder(tf.int32, [None]) 20 | self.input_attr = tf.placeholder(tf.float32, [None, 1000]) 21 | self.num_cate = 50 22 | self.num_attr = 1000 23 | self.dropout_keep_prob = tf.placeholder(tf.float32) 24 | self.g_step = tf.Variable(0) 25 | self.lr = tf.train.exponential_decay(lr, self.g_step, 50000, 0.98) 26 | self.batch_size = batch_size 27 | self.image_size = image_size 28 | self.max_epoch = epoch 29 | self.d_loader = Data_loader(root, cate_path, attr_path, partition_path, self.batch_size, image_size) 30 | 31 | self.trainX, self.train_pos, self.train_neg, self.trainY1, self.trainY2 = self.d_loader.get_queue(1000, 'train', epoch, True) 32 | self.valX, self.valY1, self.valY2 = self.d_loader.get_queue(1000, 'val', None, False) 33 | self.testX, self.testY1, self.testY2 = self.d_loader.get_queue(1000, 'test', None, False) 34 | 35 | def build_model(self): 36 | 37 | pred_triple, pred_attr, self.end_points = vgg_16(tf.concat([self.input_images, self.pos_images, self.neg_images], 0), 38 | num_cate = self.num_cate, num_attr = self.num_attr, 39 | dropout_keep_prob = self.dropout_keep_prob) 40 | self.pred_cate, pos_cate, neg_cate = tf.split(pred_triple, 3) 41 | self.pred_attr, _, _ = tf.split(pred_attr, 3) 42 | 43 | self.cate_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels = self.input_cate, logits = self.pred_cate)) 44 | self.attr_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.input_attr, logits = self.pred_attr)) 45 | pos_dist = tf.sqrt(tf.reduce_sum(tf.pow(self.pred_cate - pos_cate, 2), reduction_indices = 1)) 46 | neg_dist = tf.sqrt(tf.reduce_sum(tf.pow(self.pred_cate - neg_cate, 2), reduction_indices = 1)) 47 | # deprecated cost 48 | #triplet_cost = tf.reduce_mean(pos_dist - neg_dist) 49 | #triplet_cost = tf.clip_by_value(triplet_cost, -40., 40.) 50 | self.attr_loss = tf.clip_by_value(self.attr_loss, 0., 40.) 51 | # 0.5 for margin 52 | self.triplet_loss = tf.maximum(0.0, 0.5 + tf.reduce_mean(pos_dist - neg_dist)) 53 | 54 | self.loss = self.cate_loss + self.triplet_loss + self.attr_loss 55 | optimizer = tf.train.AdamOptimizer(self.lr) 56 | self.train_op = optimizer.minimize(self.loss, global_step = self.g_step) 57 | 58 | self.sess = tf.Session() 59 | 60 | print(" [*] model ready") 61 | 62 | def top_k_acc(self, preds, sparse_labels, k): 63 | 64 | argsorted = np.argsort(-preds, 1)[:, :k] 65 | acc = 100.0 * np.sum([np.any(argsorted[i] == sparse_labels[i]) for i in range(preds.shape[0])]) / preds.shape[0] 66 | return acc 67 | 68 | def train(self): 69 | 70 | print(" [*] train start") 71 | prev_loss = 999. 72 | counting = 0. 73 | for i in range(self.max_epoch): 74 | batch_len = int(50000. / self.batch_size) 75 | for step in range(batch_len): 76 | batch_img, batch_pos, batch_neg, batch_cate, batch_attr = self.sess.run([self.trainX, 77 | self.train_pos, self.train_neg, self.trainY1, self.trainY2]) 78 | _, c_preds, loss, g_step = self.sess.run([self.train_op, self.pred_cate, self.loss, self.g_step], 79 | feed_dict = { 80 | self.input_images: batch_img, 81 | self.pos_images: batch_pos, 82 | self.neg_images: batch_neg, 83 | self.input_cate: batch_cate, 84 | self.input_attr: batch_attr, 85 | self.dropout_keep_prob: 0.5 86 | }) 87 | acc = 100.0 * np.sum(np.argmax(c_preds, 1) == batch_cate) / c_preds.shape[0] 88 | acc5 = self.top_k_acc(c_preds, batch_cate, k = 5) 89 | print("step: %d, acc: %.2f, top5 acc: %.2f, current loss: %.2f"%(batch_len*i + step, acc, acc5, loss)) 90 | 91 | if g_step % 10 == 0 or g_step == 0: 92 | val_loss = self.validation(g_step) 93 | self.test(i) 94 | if val_loss < prev_loss: 95 | self.saver.save(self.sess, os.path.join("./data/", 96 | "deepfashion.ckpt"), global_step = g_step) 97 | prev_loss = val_loss 98 | counting = 0 99 | else: 100 | counting += 1 101 | 102 | if counting > 50: 103 | print(" [*] Early stopping") 104 | break 105 | self.test(i) 106 | print(" [*] train end") 107 | 108 | 109 | def validation(self, i): 110 | 111 | val_len = int(10000. / self.batch_size) 112 | val_preds = [] 113 | val_cates = [] 114 | val_loss = [] 115 | for step in range(val_len): 116 | batch_img, batch_cate, batch_attr = self.sess.run([self.valX, self.valY1, self.valY2]) 117 | c_preds, loss = self.sess.run([self.pred_cate, self.loss], 118 | feed_dict = { 119 | self.input_images: batch_img, 120 | self.pos_images: batch_img, # dummy 121 | self.neg_images: batch_img, # dummy 122 | self.input_cate: batch_cate, 123 | self.input_attr: batch_attr, 124 | self.dropout_keep_prob: 1.0 125 | }) 126 | val_cates.append(batch_cate) 127 | val_preds.append(c_preds) 128 | val_loss.append(loss) 129 | val_preds = np.concatenate(val_preds, axis = 0) 130 | val_cates = np.concatenate(val_cates, axis = 0) 131 | acc = 100.0 * np.sum(np.argmax(val_preds, 1) == val_cates) / val_preds.shape[0] 132 | acc5 = self.top_k_acc(val_preds, val_cates, k = 5) 133 | 134 | mean_loss = sum(val_loss) / float(val_len) 135 | print("g_step: %d, validation acc: %.2f, top5 acc: %.2f, loss: %.2f"%(i, acc, acc5, mean_loss)) 136 | 137 | return mean_loss 138 | 139 | def test(self, i): 140 | 141 | test_len = int(10000. / self.batch_size) 142 | test_preds = [] 143 | test_cates = [] 144 | for step in range(test_len): 145 | batch_img, batch_cate, batch_attr = self.sess.run([self.testX, self.testY1, self.testY2]) 146 | c_preds, loss = self.sess.run([self.pred_cate, self.loss], 147 | feed_dict = { 148 | self.input_images: batch_img, 149 | self.pos_images: batch_img, # dummy 150 | self.neg_images: batch_img, # dummy 151 | self.input_cate: batch_cate, 152 | self.input_attr: batch_attr, 153 | self.dropout_keep_prob: 1.0 154 | }) 155 | test_cates.append(batch_cate) 156 | test_preds.append(c_preds) 157 | test_preds = np.concatenate(test_preds, axis = 0) 158 | test_cates = np.concatenate(test_cates, axis = 0) 159 | acc = 100.0 * np.sum(np.argmax(test_preds, 1) == test_cates) / test_preds.shape[0] 160 | acc5 = self.top_k_acc(test_preds, test_cates, k = 5) 161 | print("epoch: %d, test acc: %.2f, top5 acc: %.2f"%(i, acc, acc5)) 162 | 163 | def demo(self, test_img): 164 | 165 | test_img = np.reshape(test_img, [-1, self.image_size, self.image_size, 3]) 166 | features = self.sess.run([self.end_points['vgg_16/fc7']], 167 | feed_dict = { 168 | self.input_images: test_img, 169 | self.pos_images: test_img, # dummy 170 | self.neg_images: test_img, # dummy 171 | self.dropout_keep_prob: 1.0 172 | }) 173 | 174 | return np.reshape(features, [-1, 4096]) 175 | -------------------------------------------------------------------------------- /trainer.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jeong-tae/DeepFashion-Tensorflow/bcc66ef3143b20fa2019147e8275d56a52bf4d36/trainer.pyc --------------------------------------------------------------------------------