├── README.md ├── cr_domain.data ├── data_helpers.py ├── ood_BDEK_adv_baseline.py ├── ood_BDEK_adv_cond.py ├── ood_BDEK_adv_gen.py ├── text_cnn_baseline.py ├── text_cnn_cond.py └── text_cnn_gen.py /README.md: -------------------------------------------------------------------------------- 1 | # Domain Robust Text Representation 2 | 3 | Implementation of: 4 | 5 | Li, Yitong, Timothy Baldwin and Trevor Cohn (2018) What's in a Domain? Learning Domain-Robust Text Representations Using Adversarial Training, In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics – Human Language Technologies (NAACL HLT 2018), New Orleans, USA. 6 | 7 | ## Data 8 | 9 | 1. Multi-Domain Sentiment Dataset (Blitzer et al., 2007); 10 | 2. Language identification data (Lui and Baldwin, 2011). 11 | 12 | ## Requirements 13 | 14 | - Python 2.7 15 | - TensorFlow 1.1+ 16 | - numpy 17 | - scipy 18 | 19 | ## Models 20 | 21 | 1. Baseline: ood_BDEK_adv_baseline.py 22 | 2. Cond: ood_BDEK_adv_cond.py 23 | 3. Gen: ood_BDEK_adv_gen.py 24 | 25 | These correspond to the models described in the paper. 26 | 27 | ## Running the code 28 | 29 | ### Multi-domain sentiment task 30 | ```bash 31 | python ood_BDEK_adv_*.py [parameters] 32 | ``` 33 | 34 | ### Main parameters: 35 | ```text 36 | --num_train_epochs: number of standard training epochs without adversarial supervision; 37 | --num_tune_epochs: number of adversarial tuning epochs; 38 | --adv_lambda: hyper-parameter lambda weighting the adversarial (domain discriminator) loss. 39 | ``` 40 | 41 | ### Examples: 42 | Baseline without adversarial training: 43 | ```bash 44 | python ood_BDEK_adv_baseline.py --num_train_epochs 50 45 | ``` 46 | Baseline with adversarial loss: 47 | ```bash 48 | python ood_BDEK_adv_baseline.py --num_tune_epochs 50 --adv_lambda 1e-3 49 | ``` 50 | Gen model with pre-training: 51 | ```bash 52 | python ood_BDEK_adv_gen.py --num_train_epochs 50 --num_tune_epochs 50 53 | ``` 54 | -------------------------------------------------------------------------------- /data_helpers.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import re 3 | import itertools 4 | from collections import Counter 5 | 6 | 7 | def clean_str(string): 8 | string = re.sub(r"[^A-Za-z0-9()!?\'\`]", " ", string) 9 | string = re.sub(r"\'s", " \'s", string) 10 | string = re.sub(r"\'ve", " \'ve", string) 11 | string = re.sub(r"n\'t", " n\'t", string) 12 | string = re.sub(r"\'re", " \'re", string) 13 | string = re.sub(r"\'d", " \'d", string) 14 | string = re.sub(r"\'ll", " \'ll", string) 15 | # string = re.sub(r",", " , ", string) 16 | string = re.sub(r"!", " ! ", string) 17 | string = re.sub(r"\(", " \( ", string) 18 | string = re.sub(r"\)", " \) ", string) 19 | string = re.sub(r"\?", " \? 
", string) 20 | string = re.sub(r"\s{2,}", " ", string) 21 | return string.strip().lower() 22 | 23 | 24 | class VOCAB_processor(object): 25 | def __init__(self, max_leng): 26 | self.max_len = max_leng 27 | self.vocab = {"" : 0, "": 1} 28 | self.reverse_vocab = {0 : "", 1 : ""} 29 | self.vocab_size = 2 30 | self.total_unk = 0 31 | self.total_token = 0 32 | 33 | def fit(self, x, d = None): 34 | size = self.vocab_size 35 | for s in x: 36 | s = s.split(' ') 37 | for w in s: 38 | if d != None and w not in d: 39 | self.vocab[w] = 1 40 | elif self.vocab.get(w, -1) == -1: 41 | self.vocab[w] = size 42 | self.reverse_vocab[size] = w 43 | size += 1 44 | self.vocab_size = size 45 | return self.vocab_size 46 | 47 | def transform(self, x): 48 | self.total_unk = 0 49 | self.total_token = 0 50 | trans = [] 51 | for s in x: 52 | s = s.split(" ") 53 | array = [] 54 | for ind, w in enumerate(s): 55 | if ind >= self.max_len: 56 | break 57 | if w in self.vocab: 58 | array.append(self.vocab[w]) 59 | else: 60 | array.append(self.vocab[ "" ]) 61 | self.total_unk += 1 62 | self.total_token += 1 63 | array = array + [0] * ( self.max_len - len(s) ) 64 | trans.append( array ) 65 | 66 | return np.array( trans, dtype = "int32" ) 67 | 68 | 69 | 70 | def load_cr(filename): 71 | lines = list( open(filename, "r").readlines() ) 72 | lines = [ l.strip() for l in lines ] 73 | 74 | sents = [] 75 | labels = [] 76 | domains = [] 77 | for l in lines: 78 | 79 | label, sent, domain = l.split('\t') 80 | 81 | # sents.append( clean_str(sent) ) 82 | sents.append( sent ) 83 | labels.append( int(label) ) 84 | domains.append( int(domain) ) 85 | return sents, labels, domains 86 | 87 | 88 | def load_data(): 89 | sents, labels, domains = load_cr(filename = "./cr_domain_BDEK.data") 90 | print("Totally load {} data".format( len(sents) )) 91 | 92 | max_sent_length = max( [len(x.split(' ')) for x in sents] ) 93 | print("Max sentence Length: {} ".format(max_sent_length) ) 94 | if max_sent_length > 256: 95 | max_sent_length = 256 96 | print("Max sentence Length trimmed to {} ".format(max_sent_length) ) 97 | 98 | #Vocab 99 | vocab_processor = VOCAB_processor( max_sent_length ) 100 | vocab_size = vocab_processor.fit( sents ) 101 | print("Vocabulary Size: {:d}".format( vocab_size )) 102 | x = vocab_processor.transform( sents ) 103 | 104 | #labels 105 | num_label = 2 106 | y = np.zeros( (len(sents), num_label ) ) 107 | y[ np.arange(len(sents)), np.array(labels) ] = 1 108 | 109 | # 110 | # num_domains = max(domains) 111 | num_domain = 22 112 | d = np.zeros( (len(sents), num_domain ) ) 113 | d[ np.arange(len(sents)), np.array(domains) ] = 1 114 | 115 | return max_sent_length, vocab_size, num_label, num_domain, \ 116 | x, y, d 117 | 118 | 119 | 120 | class batch_iter(object): 121 | #data := list of np.darray 122 | def __init__(self, data, batch_size, is_shuffle=True): 123 | assert( len(data) > 0 ) 124 | self.data = data 125 | self.batch_size = batch_size 126 | self.data_size = len( data[0] ) 127 | assert (self.data_size >= self.batch_size) 128 | 129 | self.index = self.data_size 130 | self.is_shuffle = is_shuffle 131 | 132 | def fetch_batch(self, start, end): 133 | batch_list = [] 134 | for data in self.data: 135 | batch_list.append(data[start: end]) 136 | return batch_list 137 | 138 | def shuffle(self): 139 | shuffle_indices = np.random.permutation( np.arange(self.data_size) ) 140 | for i in range(len(self.data)): 141 | self.data[i] = (self.data[i])[shuffle_indices] 142 | 143 | def next_full_batch(self): 144 | if self.index < self.data_size - 
self.batch_size: 145 | self.index += self.batch_size 146 | return self.fetch_batch(self.index - self.batch_size, self.index) 147 | else: 148 | if self.is_shuffle: 149 | self.shuffle() 150 | self.index = self.batch_size 151 | return self.fetch_batch(0, self.batch_size) 152 | 153 | 154 | #this is a quick iter not for general usage 155 | class cross_validation_iter(object): 156 | 157 | def __init__(self, data, fold = 10): 158 | for i in range(1, len(data)): 159 | assert( len(data[0]) == len(data[i]) ) 160 | self.x = data[0] 161 | self.y = data[1] 162 | self.d = data[2] 163 | self.fold = self.d.shape[1] 164 | self.cv = [0, 1, 2, 3] 165 | 166 | def fetch_next(self): 167 | x_train = [ ] 168 | y_train = [ ] 169 | d_train = [ ] 170 | 171 | x_test = [ ] 172 | y_test = [ ] 173 | d_test = [ ] 174 | 175 | cv = self.cv 176 | for i in range( len(self.x) ): 177 | if np.argmax(self.d[i]) in self.cv: 178 | x_test.append(self.x[i]) 179 | y_test.append(self.y[i]) 180 | d_test.append(self.d[i]) 181 | 182 | else: 183 | x_train.append(self.x[i]) 184 | y_train.append(self.y[i]) 185 | d_train.append(self.d[i]) 186 | 187 | for i in range( len(self.cv) ): 188 | self.cv[i] = (self.cv[i] + 4) % self.fold 189 | 190 | return np.array(x_train), np.array(y_train), np.array(d_train), \ 191 | np.array(x_test), np.array(y_test), np.array(d_test) 192 | 193 | class cross_validation_indomain_iter(object): 194 | 195 | def __init__(self, data, fold = 10): 196 | for i in range(1, len(data)): 197 | assert( len(data[0]) == len(data[i]) ) 198 | self.x = data[0] 199 | self.y = data[1] 200 | self.d = data[2] 201 | self.fold = 10 202 | self.cv = 0 203 | 204 | def fetch_next(self): 205 | x_train = [ ] 206 | y_train = [ ] 207 | d_train = [ ] 208 | 209 | x_test = [ ] 210 | y_test = [ ] 211 | d_test = [ ] 212 | 213 | cv = self.cv 214 | cv_domain = [] 215 | for _ in range( (self.d).shape[1] ): 216 | cv_domain.append( 0 ) 217 | x_test.append( [ ] ) 218 | y_test.append( [ ] ) 219 | d_test.append( [ ] ) 220 | 221 | for i in range( len(self.x) ): 222 | dom = np.argmax( self.d[i] ) 223 | if cv_domain[dom] % self.fold == cv: 224 | x_test[dom].append(self.x[i]) 225 | y_test[dom].append(self.y[i]) 226 | d_test[dom].append(self.d[i]) 227 | else: 228 | x_train.append(self.x[i]) 229 | y_train.append(self.y[i]) 230 | d_train.append(self.d[i]) 231 | cv_domain[dom] += 1 232 | 233 | self.cv = (self.cv + 1) % self.fold 234 | 235 | return np.array(x_train), np.array(y_train), np.array(d_train), \ 236 | np.array(x_test), np.array(y_test), np.array(d_test) 237 | -------------------------------------------------------------------------------- /ood_BDEK_adv_baseline.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import os 4 | import time 5 | import datetime 6 | # import cPickle 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | 11 | import data_helpers 12 | from text_cnn_baseline import TextCNN 13 | 14 | from tensorflow.contrib import learn 15 | 16 | # Parameters 17 | # ================================================== 18 | 19 | # Model Hyperparameters 20 | tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 128)") 21 | tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')") 22 | tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)") 23 | tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)") 24 | tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)") 25 | tf.flags.DEFINE_float("adv_lambda", 1e-3, "Robust Regularizaion lambda (default: 1e-3)") 26 | tf.flags.DEFINE_float("learning_rate", 1e-3, "Learning rate alpha") 27 | 28 | # Training parameters 29 | tf.flags.DEFINE_integer("batch_size", 32, "Batch Size (default: 64)") 30 | tf.flags.DEFINE_integer("num_train_epochs", 50, "Number of training epochs") 31 | tf.flags.DEFINE_integer("num_tune_epochs", 50, "Number of training epochs") 32 | tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)") 33 | tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)") 34 | # Misc Parameters 35 | tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") 36 | tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") 37 | 38 | print(tf.__version__) 39 | FLAGS = tf.flags.FLAGS 40 | FLAGS.batch_size 41 | print("\nParameters:") 42 | for attr, value in sorted(FLAGS.__flags.items()): 43 | print("{}={}".format(attr.upper(), value)) 44 | print("") 45 | 46 | 47 | # Load data 48 | print("Loading data...") 49 | sent_length, vocab_size, num_label, num_domain, x, y, d = data_helpers.load_data() 50 | # Randomly shuffle data 51 | # np.random.seed(101) 52 | 53 | score_sum = [] 54 | best_score = 0 55 | 56 | with tf.Graph().as_default(): 57 | session_conf = tf.ConfigProto( 58 | allow_soft_placement=FLAGS.allow_soft_placement, 59 | log_device_placement=FLAGS.log_device_placement, 60 | intra_op_parallelism_threads=2, 61 | inter_op_parallelism_threads=4) 62 | sess = tf.Session(config=session_conf) 63 | with sess.as_default(): 64 | cnn = TextCNN( 65 | sequence_length = sent_length, 66 | num_classes = num_label, 67 | vocab_size = vocab_size, 68 | embedding_size = FLAGS.embedding_dim, 69 | filter_sizes = map(int, FLAGS.filter_sizes.split(",")), 70 | num_filters = FLAGS.num_filters, 71 | num_domains = num_domain, 72 | l2_reg_lambda=FLAGS.l2_reg_lambda, 73 | ) 74 | 75 | 76 | # Define Training procedure 77 | learning_rate = tf.placeholder(tf.float32, shape=[], name="learning_rate") 78 | adv_lambda = tf.placeholder(tf.float32, shape=[], name="adversarial_lambda") 79 | 80 | global_step = tf.Variable(0, name="global_step", trainable=False) 81 | all_var_list = tf.trainable_variables() 82 | 83 | optimizer_n = tf.train.AdamOptimizer( 84 | learning_rate = learning_rate 85 | ).minimize( 86 | cnn.y_loss, 87 | global_step=global_step 88 | ) 89 | 90 | var_d = [var for var in all_var_list if 'domain' in var.name or 'gen' in var.name] 91 | assert( len(var_d) == 4) 92 | optimizer_d = tf.train.AdamOptimizer( 93 | 
learning_rate = learning_rate 94 | ).minimize( 95 | adv_lambda * cnn.domain_loss, 96 | var_list=var_d 97 | ) 98 | 99 | var_g = [var for var in all_var_list if var not in var_d] 100 | optimizer_g = tf.train.AdamOptimizer( 101 | learning_rate=learning_rate 102 | ).minimize( 103 | cnn.y_loss - adv_lambda * cnn.domain_loss, 104 | var_list=var_g, 105 | global_step = global_step 106 | ) 107 | 108 | 109 | def train_batch(x_batch, y_batch, d_batch, opt, adv_lbd, lr): 110 | """ 111 | A single training step 112 | """ 113 | feed_dict = { 114 | cnn.input_x: x_batch, 115 | cnn.input_y: y_batch, 116 | cnn.input_d: d_batch, 117 | cnn.dropout_keep_prob: FLAGS.dropout_keep_prob, 118 | adv_lambda: adv_lbd, 119 | learning_rate: lr, 120 | } 121 | _, step, loss, accuracy, d_l, d_a = sess.run( 122 | [opt, global_step, cnn.y_loss, cnn.y_accuracy, cnn.domain_loss, cnn.domain_accuracy], 123 | feed_dict) 124 | 125 | def dev_batch(x_batch, y_batch, d_batch): 126 | """ 127 | Evaluates model on a dev set 128 | """ 129 | feed_dict = { 130 | cnn.input_x: x_batch, 131 | cnn.input_y: y_batch, 132 | cnn.input_d: d_batch, 133 | cnn.dropout_keep_prob: 1.0, 134 | adv_lambda: 0, 135 | } 136 | step, loss, accuracy, d_l, d_a = sess.run( 137 | [global_step, cnn.y_loss, cnn.y_accuracy, cnn.domain_loss, cnn.domain_accuracy], 138 | feed_dict) 139 | return accuracy 140 | 141 | def dev_step(x_dev, y_dev, d_dev): 142 | cor = 0. 143 | step = 512 144 | for ind in range(0, len(x_dev), step): 145 | num_ins = min(len(x_dev) - ind, step) 146 | acc = dev_batch( 147 | x_batch = x_dev[ind: ind + num_ins], 148 | y_batch = y_dev[ind: ind + num_ins], 149 | d_batch = d_dev[ind: ind + num_ins] 150 | ) 151 | cor = cor + num_ins * acc 152 | acc = cor / len( x_dev ) 153 | return acc 154 | 155 | 156 | #data_split 157 | cv_iter = data_helpers.cross_validation_iter( 158 | data=[x, y, d], 159 | ) 160 | best_scores_pre = [] 161 | best_scores = [] 162 | 163 | for _ in range(1): 164 | x_train, y_train, d_train,\ 165 | x_test_all, y_test_all, d_test_all = cv_iter.fetch_next() 166 | print("split train {} / dev {}".format(len(x_train), len(x_test_all))) 167 | x_test = [ [], [], [], [] ] 168 | y_test = [ [], [], [], [] ] 169 | d_test = [ [], [], [], [] ] 170 | for i in range( len(x_test_all) ): 171 | dom = np.argmax( d_test_all[i] ) 172 | x_test[dom].append( x_test_all[i] ) 173 | y_test[dom].append( y_test_all[i] ) 174 | d_test[dom].append( d_test_all[i] ) 175 | x_test = np.array( x_test ) 176 | y_test = np.array( y_test ) 177 | d_test = np.array( d_test ) 178 | 179 | # Initialize all variables 180 | sess.run(tf.global_variables_initializer()) 181 | # sess.run(cnn.W.assign(w2v)) 182 | best_score_pre = np.zeros( (4) ) 183 | best_score_cv = np.zeros( (4) ) 184 | data_size = len(x_train) 185 | 186 | # Generate batches 187 | train_batch_iter = data_helpers.batch_iter( 188 | data = [x_train, y_train, d_train], 189 | batch_size = FLAGS.batch_size) 190 | 191 | # pre-train 192 | for _ in range( FLAGS.num_train_epochs * data_size / FLAGS.batch_size): 193 | x_batch, y_batch, d_batch = train_batch_iter.next_full_batch() 194 | 195 | train_batch( x_batch, y_batch, d_batch, opt=optimizer_n, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 196 | 197 | current_step = tf.train.global_step(sess, global_step) 198 | 199 | if current_step % FLAGS.evaluate_every == 0: 200 | for dom in range( 4 ): 201 | acc = dev_step( x_test[dom], y_test[dom], d_test[dom] ) 202 | if acc > best_score_pre[dom]: 203 | best_score_pre[dom] = acc 204 | 205 | best_scores_pre.append(best_score_pre) 206 | 
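# The tuning loop below alternates two updates per batch: optimizer_d first trains the
# domain discriminator to minimize adv_lambda * domain_loss, then optimizer_g updates the
# remaining parameters to minimize y_loss - adv_lambda * domain_loss, keeping task accuracy
# while pushing the shared representation to confuse the discriminator.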
207 | # Training loop. For each batch... 208 | for _ in range( FLAGS.num_tune_epochs * data_size / FLAGS.batch_size ): 209 | x_batch, y_batch, d_batch = train_batch_iter.next_full_batch() 210 | 211 | train_batch( x_batch, y_batch, d_batch, 212 | opt=optimizer_d, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 213 | train_batch( x_batch, y_batch, d_batch, 214 | opt=optimizer_g, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 215 | 216 | current_step = tf.train.global_step(sess, global_step) 217 | if current_step % FLAGS.evaluate_every == 0: 218 | for dom in range( 4 ): 219 | acc = dev_step( x_test[dom], y_test[dom], d_test[dom] ) 220 | if acc > best_score_cv[dom]: 221 | best_score_cv[dom] = acc 222 | 223 | best_scores.append(best_score_cv) 224 | print("best phase 1 score {}".format(best_score_pre)) 225 | print("best phase 2 score {}".format(best_score_cv)) 226 | 227 | print( best_scores_pre) 228 | print( np.average( best_scores_pre) ) 229 | 230 | print( best_scores ) 231 | print( np.average(best_scores) ) 232 | -------------------------------------------------------------------------------- /ood_BDEK_adv_cond.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import os 4 | import time 5 | import datetime 6 | # import cPickle 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | 11 | import data_helpers 12 | from text_cnn_cond import TextCNN 13 | 14 | from tensorflow.contrib import learn 15 | from scipy.stats import entropy 16 | 17 | # Parameters 18 | # ================================================== 19 | 20 | # Model Hyperparameters 21 | tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 128)") 22 | tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')") 23 | tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)") 24 | tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)") 25 | tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)") 26 | tf.flags.DEFINE_float("adv_lambda", 1e-3, "Robust Regularizaion lambda (default: 1e-3)") 27 | tf.flags.DEFINE_float("learning_rate", 1e-3, "Learning rate alpha") 28 | 29 | # Training parameters 30 | tf.flags.DEFINE_integer("batch_size", 32, "Batch Size (default: 64)") 31 | tf.flags.DEFINE_integer("num_train_epochs", 50, "Number of training epochs") 32 | tf.flags.DEFINE_integer("num_tune_epochs", 50, "Number of tuning epochs") 33 | tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)") 34 | tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)") 35 | # Misc Parameters 36 | tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") 37 | tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") 38 | 39 | print(tf.__version__) 40 | FLAGS = tf.flags.FLAGS 41 | FLAGS.batch_size 42 | print("\nParameters:") 43 | for attr, value in sorted(FLAGS.__flags.items()): 44 | print("{}={}".format(attr.upper(), value)) 45 | print("") 46 | 47 | 48 | # Load data 49 | print("Loading data...") 50 | sent_length, vocab_size, num_label, num_domain, x, y, d = data_helpers.load_data() 51 | # Randomly shuffle data 52 | # np.random.seed(101) 53 | 54 | score_sum = [] 55 | best_score = 0 56 | 57 | with tf.Graph().as_default(): 58 | session_conf = 
tf.ConfigProto( 59 | allow_soft_placement=FLAGS.allow_soft_placement, 60 | log_device_placement=FLAGS.log_device_placement, 61 | intra_op_parallelism_threads=2, 62 | inter_op_parallelism_threads=4) 63 | sess = tf.Session(config=session_conf) 64 | with sess.as_default(): 65 | cnn = TextCNN( 66 | sequence_length = sent_length, 67 | num_classes = num_label, 68 | vocab_size = vocab_size, 69 | embedding_size = FLAGS.embedding_dim, 70 | filter_sizes = map(int, FLAGS.filter_sizes.split(",")), 71 | num_filters = FLAGS.num_filters, 72 | num_domains = num_domain, 73 | l2_reg_lambda=FLAGS.l2_reg_lambda, 74 | ) 75 | 76 | 77 | # Define Training procedure 78 | learning_rate = tf.placeholder(tf.float32, shape=[], name="learning_rate") 79 | adv_lambda = tf.placeholder(tf.float32, shape=[], name="adversarial_lambda") 80 | 81 | global_step = tf.Variable(0, name="global_step", trainable=False) 82 | all_var_list = tf.trainable_variables() 83 | 84 | optimizer_n = tf.train.AdamOptimizer( 85 | learning_rate = learning_rate 86 | ).minimize( 87 | cnn.y_loss, 88 | global_step=global_step 89 | ) 90 | 91 | var_d = [var for var in all_var_list if 'domain' in var.name] 92 | assert( len(var_d) == 4) 93 | optimizer_d = tf.train.AdamOptimizer( 94 | learning_rate = learning_rate 95 | ).minimize( 96 | adv_lambda * cnn.domain_loss, 97 | var_list=var_d 98 | ) 99 | 100 | var_g = [var for var in all_var_list if var not in var_d] 101 | optimizer_g = tf.train.AdamOptimizer( 102 | learning_rate=learning_rate 103 | ).minimize( 104 | cnn.y_loss - adv_lambda * cnn.domain_loss, 105 | var_list=var_g, 106 | global_step = global_step 107 | ) 108 | 109 | 110 | def train_batch(x_batch, y_batch, d_batch, opt, adv_lbd, lr): 111 | """ 112 | A single training step 113 | """ 114 | feed_dict = { 115 | cnn.input_x: x_batch, 116 | cnn.input_y: y_batch, 117 | cnn.input_d: d_batch, 118 | cnn.dropout_keep_prob: FLAGS.dropout_keep_prob, 119 | adv_lambda: adv_lbd, 120 | learning_rate: lr, 121 | } 122 | _, step, loss, accuracy, d_l, d_a = sess.run( 123 | [opt, global_step, cnn.y_loss, cnn.y_accuracy, cnn.domain_loss, cnn.domain_accuracy], 124 | feed_dict) 125 | 126 | 127 | def get_indice_by_min_H(matrix): 128 | index = 0 129 | min_h = entropy( matrix[0] ) 130 | for i in range(1, len(matrix) ): 131 | h = entropy( matrix[i] ) 132 | if h < min_h: 133 | min_h = h 134 | index = i 135 | return np.argmax( matrix[index] ) 136 | 137 | def dev_batch(x_batch, y_batch, d_batch): 138 | """ 139 | Evaluates model on a dev set 140 | """ 141 | # NUM_TRAINING_DOMAIN = 4 .. 20 142 | scores_list = [] 143 | for i in range(4, 20): 144 | d_i = np.zeros( ( len(x_batch), num_domain ) ) 145 | index_i = np.array( [i] * len(x_batch) ) 146 | d_i[ np.arange(len(x_batch)), index_i ] = 1 147 | 148 | feed_dict = { 149 | cnn.input_x: x_batch, 150 | cnn.input_y: y_batch, 151 | cnn.input_d: d_i, 152 | cnn.dropout_keep_prob: 1.0, 153 | adv_lambda: 0, 154 | } 155 | step, loss, scores, accuracy, d_l, d_a = sess.run( 156 | [global_step, cnn.y_loss, cnn.y_scores_softmax, cnn.y_accuracy, cnn.domain_loss, cnn.domain_accuracy], 157 | feed_dict) 158 | scores_list.append( scores ) 159 | scores_list = np.stack(scores_list, axis = 1) 160 | cor = 0. 161 | for i in range( len(x_batch) ): 162 | ind = get_indice_by_min_H( scores_list[i] ) 163 | if ind == np.argmax(y_batch[i]): 164 | cor += 1 165 | accuracy = cor / len(x_batch) 166 | 167 | return accuracy 168 | 169 | def dev_step(x_dev, y_dev, d_dev): 170 | cor = 0. 
171 | step = 512 172 | for ind in range(0, len(x_dev), step): 173 | num_ins = min(len(x_dev) - ind, step) 174 | acc = dev_batch( 175 | x_batch = x_dev[ind: ind + num_ins], 176 | y_batch = y_dev[ind: ind + num_ins], 177 | d_batch = d_dev[ind: ind + num_ins] 178 | ) 179 | cor = cor + num_ins * acc 180 | acc = cor / len( x_dev ) 181 | return acc 182 | 183 | 184 | #data_split 185 | cv_iter = data_helpers.cross_validation_iter( 186 | data=[x, y, d], 187 | ) 188 | best_scores_pre = [] 189 | best_scores = [] 190 | 191 | for _ in range(1): 192 | x_train, y_train, d_train,\ 193 | x_test_all, y_test_all, d_test_all = cv_iter.fetch_next() 194 | print("split train {} / dev {}".format(len(x_train), len(x_test_all))) 195 | x_test = [ [], [], [], [] ] 196 | y_test = [ [], [], [], [] ] 197 | d_test = [ [], [], [], [] ] 198 | for i in range( len(x_test_all) ): 199 | dom = np.argmax( d_test_all[i] ) 200 | x_test[dom].append( x_test_all[i] ) 201 | y_test[dom].append( y_test_all[i] ) 202 | d_test[dom].append( d_test_all[i] ) 203 | x_test = np.array( x_test ) 204 | y_test = np.array( y_test ) 205 | d_test = np.array( d_test ) 206 | 207 | # Initialize all variables 208 | sess.run(tf.global_variables_initializer()) 209 | # sess.run(cnn.W.assign(w2v)) 210 | best_score_pre = np.zeros( (4) ) 211 | best_score_cv = np.zeros( (4) ) 212 | data_size = len(x_train) 213 | 214 | # Generate batches 215 | train_batch_iter = data_helpers.batch_iter( 216 | data = [x_train, y_train, d_train], 217 | batch_size = FLAGS.batch_size) 218 | 219 | # pre-train 220 | for _ in range( FLAGS.num_train_epochs * data_size / FLAGS.batch_size): 221 | x_batch, y_batch, d_batch = train_batch_iter.next_full_batch() 222 | 223 | train_batch( x_batch, y_batch, d_batch, opt=optimizer_n, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 224 | 225 | current_step = tf.train.global_step(sess, global_step) 226 | 227 | if current_step % FLAGS.evaluate_every == 0: 228 | for dom in range( 4 ): 229 | acc = dev_step( x_test[dom], y_test[dom], d_test[dom] ) 230 | if acc > best_score_pre[dom]: 231 | best_score_pre[dom] = acc 232 | 233 | best_scores_pre.append(best_score_pre) 234 | 235 | # Training loop. For each batch... 236 | for _ in range( FLAGS.num_tune_epochs * data_size / FLAGS.batch_size ): 237 | x_batch, y_batch, d_batch = train_batch_iter.next_full_batch() 238 | 239 | train_batch( x_batch, y_batch, d_batch, 240 | opt=optimizer_d, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 241 | train_batch( x_batch, y_batch, d_batch, 242 | opt=optimizer_g, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 243 | 244 | current_step = tf.train.global_step(sess, global_step) 245 | if current_step % FLAGS.evaluate_every == 0: 246 | for dom in range( 4 ): 247 | acc = dev_step( x_test[dom], y_test[dom], d_test[dom] ) 248 | if acc > best_score_cv[dom]: 249 | best_score_cv[dom] = acc 250 | 251 | best_scores.append(best_score_cv) 252 | print("best phase 1 score {}".format(best_score_pre)) 253 | print("best phase 2 score {}".format(best_score_cv)) 254 | 255 | print( best_scores_pre) 256 | print( np.average( best_scores_pre) ) 257 | 258 | print( best_scores ) 259 | print( np.average(best_scores) ) 260 | -------------------------------------------------------------------------------- /ood_BDEK_adv_gen.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import os 4 | import time 5 | import datetime 6 | # import cPickle 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | 11 | import data_helpers 12 | from text_cnn_gen import TextCNN 13 | 14 | from tensorflow.contrib import learn 15 | 16 | # Parameters 17 | # ================================================== 18 | 19 | # Model Hyperparameters 20 | tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 128)") 21 | tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')") 22 | tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)") 23 | tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)") 24 | tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)") 25 | tf.flags.DEFINE_float("adv_lambda", 1e-3, "Robust Regularizaion lambda (default: 1e-3)") 26 | tf.flags.DEFINE_float("learning_rate", 1e-3, "Learning rate alpha") 27 | 28 | # Training parameters 29 | tf.flags.DEFINE_integer("batch_size", 32, "Batch Size (default: 64)") 30 | tf.flags.DEFINE_integer("num_train_epochs", 100, "Number of training epochs") 31 | tf.flags.DEFINE_integer("num_tune_epochs", 100, "Number of training epochs") 32 | tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)") 33 | tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)") 34 | # Misc Parameters 35 | tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") 36 | tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") 37 | 38 | print(tf.__version__) 39 | FLAGS = tf.flags.FLAGS 40 | FLAGS.batch_size 41 | print("\nParameters:") 42 | for attr, value in sorted(FLAGS.__flags.items()): 43 | print("{}={}".format(attr.upper(), value)) 44 | print("") 45 | 46 | 47 | # Load data 48 | print("Loading data...") 49 | sent_length, vocab_size, num_label, num_domain, x, y, d = data_helpers.load_data() 50 | # Randomly shuffle data 51 | # np.random.seed(101) 52 | 53 | score_sum = [] 54 | best_score = 0 55 | 56 | with tf.Graph().as_default(): 57 | session_conf = tf.ConfigProto( 58 | allow_soft_placement=FLAGS.allow_soft_placement, 59 | log_device_placement=FLAGS.log_device_placement, 60 | intra_op_parallelism_threads=2, 61 | inter_op_parallelism_threads=4) 62 | sess = tf.Session(config=session_conf) 63 | with sess.as_default(): 64 | cnn = TextCNN( 65 | sequence_length = sent_length, 66 | num_classes = num_label, 67 | vocab_size = vocab_size, 68 | embedding_size = FLAGS.embedding_dim, 69 | filter_sizes = map(int, FLAGS.filter_sizes.split(",")), 70 | num_filters = FLAGS.num_filters, 71 | num_domains = num_domain, 72 | l2_reg_lambda=FLAGS.l2_reg_lambda, 73 | ) 74 | 75 | 76 | # Define Training procedure 77 | learning_rate = tf.placeholder(tf.float32, shape=[], name="learning_rate") 78 | adv_lambda = tf.placeholder(tf.float32, shape=[], name="adversarial_lambda") 79 | 80 | global_step = tf.Variable(0, name="global_step", trainable=False) 81 | all_var_list = tf.trainable_variables() 82 | 83 | optimizer_n = tf.train.AdamOptimizer( 84 | learning_rate = learning_rate 85 | ).minimize( 86 | cnn.y_loss, 87 | global_step=global_step 88 | ) 89 | 90 | var_d = [var for var in all_var_list if 'domain' in var.name or 'gen' in var.name] 91 | assert( len(var_d) == 8) 92 | optimizer_d = tf.train.AdamOptimizer( 93 | 
learning_rate = learning_rate 94 | ).minimize( 95 | adv_lambda * cnn.domain_loss + adv_lambda * cnn.gen_loss, 96 | var_list=var_d 97 | ) 98 | 99 | var_g = [var for var in all_var_list if var not in var_d] 100 | optimizer_g = tf.train.AdamOptimizer( 101 | learning_rate=learning_rate 102 | ).minimize( 103 | cnn.y_loss + adv_lambda * cnn.gen_loss - adv_lambda * cnn.domain_loss, 104 | var_list=var_g, 105 | global_step = global_step 106 | ) 107 | 108 | 109 | def train_batch(x_batch, y_batch, d_batch, opt, adv_lbd, lr): 110 | """ 111 | A single training step 112 | """ 113 | feed_dict = { 114 | cnn.input_x: x_batch, 115 | cnn.input_y: y_batch, 116 | cnn.input_d: d_batch, 117 | cnn.dropout_keep_prob: FLAGS.dropout_keep_prob, 118 | adv_lambda: adv_lbd, 119 | learning_rate: lr, 120 | } 121 | _, step, loss, accuracy, d_l, d_a = sess.run( 122 | [opt, global_step, cnn.y_loss, cnn.y_accuracy, cnn.domain_loss, cnn.domain_accuracy], 123 | feed_dict) 124 | 125 | def dev_batch(x_batch, y_batch, d_batch): 126 | """ 127 | Evaluates model on a dev set 128 | """ 129 | feed_dict = { 130 | cnn.input_x: x_batch, 131 | cnn.input_y: y_batch, 132 | cnn.input_d: d_batch, 133 | cnn.dropout_keep_prob: 1.0, 134 | adv_lambda: 0, 135 | } 136 | step, loss, accuracy, d_l, d_a = sess.run( 137 | [global_step, cnn.y_loss, cnn.y_accuracy, cnn.domain_loss, cnn.domain_accuracy], 138 | feed_dict) 139 | return accuracy 140 | 141 | def dev_step(x_dev, y_dev, d_dev): 142 | cor = 0. 143 | step = 512 144 | for ind in range(0, len(x_dev), step): 145 | num_ins = min(len(x_dev) - ind, step) 146 | acc = dev_batch( 147 | x_batch = x_dev[ind: ind + num_ins], 148 | y_batch = y_dev[ind: ind + num_ins], 149 | d_batch = d_dev[ind: ind + num_ins] 150 | ) 151 | cor = cor + num_ins * acc 152 | acc = cor / len( x_dev ) 153 | return acc 154 | 155 | 156 | #data_split 157 | cv_iter = data_helpers.cross_validation_iter( 158 | data=[x, y, d], 159 | ) 160 | best_scores_pre = [] 161 | best_scores = [] 162 | 163 | for _ in range(1): 164 | x_train, y_train, d_train,\ 165 | x_test_all, y_test_all, d_test_all = cv_iter.fetch_next() 166 | print("split train {} / dev {}".format(len(x_train), len(x_test_all))) 167 | x_test = [ [], [], [], [] ] 168 | y_test = [ [], [], [], [] ] 169 | d_test = [ [], [], [], [] ] 170 | for i in range( len(x_test_all) ): 171 | dom = np.argmax( d_test_all[i] ) 172 | x_test[dom].append( x_test_all[i] ) 173 | y_test[dom].append( y_test_all[i] ) 174 | d_test[dom].append( d_test_all[i] ) 175 | x_test = np.array( x_test ) 176 | y_test = np.array( y_test ) 177 | d_test = np.array( d_test ) 178 | 179 | 180 | # Initialize all variables 181 | sess.run(tf.global_variables_initializer()) 182 | # sess.run(cnn.W.assign(w2v)) 183 | best_score_pre = np.zeros( (4) ) 184 | best_score_cv = np.zeros( (4) ) 185 | data_size = len(x_train) 186 | 187 | # Generate batches 188 | train_batch_iter = data_helpers.batch_iter( 189 | data = [x_train, y_train, d_train], 190 | batch_size = FLAGS.batch_size) 191 | 192 | # pre-train 193 | for _ in range( FLAGS.num_train_epochs * data_size / FLAGS.batch_size): 194 | x_batch, y_batch, d_batch = train_batch_iter.next_full_batch() 195 | 196 | train_batch( x_batch, y_batch, d_batch, opt=optimizer_n, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 197 | 198 | current_step = tf.train.global_step(sess, global_step) 199 | 200 | if current_step % FLAGS.evaluate_every == 0: 201 | for dom in range( 4 ): 202 | acc = dev_step( x_test[dom], y_test[dom], d_test[dom] ) 203 | if acc > best_score_pre[dom]: 204 | best_score_pre[dom] = 
acc 205 | 206 | best_scores_pre.append(best_score_pre) 207 | 208 | # Training loop. For each batch... 209 | for _ in range( FLAGS.num_tune_epochs * data_size / FLAGS.batch_size ): 210 | x_batch, y_batch, d_batch = train_batch_iter.next_full_batch() 211 | 212 | train_batch( x_batch, y_batch, d_batch, 213 | opt=optimizer_d, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 214 | train_batch( x_batch, y_batch, d_batch, 215 | opt=optimizer_g, adv_lbd=FLAGS.adv_lambda, lr=FLAGS.learning_rate ) 216 | 217 | current_step = tf.train.global_step(sess, global_step) 218 | if current_step % FLAGS.evaluate_every == 0: 219 | for dom in range( 4 ): 220 | acc = dev_step( x_test[dom], y_test[dom], d_test[dom] ) 221 | if acc > best_score_cv[dom]: 222 | best_score_cv[dom] = acc 223 | 224 | best_scores.append(best_score_cv) 225 | print("best phase 1 score {}".format(best_score_pre)) 226 | print("best phase 2 score {}".format(best_score_cv)) 227 | 228 | print( best_scores_pre ) 229 | print( np.average( best_scores_pre) ) 230 | 231 | print( best_scores ) 232 | print( np.average(best_scores) ) 233 | -------------------------------------------------------------------------------- /text_cnn_baseline.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | 5 | class TextCNN(object): 6 | 7 | def cnn(self, scope_number, embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters): 8 | with tf.variable_scope("cnn%s" % scope_number): 9 | # Create a convolution + maxpool layer for each filter size 10 | pooled_outputs = [] 11 | for i, filter_size in enumerate(filter_sizes): 12 | with tf.variable_scope("conv-maxpool-%s" % filter_size): 13 | # Convolution Layer 14 | filter_shape = [filter_size, embedding_size, 1, num_filters] 15 | W = tf.get_variable( 16 | name="W", 17 | shape=filter_shape, 18 | initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1) 19 | ) 20 | b = tf.get_variable( 21 | name="b", 22 | shape=[num_filters], 23 | initializer=tf.constant_initializer(0.1) 24 | ) 25 | conv = tf.nn.conv2d( 26 | embedded_chars_expanded, 27 | W, 28 | strides=[1, 1, 1, 1], 29 | padding="VALID", 30 | name="conv") 31 | # Apply nonlinearity 32 | h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") 33 | # Maxpooling over the outputs 34 | pooled = tf.nn.max_pool( 35 | h, 36 | ksize=[1, sequence_length - filter_size + 1, 1, 1], 37 | strides=[1, 1, 1, 1], 38 | padding='VALID', 39 | name="pool") 40 | pooled_outputs.append(pooled) 41 | 42 | pooled = tf.concat(pooled_outputs, 3) 43 | return tf.reshape(pooled, [-1, num_filters * len(filter_sizes)]) 44 | 45 | 46 | def wx_plus_b(self, scope_name, x, size): 47 | with tf.variable_scope("full_connect_%s" % scope_name) as scope: 48 | W = tf.get_variable( 49 | name="W", 50 | shape=size, 51 | initializer=tf.contrib.layers.xavier_initializer()) 52 | b = tf.get_variable( 53 | name="b", 54 | shape=[size[1]], 55 | initializer=tf.constant_initializer(0.1, ) 56 | ) 57 | y = tf.nn.xw_plus_b(x, W, b, name="hidden") 58 | return y 59 | 60 | #main enter 61 | def __init__(self, sequence_length, num_classes, vocab_size, 62 | embedding_size, filter_sizes, num_filters, num_domains, 63 | l2_reg_lambda): 64 | 65 | self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x") 66 | self.input_y = tf.placeholder(tf.int32, [None, num_classes], name = "input_y") 67 | self.input_d = tf.placeholder(tf.int32, [None, num_domains], name = "input_d") 68 | 69 | l2_loss = tf.constant(0.0) 70 
| 71 | with tf.variable_scope("embedding"): 72 | self.emb_W = tf.get_variable( 73 | name="lookup_emb", 74 | shape=[vocab_size, embedding_size], 75 | initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0), 76 | trainable=False 77 | ) 78 | embedded_chars = tf.nn.embedding_lookup(self.emb_W, self.input_x) 79 | self.embedded_chars_expanded = tf.expand_dims(embedded_chars, -1) 80 | 81 | #cnn+pooling 82 | num_filters_total = num_filters * len(filter_sizes) 83 | #shared 84 | self.pub_h_pool = self.cnn("shared-public", self.embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters) 85 | #private 86 | # self.pri_h_pool = self.cnn("shared-private", self.embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters) 87 | 88 | #final representation 89 | self.h_pool = self.pub_h_pool 90 | input_dim = num_filters_total 91 | 92 | # Add dropout 93 | with tf.name_scope("dropout"): 94 | self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob") 95 | self.h_drop = tf.nn.dropout(self.h_pool, self.dropout_keep_prob) 96 | self.pub_h_drop = tf.nn.dropout(self.pub_h_pool, self.dropout_keep_prob) 97 | # self.pri_h_drop = tf.nn.dropout(self.pri_h_pool, self.dropout_keep_prob) 98 | 99 | hidden_size = 300 100 | with tf.variable_scope("label"): 101 | h1 = self.wx_plus_b( 102 | scope_name="h1", 103 | x=self.h_drop, 104 | size=[input_dim, hidden_size] 105 | ) 106 | self.y_scores = self.wx_plus_b( 107 | scope_name='score', 108 | x = h1, 109 | size=[hidden_size, num_classes] 110 | ) 111 | # CalculateMean cross-entropy loss 112 | with tf.name_scope("loss"): 113 | losses = tf.nn.softmax_cross_entropy_with_logits( 114 | logits=self.y_scores, 115 | labels=self.input_y, 116 | ) 117 | self.y_loss = tf.reduce_mean(losses, name="task_loss") 118 | 119 | with tf.name_scope("accuracy"): 120 | self.y_pred = tf.argmax(self.y_scores, 1, name="predictions") 121 | cor_pred = tf.cast( 122 | tf.equal( self.y_pred, tf.argmax(self.input_y, 1) ), 123 | "float" 124 | ) 125 | self.y_accuracy = tf.reduce_mean( cor_pred, name="accuracy" ) 126 | 127 | 128 | with tf.variable_scope("domain"): 129 | h1 = self.wx_plus_b( 130 | scope_name="h1", 131 | x=self.pub_h_drop, 132 | size=[num_filters_total, hidden_size] 133 | ) 134 | self.domain_scores = self.wx_plus_b( 135 | scope_name="score", 136 | x=h1, 137 | size=[hidden_size, num_domains] 138 | ) 139 | with tf.name_scope("loss"): 140 | losses = tf.nn.softmax_cross_entropy_with_logits( 141 | logits=self.domain_scores, 142 | labels=self.input_d, 143 | ) 144 | self.domain_loss = tf.reduce_mean(losses) 145 | with tf.name_scope("accuracy"): 146 | self.domain_pred = tf.argmax(self.domain_scores, 1, name="predictions") 147 | cor_pred = tf.cast( 148 | tf.equal(self.domain_pred, tf.argmax(self.input_d, 1) ), 149 | "float" 150 | ) 151 | self.domain_accuracy = tf.reduce_mean(cor_pred, name="acc") 152 | 153 | 154 | # with tf.variable_scope("gen"): 155 | # h1 = self.wx_plus_b( 156 | # scope_name="h1", 157 | # x=self.pri_h_drop, 158 | # size=[num_filters_total, hidden_size] 159 | # ) 160 | # self.gen_scores = self.wx_plus_b( 161 | # scope_name="score", 162 | # x=h1, 163 | # size=[hidden_size, num_domains] 164 | # ) 165 | # with tf.name_scope("loss"): 166 | # losses = tf.nn.softmax_cross_entropy_with_logits( 167 | # logits=self.gen_scores, 168 | # labels=self.input_d, 169 | # ) 170 | # self.gen_loss = tf.reduce_mean(losses) 171 | # with tf.name_scope("accuracy"): 172 | # self.gen_pred = tf.argmax(self.gen_scores, 1, name="predictions") 173 | 
# cor_pred = tf.cast( 174 | # tf.equal(self.gen_pred, tf.argmax(self.input_d, 1) ), 175 | # "float" 176 | # ) 177 | # self.gen_accuracy = tf.reduce_mean(cor_pred, name="acc") 178 | -------------------------------------------------------------------------------- /text_cnn_cond.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | 5 | class TextCNN(object): 6 | 7 | def cnn(self, scope_number, embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters): 8 | with tf.variable_scope("cnn%s" % scope_number): 9 | # Create a convolution + maxpool layer for each filter size 10 | pooled_outputs = [] 11 | for i, filter_size in enumerate(filter_sizes): 12 | with tf.variable_scope("conv-maxpool-%s" % filter_size): 13 | # Convolution Layer 14 | filter_shape = [filter_size, embedding_size, 1, num_filters] 15 | W = tf.get_variable( 16 | name="W", 17 | shape=filter_shape, 18 | initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1) 19 | ) 20 | b = tf.get_variable( 21 | name="b", 22 | shape=[num_filters], 23 | initializer=tf.constant_initializer(0.1) 24 | ) 25 | conv = tf.nn.conv2d( 26 | embedded_chars_expanded, 27 | W, 28 | strides=[1, 1, 1, 1], 29 | padding="VALID", 30 | name="conv") 31 | # Apply nonlinearity 32 | h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") 33 | # Maxpooling over the outputs 34 | pooled = tf.nn.max_pool( 35 | h, 36 | ksize=[1, sequence_length - filter_size + 1, 1, 1], 37 | strides=[1, 1, 1, 1], 38 | padding='VALID', 39 | name="pool") 40 | pooled_outputs.append(pooled) 41 | 42 | pooled = tf.concat(pooled_outputs, 3) 43 | return tf.reshape(pooled, [-1, num_filters * len(filter_sizes)]) 44 | 45 | 46 | def wx_plus_b(self, scope_name, x, size): 47 | with tf.variable_scope("full_connect_%s" % scope_name) as scope: 48 | W = tf.get_variable( 49 | name="W", 50 | shape=size, 51 | initializer=tf.contrib.layers.xavier_initializer()) 52 | b = tf.get_variable( 53 | name="b", 54 | shape=[size[1]], 55 | initializer=tf.constant_initializer(0.1, ) 56 | ) 57 | y = tf.nn.xw_plus_b(x, W, b, name="hidden") 58 | return y 59 | 60 | #main enter 61 | def __init__(self, sequence_length, num_classes, vocab_size, 62 | embedding_size, filter_sizes, num_filters, num_domains, 63 | l2_reg_lambda): 64 | 65 | self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x") 66 | self.input_y = tf.placeholder(tf.int32, [None, num_classes], name = "input_y") 67 | self.input_d = tf.placeholder(tf.int32, [None, num_domains], name = "input_d") 68 | 69 | l2_loss = tf.constant(0.0) 70 | 71 | with tf.variable_scope("embedding"): 72 | self.emb_W = tf.get_variable( 73 | name="lookup_emb", 74 | shape=[vocab_size, embedding_size], 75 | initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0), 76 | trainable=False 77 | ) 78 | embedded_chars = tf.nn.embedding_lookup(self.emb_W, self.input_x) 79 | self.embedded_chars_expanded = tf.expand_dims(embedded_chars, -1) 80 | 81 | #cnn+pooling 82 | num_filters_total = num_filters * len(filter_sizes) 83 | #shared 84 | self.pub_h_pool = self.cnn("shared", self.embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters) 85 | #private 86 | all_pri_h_pool = [] 87 | for i in range( num_domains ): 88 | all_pri_h_pool.append( 89 | self.cnn("pri-%s" % i, self.embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters) 90 | ) 91 | all_pri_h_pool = tf.stack(all_pri_h_pool, axis = 1) #[batch_size, 
num_domains, feature] 92 | self.pri_h_pool = tf.matmul( 93 | tf.cast( tf.expand_dims(self.input_d, axis=1), "float32"), 94 | all_pri_h_pool 95 | ) 96 | self.pri_h_pool = tf.reshape(self.pri_h_pool, shape = [-1, num_filters_total]) 97 | 98 | #final representation 99 | self.h_pool = tf.concat([self.pri_h_pool, self.pub_h_pool], axis=1) 100 | input_dim = num_filters_total * 2 101 | 102 | # Add dropout 103 | with tf.name_scope("dropout"): 104 | self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob") 105 | self.h_drop = tf.nn.dropout(self.h_pool, self.dropout_keep_prob) 106 | self.pub_h_drop = tf.nn.dropout(self.pub_h_pool, self.dropout_keep_prob) 107 | 108 | hidden_size = 300 109 | with tf.variable_scope("label"): 110 | h1 = self.wx_plus_b( 111 | scope_name="h1", 112 | x=self.h_drop, 113 | size=[input_dim, hidden_size] 114 | ) 115 | self.y_scores = self.wx_plus_b( 116 | scope_name='score', 117 | x = h1, 118 | size=[hidden_size, num_classes] 119 | ) 120 | self.y_scores_softmax = tf.nn.softmax(self.y_scores) 121 | # CalculateMean cross-entropy loss 122 | with tf.name_scope("loss"): 123 | losses = tf.nn.softmax_cross_entropy_with_logits( 124 | logits=self.y_scores, 125 | labels=self.input_y, 126 | ) 127 | self.y_loss = tf.reduce_mean(losses, name="task_loss") 128 | 129 | with tf.name_scope("accuracy"): 130 | self.y_pred = tf.argmax(self.y_scores, 1, name="predictions") 131 | cor_pred = tf.cast( 132 | tf.equal( self.y_pred, tf.argmax(self.input_y, 1) ), 133 | "float" 134 | ) 135 | self.y_accuracy = tf.reduce_mean( cor_pred, name="accuracy" ) 136 | 137 | 138 | with tf.variable_scope("domain"): 139 | h1 = self.wx_plus_b( 140 | scope_name="h1", 141 | x=self.pub_h_drop, 142 | size=[num_filters_total, hidden_size] 143 | ) 144 | self.domain_scores = self.wx_plus_b( 145 | scope_name="score", 146 | x=h1, 147 | size=[hidden_size, num_domains] 148 | ) 149 | with tf.name_scope("loss"): 150 | losses = tf.nn.softmax_cross_entropy_with_logits( 151 | logits=self.domain_scores, 152 | labels=self.input_d, 153 | ) 154 | self.domain_loss = tf.reduce_mean(losses) 155 | with tf.name_scope("accuracy"): 156 | self.domain_pred = tf.argmax(self.domain_scores, 1, name="predictions") 157 | cor_pred = tf.cast( 158 | tf.equal(self.domain_pred, tf.argmax(self.input_d, 1) ), 159 | "float" 160 | ) 161 | self.domain_accuracy = tf.reduce_mean(cor_pred, name="acc") 162 | -------------------------------------------------------------------------------- /text_cnn_gen.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | 5 | class TextCNN(object): 6 | 7 | def cnn(self, scope_number, embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters): 8 | with tf.variable_scope("cnn%s" % scope_number): 9 | # Create a convolution + maxpool layer for each filter size 10 | pooled_outputs = [] 11 | for i, filter_size in enumerate(filter_sizes): 12 | with tf.variable_scope("conv-maxpool-%s" % filter_size): 13 | # Convolution Layer 14 | filter_shape = [filter_size, embedding_size, 1, num_filters] 15 | W = tf.get_variable( 16 | name="W", 17 | shape=filter_shape, 18 | initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1) 19 | ) 20 | b = tf.get_variable( 21 | name="b", 22 | shape=[num_filters], 23 | initializer=tf.constant_initializer(0.1) 24 | ) 25 | conv = tf.nn.conv2d( 26 | embedded_chars_expanded, 27 | W, 28 | strides=[1, 1, 1, 1], 29 | padding="VALID", 30 | name="conv") 31 | # Apply nonlinearity 32 | h 
= tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") 33 | # Maxpooling over the outputs 34 | pooled = tf.nn.max_pool( 35 | h, 36 | ksize=[1, sequence_length - filter_size + 1, 1, 1], 37 | strides=[1, 1, 1, 1], 38 | padding='VALID', 39 | name="pool") 40 | pooled_outputs.append(pooled) 41 | 42 | pooled = tf.concat(pooled_outputs, 3) 43 | return tf.reshape(pooled, [-1, num_filters * len(filter_sizes)]) 44 | 45 | 46 | def wx_plus_b(self, scope_name, x, size): 47 | with tf.variable_scope("full_connect_%s" % scope_name) as scope: 48 | W = tf.get_variable( 49 | name="W", 50 | shape=size, 51 | initializer=tf.contrib.layers.xavier_initializer()) 52 | b = tf.get_variable( 53 | name="b", 54 | shape=[size[1]], 55 | initializer=tf.constant_initializer(0.1, ) 56 | ) 57 | y = tf.nn.xw_plus_b(x, W, b, name="hidden") 58 | return y 59 | 60 | #main enter 61 | def __init__(self, sequence_length, num_classes, vocab_size, 62 | embedding_size, filter_sizes, num_filters, num_domains, 63 | l2_reg_lambda): 64 | 65 | self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x") 66 | self.input_y = tf.placeholder(tf.int32, [None, num_classes], name = "input_y") 67 | self.input_d = tf.placeholder(tf.int32, [None, num_domains], name = "input_d") 68 | 69 | l2_loss = tf.constant(0.0) 70 | 71 | with tf.variable_scope("embedding"): 72 | self.emb_W = tf.get_variable( 73 | name="lookup_emb", 74 | shape=[vocab_size, embedding_size], 75 | initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0), 76 | trainable=False 77 | ) 78 | embedded_chars = tf.nn.embedding_lookup(self.emb_W, self.input_x) 79 | self.embedded_chars_expanded = tf.expand_dims(embedded_chars, -1) 80 | 81 | #cnn+pooling 82 | num_filters_total = num_filters * len(filter_sizes) 83 | #shared 84 | self.pub_h_pool = self.cnn("shared-public", self.embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters) 85 | #private 86 | self.pri_h_pool = self.cnn("shared-private", self.embedded_chars_expanded, sequence_length, embedding_size, filter_sizes, num_filters) 87 | 88 | #final representation 89 | self.h_pool = tf.concat([self.pri_h_pool, self.pub_h_pool], axis=1) 90 | input_dim = num_filters_total * 2 91 | 92 | # Add dropout 93 | with tf.name_scope("dropout"): 94 | self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob") 95 | self.h_drop = tf.nn.dropout(self.h_pool, self.dropout_keep_prob) 96 | self.pub_h_drop = tf.nn.dropout(self.pub_h_pool, self.dropout_keep_prob) 97 | self.pri_h_drop = tf.nn.dropout(self.pri_h_pool, self.dropout_keep_prob) 98 | 99 | hidden_size = 300 100 | with tf.variable_scope("label"): 101 | h1 = self.wx_plus_b( 102 | scope_name="h1", 103 | x=self.h_drop, 104 | size=[input_dim, hidden_size] 105 | ) 106 | self.y_scores = self.wx_plus_b( 107 | scope_name='score', 108 | x = h1, 109 | size=[hidden_size, num_classes] 110 | ) 111 | # CalculateMean cross-entropy loss 112 | with tf.name_scope("loss"): 113 | losses = tf.nn.softmax_cross_entropy_with_logits( 114 | logits=self.y_scores, 115 | labels=self.input_y, 116 | ) 117 | self.y_loss = tf.reduce_mean(losses, name="task_loss") 118 | 119 | with tf.name_scope("accuracy"): 120 | self.y_pred = tf.argmax(self.y_scores, 1, name="predictions") 121 | cor_pred = tf.cast( 122 | tf.equal( self.y_pred, tf.argmax(self.input_y, 1) ), 123 | "float" 124 | ) 125 | self.y_accuracy = tf.reduce_mean( cor_pred, name="accuracy" ) 126 | 127 | 128 | with tf.variable_scope("domain"): 129 | h1 = self.wx_plus_b( 130 | scope_name="h1", 131 | x=self.pub_h_drop, 
132 | size=[num_filters_total, hidden_size] 133 | ) 134 | self.domain_scores = self.wx_plus_b( 135 | scope_name="score", 136 | x=h1, 137 | size=[hidden_size, num_domains] 138 | ) 139 | with tf.name_scope("loss"): 140 | losses = tf.nn.softmax_cross_entropy_with_logits( 141 | logits=self.domain_scores, 142 | labels=self.input_d, 143 | ) 144 | self.domain_loss = tf.reduce_mean(losses) 145 | with tf.name_scope("accuracy"): 146 | self.domain_pred = tf.argmax(self.domain_scores, 1, name="predictions") 147 | cor_pred = tf.cast( 148 | tf.equal(self.domain_pred, tf.argmax(self.input_d, 1) ), 149 | "float" 150 | ) 151 | self.domain_accuracy = tf.reduce_mean(cor_pred, name="acc") 152 | 153 | 154 | with tf.variable_scope("gen"): 155 | h1 = self.wx_plus_b( 156 | scope_name="h1", 157 | x=self.pri_h_drop, 158 | size=[num_filters_total, hidden_size] 159 | ) 160 | self.gen_scores = self.wx_plus_b( 161 | scope_name="score", 162 | x=h1, 163 | size=[hidden_size, num_domains] 164 | ) 165 | with tf.name_scope("loss"): 166 | losses = tf.nn.softmax_cross_entropy_with_logits( 167 | logits=self.gen_scores, 168 | labels=self.input_d, 169 | ) 170 | self.gen_loss = tf.reduce_mean(losses) 171 | with tf.name_scope("accuracy"): 172 | self.gen_pred = tf.argmax(self.gen_scores, 1, name="predictions") 173 | cor_pred = tf.cast( 174 | tf.equal(self.gen_pred, tf.argmax(self.input_d, 1) ), 175 | "float" 176 | ) 177 | self.gen_accuracy = tf.reduce_mean(cor_pred, name="acc") 178 | --------------------------------------------------------------------------------
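
The three ood_BDEK_adv_*.py scripts above share the same alternating update between the domain discriminator and the rest of the network (the Gen variant additionally adds a gen_loss term over the private representation). The sketch below is not part of the repository: it reduces that pattern to a toy TensorFlow 1.x graph with made-up data, and the helper `dense`, the scope names, the layer sizes and the random inputs are illustrative assumptions rather than the repository's API.

```python
# Minimal sketch of the alternating adversarial update used in the tuning phase,
# on a toy graph; assumes TensorFlow 1.x and NumPy only.
import numpy as np
import tensorflow as tf


def dense(scope, x, n_in, n_out):
    # Small fully connected layer, mirroring wx_plus_b in text_cnn_*.py.
    with tf.variable_scope(scope):
        W = tf.get_variable("W", [n_in, n_out],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable("b", [n_out], initializer=tf.constant_initializer(0.1))
        return tf.nn.xw_plus_b(x, W, b)


x = tf.placeholder(tf.float32, [None, 2], name="input_x")
y = tf.placeholder(tf.float32, [None, 2], name="input_y")      # task label, one-hot
d = tf.placeholder(tf.float32, [None, 3], name="input_d")      # domain label, one-hot
adv_lambda = tf.placeholder(tf.float32, [], name="adv_lambda")

h = tf.nn.relu(dense("encoder", x, 2, 8))       # shared representation
y_logits = dense("label", h, 8, 2)              # task classifier head
d_logits = dense("domain", h, 8, 3)             # domain discriminator head

y_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_logits, labels=y))
domain_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=d_logits, labels=d))

all_vars = tf.trainable_variables()
var_d = [v for v in all_vars if "domain" in v.name]   # discriminator parameters only
var_g = [v for v in all_vars if v not in var_d]       # encoder + task head

# Discriminator step: learn to recover the domain from the shared features.
opt_d = tf.train.AdamOptimizer(1e-3).minimize(adv_lambda * domain_loss, var_list=var_d)
# Representation step: keep the task loss low while raising the discriminator's loss.
opt_g = tf.train.AdamOptimizer(1e-3).minimize(y_loss - adv_lambda * domain_loss,
                                              var_list=var_g)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {x: np.random.randn(4, 2), y: np.eye(2)[[0, 1, 0, 1]],
            d: np.eye(3)[[0, 1, 2, 0]], adv_lambda: 1e-3}
    for _ in range(10):          # alternate the two updates, as in the tuning loops
        sess.run(opt_d, feed)
        sess.run(opt_g, feed)
```

Here `adv_lambda` plays the same role as the `--adv_lambda` flag: it scales how strongly the shared features are pushed toward domain invariance relative to the task loss.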