├── README.md
├── couplet_by_seq_tagging.py
├── seq2seq.py
└── seq2seq_bidecoder.py

/README.md:
--------------------------------------------------------------------------------
# seq2seq

Keras examples of seq2seq: automatic title generation.

https://kexue.fm/archives/5861

## Discussion

QQ group: 67729435; for the WeChat group, add the bot account spaces_ac_cn.
--------------------------------------------------------------------------------
/couplet_by_seq_tagging.py:
--------------------------------------------------------------------------------
#! -*- coding: utf-8 -*-
# Couplet matching treated as a sequence-tagging problem:
# predict one output character per input character.

import codecs
import numpy as np
import uniout  # makes Python 2 print Chinese lists/strings readably
from keras.models import Model
from keras.layers import *
from keras.callbacks import Callback
from keras import backend as K


min_count = 2
maxlen = 16
batch_size = 64
char_size = 128


def read_data(txtname):
    txt = codecs.open(txtname, encoding='utf-8').read()
    txt = txt.strip().split('\n')
    txt = [l.strip().split(' ') for l in txt]
    txt = [l for l in txt if len(l) <= maxlen]  # drop couplets that are too long
    return txt


x_train_txt = read_data('couplet/train/in.txt')
y_train_txt = read_data('couplet/train/out.txt')
x_test_txt = read_data('couplet/test/in.txt')
y_test_txt = read_data('couplet/test/out.txt')


chars = {}
for txt in [x_train_txt, y_train_txt, x_test_txt, y_test_txt]:
    for l in txt:
        for w in l:
            chars[w] = chars.get(w, 0) + 1


chars = {i: j for i, j in chars.items() if j >= min_count}
id2char = {i + 1: j for i, j in enumerate(chars)}
char2id = {j: i for i, j in id2char.items()}


def string2id(s):
    # 0: <unk>
    return [char2id.get(c, 0) for c in s]

x_train = map(string2id, x_train_txt)
y_train = map(string2id, y_train_txt)
x_test = map(string2id, x_test_txt)
y_test = map(string2id, y_test_txt)


# group the samples by length
train_dict = {}
test_dict = {}

for i, x in enumerate(x_train):
    j = len(x)
    if j not in train_dict:
        train_dict[j] = [[], []]
    train_dict[j][0].append(x)
    train_dict[j][1].append(y_train[i])

for i, x in enumerate(x_test):
    j = len(x)
    if j not in test_dict:
        test_dict[j] = [[], []]
    test_dict[j][0].append(x)
    test_dict[j][1].append(y_test[i])

for j in train_dict:
    train_dict[j][0] = np.array(train_dict[j][0])
    train_dict[j][1] = np.array(train_dict[j][1])

for j in test_dict:
    test_dict[j][0] = np.array(test_dict[j][0])
    test_dict[j][1] = np.array(test_dict[j][1])


def data_generator(data):
    # pick a length at random (weighted by sample count), then draw a batch
    # of samples that all have that length
    keys = list(data.keys())
    data_p = np.array([float(len(data[k][0])) for k in keys])
    data_p /= data_p.sum()
    while True:
        idx = keys[np.random.choice(len(keys), p=data_p)]
        size = min(batch_size, len(data[idx][0]))
        idxs = np.random.choice(len(data[idx][0]), size=size, replace=False)
        yield data[idx][0][idxs], np.expand_dims(data[idx][1][idxs], 2)


def gated_resnet(x, ksize=3):
    # gated convolution + residual: one Conv1D produces both the gate g
    # (first half of the channels) and the candidate output (second half)
    x_dim = K.int_shape(x)[-1]
    xo = Conv1D(x_dim * 2, ksize, padding='same')(x)
    return Lambda(lambda x: x[0] * K.sigmoid(x[1][..., :x_dim]) \
                  + x[1][..., x_dim:] * K.sigmoid(-x[1][..., :x_dim]))([x, xo])
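# A quick aside on the block above (a sketch, not part of the model): since
# sigmoid(-g) = 1 - sigmoid(g), the Lambda computes a convex combination of
# the block input and its convolution, i.e. a highway-style gate that lets
# gradients bypass the convolution when the gate saturates. In numpy terms:
#
#     s = 1. / (1. + np.exp(-g))      # gate, first half of the Conv1D output
#     out = x * s + conv * (1. - s)   # residual mix; `conv` is the second half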
x_in = Input(shape=(None,))
x = x_in
x = Embedding(len(chars) + 1, char_size)(x)
x = Dropout(0.25)(x)

x = gated_resnet(x)
x = gated_resnet(x)
x = gated_resnet(x)
x = gated_resnet(x)
x = gated_resnet(x)
x = gated_resnet(x)

x = Dense(len(chars) + 1, activation='softmax')(x)

model = Model(x_in, x)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam')


def couplet_match(s):
    # generate the matching second line
    # prior knowledge: the output character must differ from the input
    # character at the same position
    x = np.array([string2id(s)])
    y = model.predict(x)[0]
    for i, j in enumerate(x[0]):
        y[i, j] = 0.
    y = y[:, 1:].argmax(axis=1) + 1
    r = ''.join([id2char[i] for i in y])
    print u'first line: %s, second line: %s' % (s, r)
    return r


class Evaluate(Callback):
    def __init__(self):
        self.lowest = 1e10
    def on_epoch_end(self, epoch, logs=None):
        # show a few examples during training to watch the quality improve
        couplet_match(u'晚风摇树树还挺')
        couplet_match(u'今天天气不错')
        couplet_match(u'鱼跃此时海')
        couplet_match(u'只有香如故')
        # keep the best weights so far
        if logs['val_loss'] <= self.lowest:
            self.lowest = logs['val_loss']
            model.save_weights('./best_model.weights')


evaluator = Evaluate()

model.fit_generator(data_generator(train_dict),
                    steps_per_epoch=1000,
                    epochs=100,
                    validation_data=data_generator(test_dict),
                    validation_steps=100,
                    callbacks=[evaluator])
--------------------------------------------------------------------------------
/seq2seq.py:
--------------------------------------------------------------------------------
#! -*- coding: utf-8 -*-
# seq2seq with attention: generate a news title from the article body.

import numpy as np
import pymongo
from tqdm import tqdm
import os, json
import uniout  # makes Python 2 print Chinese lists/strings readably
import tensorflow as tf
import keras
from keras.layers import *
from keras_layer_normalization import LayerNormalization
from keras.models import Model
from keras import backend as K
from keras.callbacks import Callback
from keras.optimizers import Adam


min_count = 32
maxlen = 400
batch_size = 64
epochs = 100
char_size = 128
z_dim = 128
db = pymongo.MongoClient().text.thucnews  # the corpus lives in MongoDB


if os.path.exists('seq2seq_config.json'):
    chars, id2char, char2id = json.load(open('seq2seq_config.json'))
    id2char = {int(i): j for i, j in id2char.items()}
else:
    chars = {}
    for a in tqdm(db.find()):
        for w in a['content']:  # raw characters, no word segmentation needed
            chars[w] = chars.get(w, 0) + 1
        for w in a['title']:  # raw characters, no word segmentation needed
            chars[w] = chars.get(w, 0) + 1
    chars = {i: j for i, j in chars.items() if j >= min_count}
    # 0: mask
    # 1: unk
    # 2: start
    # 3: end
    id2char = {i + 4: j for i, j in enumerate(chars)}
    char2id = {j: i for i, j in id2char.items()}
    json.dump([chars, id2char, char2id], open('seq2seq_config.json', 'w'))


def str2id(s, start_end=False):
    # characters -> integer ids
    if start_end:  # add the <start>/<end> markers
        ids = [char2id.get(c, 1) for c in s[:maxlen - 2]]
        ids = [2] + ids + [3]
    else:  # plain conversion
        ids = [char2id.get(c, 1) for c in s[:maxlen]]
    return ids


def id2str(ids):
    # ids -> characters; unknown ids become the empty string
    return ''.join([id2char.get(i, '') for i in ids])


def padding(x):
    # pad every sequence to the longest one in the batch
    ml = max([len(i) for i in x])
    return [i + [0] * (ml - len(i)) for i in x]


def data_generator():
    # batch generator
    X, Y = [], []
    while True:
        for a in db.find():
            X.append(str2id(a['content']))
            Y.append(str2id(a['title'], start_end=True))
            if len(X) == batch_size:
                X = np.array(padding(X))
                Y = np.array(padding(Y))
                yield [X, Y], None
                X, Y = [], []
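# For illustration only (hypothetical ids): if char2id were {u'天': 10, u'气': 11},
# then str2id(u'天气', start_end=True) -> [2, 10, 11, 3], and
# padding([[2, 10, 11, 3], [2, 10, 3]]) -> [[2, 10, 11, 3], [2, 10, 3, 0]].
# The generator yields no explicit targets (None) because the loss is attached
# to the model itself via model.add_loss further down.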
def to_one_hot(x):
    """Return a vocabulary-sized 0/1 vector per sample marking which
    characters occur in the article.
    """
    x, x_mask = x
    x = K.cast(x, 'int32')
    x = K.one_hot(x, len(chars) + 4)
    x = K.sum(x_mask * x, 1, keepdims=True)
    x = K.cast(K.greater(x, 0.5), 'float32')
    return x


class ScaleShift(Layer):
    """Scale-and-shift layer: y = exp(log_scale) * x + shift.
    """
    def __init__(self, **kwargs):
        super(ScaleShift, self).__init__(**kwargs)
    def build(self, input_shape):
        kernel_shape = (1,) * (len(input_shape) - 1) + (input_shape[-1],)
        self.log_scale = self.add_weight(name='log_scale',
                                         shape=kernel_shape,
                                         initializer='zeros')
        self.shift = self.add_weight(name='shift',
                                     shape=kernel_shape,
                                     initializer='zeros')
    def call(self, inputs):
        x_outs = K.exp(self.log_scale) * inputs + self.shift
        return x_outs


class OurLayer(Layer):
    """Layer subclass with a `reuse` method, so a custom layer can call
    existing layers internally while still tracking their weights and updates.
    """
    def reuse(self, layer, *args, **kwargs):
        if not layer.built:
            if len(args) > 0:
                inputs = args[0]
            else:
                inputs = kwargs['inputs']
            if isinstance(inputs, list):
                input_shape = [K.int_shape(x) for x in inputs]
            else:
                input_shape = K.int_shape(inputs)
            layer.build(input_shape)
        outputs = layer.call(*args, **kwargs)
        if not keras.__version__.startswith('2.3.'):  # Keras 2.3.x tracks sublayer weights itself
            for w in layer.trainable_weights:
                if w not in self._trainable_weights:
                    self._trainable_weights.append(w)
            for w in layer.non_trainable_weights:
                if w not in self._non_trainable_weights:
                    self._non_trainable_weights.append(w)
            for u in layer.updates:
                if not hasattr(self, '_updates'):
                    self._updates = []
                if u not in self._updates:
                    self._updates.append(u)
        return outputs


class OurBidirectional(OurLayer):
    """Hand-rolled bidirectional RNN wrapper that takes an explicit mask,
    so the reversed sequence stays aligned with the forward one.
    """
    def __init__(self, layer, **args):
        super(OurBidirectional, self).__init__(**args)
        self.forward_layer = layer.__class__.from_config(layer.get_config())
        self.backward_layer = layer.__class__.from_config(layer.get_config())
        self.forward_layer.name = 'forward_' + self.forward_layer.name
        self.backward_layer.name = 'backward_' + self.backward_layer.name
    def reverse_sequence(self, x, mask):
        """Here mask.shape is [batch_size, seq_len, 1].
        """
        seq_len = K.round(K.sum(mask, 1)[:, 0])
        seq_len = K.cast(seq_len, 'int32')
        return tf.reverse_sequence(x, seq_len, seq_dim=1)
    def call(self, inputs):
        x, mask = inputs
        x_forward = self.reuse(self.forward_layer, x)
        x_backward = self.reverse_sequence(x, mask)
        x_backward = self.reuse(self.backward_layer, x_backward)
        x_backward = self.reverse_sequence(x_backward, mask)
        x = K.concatenate([x_forward, x_backward], -1)
        if K.ndim(x) == 3:
            return x * mask
        else:
            return x
    def compute_output_shape(self, input_shape):
        return input_shape[0][:-1] + (self.forward_layer.units * 2,)


def seq_avgpool(x):
    """seq has shape [None, seq_len, s_size] and mask has shape
    [None, seq_len, 1]; zero out the masked positions, then average-pool.
    """
    seq, mask = x
    return K.sum(seq * mask, 1) / (K.sum(mask, 1) + 1e-6)


def seq_maxpool(x):
    """seq has shape [None, seq_len, s_size] and mask has shape
    [None, seq_len, 1]; push the masked positions to -1e10, then max-pool.
    """
    seq, mask = x
    seq -= (1 - mask) * 1e10
    return K.max(seq, 1)
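# Why the two pooling helpers look different: seq_maxpool subtracts 1e10 at
# masked positions so padding can never win the max, while seq_avgpool divides
# by the number of unmasked steps (the +1e-6 guards against an all-padding
# row). E.g. with mask [[1], [1], [0]] both pool over the first two steps only.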
class SelfModulatedLayerNormalization(OurLayer):
    """Modeled on Self-Modulated Batch Normalization, except that the
    Batch Normalization is replaced by Layer Normalization.
    """
    def __init__(self, num_hidden, **kwargs):
        super(SelfModulatedLayerNormalization, self).__init__(**kwargs)
        self.num_hidden = num_hidden
    def build(self, input_shape):
        super(SelfModulatedLayerNormalization, self).build(input_shape)
        output_dim = input_shape[0][-1]
        self.layernorm = LayerNormalization(center=False, scale=False)
        self.beta_dense_1 = Dense(self.num_hidden, activation='relu')
        self.beta_dense_2 = Dense(output_dim)
        self.gamma_dense_1 = Dense(self.num_hidden, activation='relu')
        self.gamma_dense_2 = Dense(output_dim)
    def call(self, inputs):
        inputs, cond = inputs
        inputs = self.reuse(self.layernorm, inputs)
        beta = self.reuse(self.beta_dense_1, cond)
        beta = self.reuse(self.beta_dense_2, beta)
        gamma = self.reuse(self.gamma_dense_1, cond)
        gamma = self.reuse(self.gamma_dense_2, gamma)
        for _ in range(K.ndim(inputs) - K.ndim(cond)):
            beta = K.expand_dims(beta, 1)
            gamma = K.expand_dims(gamma, 1)
        return inputs * (gamma + 1) + beta
    def compute_output_shape(self, input_shape):
        return input_shape[0]


class Attention(OurLayer):
    """Multi-head attention.
    """
    def __init__(self, heads, size_per_head, key_size=None,
                 mask_right=False, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.heads = heads
        self.size_per_head = size_per_head
        self.out_dim = heads * size_per_head
        self.key_size = key_size if key_size else size_per_head
        self.mask_right = mask_right
    def build(self, input_shape):
        super(Attention, self).build(input_shape)
        self.q_dense = Dense(self.key_size * self.heads, use_bias=False)
        self.k_dense = Dense(self.key_size * self.heads, use_bias=False)
        self.v_dense = Dense(self.out_dim, use_bias=False)
    def mask(self, x, mask, mode='mul'):
        if mask is None:
            return x
        else:
            for _ in range(K.ndim(x) - K.ndim(mask)):
                mask = K.expand_dims(mask, K.ndim(mask))
            if mode == 'mul':
                return x * mask
            else:
                return x - (1 - mask) * 1e10
    def call(self, inputs):
        q, k, v = inputs[:3]
        v_mask, q_mask = None, None
        if len(inputs) > 3:
            v_mask = inputs[3]
        if len(inputs) > 4:
            q_mask = inputs[4]
        # linear projections
        qw = self.reuse(self.q_dense, q)
        kw = self.reuse(self.k_dense, k)
        vw = self.reuse(self.v_dense, v)
        # reshape to separate the heads
        qw = K.reshape(qw, (-1, K.shape(qw)[1], self.heads, self.key_size))
        kw = K.reshape(kw, (-1, K.shape(kw)[1], self.heads, self.key_size))
        vw = K.reshape(vw, (-1, K.shape(vw)[1], self.heads, self.size_per_head))
        # move the head axis forward
        qw = K.permute_dimensions(qw, (0, 2, 1, 3))
        kw = K.permute_dimensions(kw, (0, 2, 1, 3))
        vw = K.permute_dimensions(vw, (0, 2, 1, 3))
        # scaled dot-product attention
        a = tf.einsum('ijkl,ijml->ijkm', qw, kw) / self.key_size**0.5
        a = K.permute_dimensions(a, (0, 3, 2, 1))
        a = self.mask(a, v_mask, 'add')
        a = K.permute_dimensions(a, (0, 3, 2, 1))
        if self.mask_right:
            ones = K.ones_like(a[:1, :1])
            mask = (ones - tf.matrix_band_part(ones, -1, 0)) * 1e10
            a = a - mask
        a = K.softmax(a)
        # weighted sum of the values
        o = tf.einsum('ijkl,ijlm->ijkm', a, vw)
        o = K.permute_dimensions(o, (0, 2, 1, 3))
        o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim))
        o = self.mask(o, q_mask, 'mul')
        return o
    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], self.out_dim)
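# Shape walkthrough for Attention (batch b, query length m, key length n):
# after the reshape/permute, qw is (b, heads, m, key_size) and kw is
# (b, heads, n, key_size); the first einsum gives scores a of shape
# (b, heads, m, n), scaled by key_size**-0.5. v_mask masks the key axis
# (hence the permute/mask/permute dance), mask_right hides future positions
# for causal decoding, and the second einsum returns (b, heads, m,
# size_per_head), reassembled into (b, m, out_dim).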
# build the seq2seq model

x_in = Input(shape=(None,))
y_in = Input(shape=(None,))
x, y = x_in, y_in

x_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x)
y_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(y)

x_one_hot = Lambda(to_one_hot)([x, x_mask])
x_prior = ScaleShift()(x_one_hot)  # learned output prior (title characters tend to occur in the article)

embedding = Embedding(len(chars) + 4, char_size)
x = embedding(x)
y = embedding(y)

# encoder: two bidirectional LSTM layers
x = LayerNormalization()(x)
x = OurBidirectional(CuDNNLSTM(z_dim // 2, return_sequences=True))([x, x_mask])
x = LayerNormalization()(x)
x = OurBidirectional(CuDNNLSTM(z_dim // 2, return_sequences=True))([x, x_mask])
x_max = Lambda(seq_maxpool)([x, x_mask])

# decoder: two unidirectional LSTM layers
y = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])
y = CuDNNLSTM(z_dim, return_sequences=True)(y)
y = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])
y = CuDNNLSTM(z_dim, return_sequences=True)(y)
y = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])

# attention over the encoder states
xy = Attention(8, 16)([y, x, x, x_mask])
xy = Concatenate()([y, xy])

# output classifier
xy = Dense(char_size)(xy)
xy = LeakyReLU(0.2)(xy)
xy = Dense(len(chars) + 4)(xy)
xy = Lambda(lambda x: (x[0] + x[1]) / 2)([xy, x_prior])  # average with the prior
xy = Activation('softmax')(xy)

# cross-entropy loss, with the padding positions masked out
cross_entropy = K.sparse_categorical_crossentropy(y_in[:, 1:], xy[:, :-1])
cross_entropy = K.sum(cross_entropy * y_mask[:, 1:, 0]) / K.sum(y_mask[:, 1:, 0])

model = Model([x_in, y_in], xy)
model.add_loss(cross_entropy)
model.compile(optimizer=Adam(1e-3))


def gen_sent(s, topk=3, maxlen=64):
    """beam search decoding:
    keep only the topk best candidates at each step; topk=1 is greedy search
    """
    xid = np.array([str2id(s)] * topk)  # input ids
    yid = np.array([[2]] * topk)  # every decode starts with <start>, whose id is 2
    scores = [0] * topk  # candidate scores
    for i in range(maxlen):  # cap the output at maxlen characters
        proba = model.predict([xid, yid])[:, i, 3:]  # ignore <padding>, <unk>, <start>
        log_proba = np.log(proba + 1e-6)  # log-probabilities for additive scoring
        arg_topk = log_proba.argsort(axis=1)[:, -topk:]  # topk per candidate
        _yid = []  # buffer for the new candidate sequences
        _scores = []  # buffer for the new candidate scores
        if i == 0:
            for j in range(topk):
                _yid.append(list(yid[j]) + [arg_topk[0][j] + 3])
                _scores.append(scores[j] + log_proba[0][arg_topk[0][j]])
        else:
            for j in range(topk):
                for k in range(topk):  # enumerate the topk * topk combinations
                    _yid.append(list(yid[j]) + [arg_topk[j][k] + 3])
                    _scores.append(scores[j] + log_proba[j][arg_topk[j][k]])
            _arg_topk = np.argsort(_scores)[-topk:]  # keep the new topk
            _yid = [_yid[k] for k in _arg_topk]
            _scores = [_scores[k] for k in _arg_topk]
        yid = np.array(_yid)
        scores = np.array(_scores)
        best_one = np.argmax(scores)
        if yid[best_one][-1] == 3:
            return id2str(yid[best_one])
    # if no candidate ended within maxlen characters, return the best anyway
    return id2str(yid[np.argmax(scores)])
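# Beam search sketch with topk=2: after each step we hold the 2 best partial
# titles; the next step scores all 2*2 single-character extensions by
# cumulative log-probability and keeps the best 2 again. The "+ 3" offset
# undoes the [:, i, 3:] slice above, so column 0 of proba corresponds to
# token id 3, i.e. <end>.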
s1 = u'夏天来临,皮肤在强烈紫外线的照射下,晒伤不可避免,因此,晒后及时修复显得尤为重要,否则可能会造成长期伤害。专家表示,选择晒后护肤品要慎重,芦荟凝胶是最安全,有效的一种选择,晒伤严重者,还请及时就医。'
s2 = u'8月28日,网络爆料称,华住集团旗下连锁酒店用户数据疑似发生泄露。从卖家发布的内容看,数据包含华住旗下汉庭、禧玥、桔子、宜必思等10余个品牌酒店的住客信息。泄露的信息包括华住官网注册资料、酒店入住登记的身份信息及酒店开房记录,住客姓名、手机号、邮箱、身份证号、登录账号密码等。卖家对这个约5亿条数据打包出售。第三方安全平台威胁猎人对信息出售者提供的三万条数据进行验证,认为数据真实性非常高。当天下午,华住集团发声明称,已在内部迅速开展核查,并第一时间报警。当晚,上海警方消息称,接到华住集团报案,警方已经介入调查。'

class Evaluate(Callback):
    def __init__(self):
        self.lowest = 1e10
    def on_epoch_end(self, epoch, logs=None):
        # print a couple of examples during training to watch the titles improve
        print gen_sent(s1)
        print gen_sent(s2)
        # keep the best weights so far
        if logs['loss'] <= self.lowest:
            self.lowest = logs['loss']
            model.save_weights('./best_model.weights')


evaluator = Evaluate()

model.fit_generator(data_generator(),
                    steps_per_epoch=1000,
                    epochs=epochs,
                    callbacks=[evaluator])
--------------------------------------------------------------------------------
/seq2seq_bidecoder.py:
--------------------------------------------------------------------------------
#! -*- coding: utf-8 -*-
# seq2seq with a bidirectional decoder, implemented in Keras

import numpy as np
import pymongo
from tqdm import tqdm
import os, json
import uniout  # makes Python 2 print Chinese lists/strings readably
import tensorflow as tf
import keras
from keras.layers import *
from keras_layer_normalization import LayerNormalization
from keras.models import Model
from keras import backend as K
from keras.callbacks import Callback
from keras.optimizers import Adam


min_count = 32
maxlen = 400
batch_size = 64
epochs = 100
char_size = 128
z_dim = 128
db = pymongo.MongoClient().text.thucnews  # the corpus lives in MongoDB


if os.path.exists('seq2seq_config.json'):
    chars, id2char, char2id = json.load(open('seq2seq_config.json'))
    id2char = {int(i): j for i, j in id2char.items()}
else:
    chars = {}
    for a in tqdm(db.find()):
        for w in a['content']:  # raw characters, no word segmentation needed
            chars[w] = chars.get(w, 0) + 1
        for w in a['title']:  # raw characters, no word segmentation needed
            chars[w] = chars.get(w, 0) + 1
    chars = {i: j for i, j in chars.items() if j >= min_count}
    # 0: mask
    # 1: unk
    # 2: start
    # 3: end
    id2char = {i + 4: j for i, j in enumerate(chars)}
    char2id = {j: i for i, j in id2char.items()}
    json.dump([chars, id2char, char2id], open('seq2seq_config.json', 'w'))


def str2id(s, start_end=False):
    # characters -> integer ids
    if start_end:  # add the <start>/<end> markers
        ids = [char2id.get(c, 1) for c in s[:maxlen - 2]]
        ids = [2] + ids + [3]
    else:  # plain conversion
        ids = [char2id.get(c, 1) for c in s[:maxlen]]
    return ids


def id2str(ids):
    # ids -> characters; unknown ids become the empty string
    return ''.join([id2char.get(i, '') for i in ids])


def padding(x):
    # pad every sequence to the longest one in the batch
    ml = max([len(i) for i in x])
    return [i + [0] * (ml - len(i)) for i in x]


def data_generator():
    # batch generator
    X, Y1, Y2 = [], [], []
    while True:
        for a in db.find():
            X.append(str2id(a['content']))
            Y1.append(str2id(a['title'], start_end=True))
            Y2.append(str2id(a['title'], start_end=True)[::-1])
            if len(X) == batch_size:
                X = np.array(padding(X))
                Y1 = np.array(padding(Y1))
                Y2 = np.array(padding(Y2))
                yield [X, Y1, Y2], None
                X, Y1, Y2 = [], [], []
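# Each title is fed to the model twice: Y1 is the usual <start>...<end>
# sequence for the left-to-right decoder, and Y2 is the same ids reversed, so
# the right-to-left decoder literally reads the title backwards (its first
# token is <end>, id 3).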
class OurLayer(Layer):
    """Layer subclass with a `reuse` method, so a custom layer can call
    existing layers internally while still tracking their weights and updates.
    """
    def reuse(self, layer, *args, **kwargs):
        if not layer.built:
            if len(args) > 0:
                inputs = args[0]
            else:
                inputs = kwargs['inputs']
            if isinstance(inputs, list):
                input_shape = [K.int_shape(x) for x in inputs]
            else:
                input_shape = K.int_shape(inputs)
            layer.build(input_shape)
        outputs = layer.call(*args, **kwargs)
        if not keras.__version__.startswith('2.3.'):  # Keras 2.3.x tracks sublayer weights itself
            for w in layer.trainable_weights:
                if w not in self._trainable_weights:
                    self._trainable_weights.append(w)
            for w in layer.non_trainable_weights:
                if w not in self._non_trainable_weights:
                    self._non_trainable_weights.append(w)
            for u in layer.updates:
                if not hasattr(self, '_updates'):
                    self._updates = []
                if u not in self._updates:
                    self._updates.append(u)
        return outputs


def to_one_hot(x):
    """Return a vocabulary-sized 0/1 vector per sample marking which
    characters occur in the article.
    """
    x, x_mask = x
    x = K.cast(x, 'int32')
    x = K.one_hot(x, len(chars) + 4)
    x = K.sum(x_mask * x, 1, keepdims=True)
    x = K.cast(K.greater(x, 0.5), 'float32')
    return x


class ScaleShift(Layer):
    """Scale-and-shift layer: y = exp(log_scale) * x + shift.
    """
    def __init__(self, **kwargs):
        super(ScaleShift, self).__init__(**kwargs)
    def build(self, input_shape):
        kernel_shape = (1,) * (len(input_shape) - 1) + (input_shape[-1],)
        self.log_scale = self.add_weight(name='log_scale',
                                         shape=kernel_shape,
                                         initializer='zeros')
        self.shift = self.add_weight(name='shift',
                                     shape=kernel_shape,
                                     initializer='zeros')
    def call(self, inputs):
        x_outs = K.exp(self.log_scale) * inputs + self.shift
        return x_outs


class OurBidirectional(OurLayer):
    """Hand-rolled bidirectional RNN wrapper that takes an explicit mask,
    so the reversed sequence stays aligned with the forward one.
    """
    def __init__(self, layer, **args):
        super(OurBidirectional, self).__init__(**args)
        self.forward_layer = layer.__class__.from_config(layer.get_config())
        self.backward_layer = layer.__class__.from_config(layer.get_config())
        self.forward_layer.name = 'forward_' + self.forward_layer.name
        self.backward_layer.name = 'backward_' + self.backward_layer.name
    def reverse_sequence(self, x, mask):
        """Here mask.shape is [batch_size, seq_len, 1].
        """
        seq_len = K.round(K.sum(mask, 1)[:, 0])
        seq_len = K.cast(seq_len, 'int32')
        return tf.reverse_sequence(x, seq_len, seq_dim=1)
    def call(self, inputs):
        x, mask = inputs
        x_forward = self.reuse(self.forward_layer, x)
        x_backward = self.reverse_sequence(x, mask)
        x_backward = self.reuse(self.backward_layer, x_backward)
        x_backward = self.reverse_sequence(x_backward, mask)
        x = K.concatenate([x_forward, x_backward], -1)
        if K.ndim(x) == 3:
            return x * mask
        else:
            return x
    def compute_output_shape(self, input_shape):
        return input_shape[0][:-1] + (self.forward_layer.units * 2,)


def seq_avgpool(x):
    """seq has shape [None, seq_len, s_size] and mask has shape
    [None, seq_len, 1]; zero out the masked positions, then average-pool.
    """
    seq, mask = x
    return K.sum(seq * mask, 1) / (K.sum(mask, 1) + 1e-6)


def seq_maxpool(x):
    """seq has shape [None, seq_len, s_size] and mask has shape
    [None, seq_len, 1]; push the masked positions to -1e10, then max-pool.
    """
    seq, mask = x
    seq -= (1 - mask) * 1e10
    return K.max(seq, 1)
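# OurBidirectional computes the true sequence lengths from the mask and uses
# tf.reverse_sequence, so only the real tokens are reversed and the padding
# stays at the end; a plain reversal would also flip the padding, making the
# backward RNN read zeros before it ever sees content.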
class SelfModulatedLayerNormalization(OurLayer):
    """Modeled on Self-Modulated Batch Normalization, except that the
    Batch Normalization is replaced by Layer Normalization.
    """
    def __init__(self, num_hidden, **kwargs):
        super(SelfModulatedLayerNormalization, self).__init__(**kwargs)
        self.num_hidden = num_hidden
    def build(self, input_shape):
        super(SelfModulatedLayerNormalization, self).build(input_shape)
        output_dim = input_shape[0][-1]
        self.layernorm = LayerNormalization(center=False, scale=False)
        self.beta_dense_1 = Dense(self.num_hidden, activation='relu')
        self.beta_dense_2 = Dense(output_dim)
        self.gamma_dense_1 = Dense(self.num_hidden, activation='relu')
        self.gamma_dense_2 = Dense(output_dim)
    def call(self, inputs):
        inputs, cond = inputs
        inputs = self.reuse(self.layernorm, inputs)
        beta = self.reuse(self.beta_dense_1, cond)
        beta = self.reuse(self.beta_dense_2, beta)
        gamma = self.reuse(self.gamma_dense_1, cond)
        gamma = self.reuse(self.gamma_dense_2, gamma)
        for _ in range(K.ndim(inputs) - K.ndim(cond)):
            beta = K.expand_dims(beta, 1)
            gamma = K.expand_dims(gamma, 1)
        return inputs * (gamma + 1) + beta
    def compute_output_shape(self, input_shape):
        return input_shape[0]


class Attention(OurLayer):
    """Multi-head attention.
    """
    def __init__(self, heads, size_per_head, key_size=None,
                 mask_right=False, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.heads = heads
        self.size_per_head = size_per_head
        self.out_dim = heads * size_per_head
        self.key_size = key_size if key_size else size_per_head
        self.mask_right = mask_right
    def build(self, input_shape):
        super(Attention, self).build(input_shape)
        self.q_dense = Dense(self.key_size * self.heads, use_bias=False)
        self.k_dense = Dense(self.key_size * self.heads, use_bias=False)
        self.v_dense = Dense(self.out_dim, use_bias=False)
    def mask(self, x, mask, mode='mul'):
        if mask is None:
            return x
        else:
            for _ in range(K.ndim(x) - K.ndim(mask)):
                mask = K.expand_dims(mask, K.ndim(mask))
            if mode == 'mul':
                return x * mask
            else:
                return x - (1 - mask) * 1e10
    def call(self, inputs):
        q, k, v = inputs[:3]
        v_mask, q_mask = None, None
        if len(inputs) > 3:
            v_mask = inputs[3]
        if len(inputs) > 4:
            q_mask = inputs[4]
        # linear projections
        qw = self.reuse(self.q_dense, q)
        kw = self.reuse(self.k_dense, k)
        vw = self.reuse(self.v_dense, v)
        # reshape to separate the heads
        qw = K.reshape(qw, (-1, K.shape(qw)[1], self.heads, self.key_size))
        kw = K.reshape(kw, (-1, K.shape(kw)[1], self.heads, self.key_size))
        vw = K.reshape(vw, (-1, K.shape(vw)[1], self.heads, self.size_per_head))
        # move the head axis forward
        qw = K.permute_dimensions(qw, (0, 2, 1, 3))
        kw = K.permute_dimensions(kw, (0, 2, 1, 3))
        vw = K.permute_dimensions(vw, (0, 2, 1, 3))
        # scaled dot-product attention
        a = tf.einsum('ijkl,ijml->ijkm', qw, kw) / self.key_size**0.5
        a = K.permute_dimensions(a, (0, 3, 2, 1))
        a = self.mask(a, v_mask, 'add')
        a = K.permute_dimensions(a, (0, 3, 2, 1))
        if self.mask_right:
            ones = K.ones_like(a[:1, :1])
            mask = (ones - tf.matrix_band_part(ones, -1, 0)) * 1e10
            a = a - mask
        a = K.softmax(a)
        # weighted sum of the values
        o = tf.einsum('ijkl,ijlm->ijkm', a, vw)
        o = K.permute_dimensions(o, (0, 2, 1, 3))
        o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim))
        o = self.mask(o, q_mask, 'mul')
        return o
    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], self.out_dim)
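# SelfModulatedLayerNormalization above computes LN(x) * (1 + gamma(cond)) +
# beta(cond), where gamma and beta are small two-layer networks applied to the
# condition vector; in this model the condition is the max-pooled encoder
# state, so the article modulates the normalization of every decoder layer.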
# build the seq2seq model

x_in = Input(shape=(None,))
yl_in = Input(shape=(None,))
yr_in = Input(shape=(None,))
x, yl, yr = x_in, yl_in, yr_in

x_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x)
y_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(yl)

x_one_hot = Lambda(to_one_hot)([x, x_mask])
x_prior = ScaleShift()(x_one_hot)  # learned output prior (title characters tend to occur in the article)

embedding = Embedding(len(chars) + 4, char_size)
x = embedding(x)

# encoder: two bidirectional LSTM layers
x = LayerNormalization()(x)
x = OurBidirectional(CuDNNLSTM(z_dim // 2, return_sequences=True))([x, x_mask])
x = LayerNormalization()(x)
x = OurBidirectional(CuDNNLSTM(z_dim // 2, return_sequences=True))([x, x_mask])
x_max = Lambda(seq_maxpool)([x, x_mask])

# forward (L2R) decoder: unidirectional LSTM
y = embedding(yl)
y = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])
y = CuDNNLSTM(z_dim, return_sequences=True)(y)
y = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])
y = CuDNNLSTM(z_dim, return_sequences=True)(y)
yl = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])

# backward (R2L) decoder: unidirectional LSTM
y = embedding(yr)
y = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])
y = CuDNNLSTM(z_dim, return_sequences=True)(y)
y = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])
y = CuDNNLSTM(z_dim, return_sequences=True)(y)
yr = SelfModulatedLayerNormalization(z_dim // 4)([y, x_max])

# alignment attention (to the other decoder) + retrieval attention (to the encoder)
yl_ = Attention(8, 16, mask_right=True)([yl, yr, yr])
ylx = Attention(8, 16)([yl, x, x, x_mask])
yl = Concatenate()([yl, yl_, ylx])
# alignment attention + retrieval attention
yr_ = Attention(8, 16, mask_right=True)([yr, yl, yl])
yrx = Attention(8, 16)([yr, x, x, x_mask])
yr = Concatenate()([yr, yr_, yrx])

# final classifier (weights shared between the two directions)
classifier = Dense(len(chars) + 4)

yl = Dense(char_size)(yl)
yl = LeakyReLU(0.2)(yl)
yl = classifier(yl)
yl = Lambda(lambda x: (x[0] + x[1]) / 2)([yl, x_prior])  # average with the prior
yl = Activation('softmax')(yl)

yr = Dense(char_size)(yr)
yr = LeakyReLU(0.2)(yr)
yr = classifier(yr)
yr = Lambda(lambda x: (x[0] + x[1]) / 2)([yr, x_prior])  # average with the prior
yr = Activation('softmax')(yr)

# cross-entropy loss, with the padding positions masked out
cross_entropy_1 = K.sparse_categorical_crossentropy(yl_in[:, 1:], yl[:, :-1])
cross_entropy_1 = K.sum(cross_entropy_1 * y_mask[:, 1:, 0]) / K.sum(y_mask[:, 1:, 0])
cross_entropy_2 = K.sparse_categorical_crossentropy(yr_in[:, 1:], yr[:, :-1])
cross_entropy_2 = K.sum(cross_entropy_2 * y_mask[:, 1:, 0]) / K.sum(y_mask[:, 1:, 0])
cross_entropy = (cross_entropy_1 + cross_entropy_2) / 2

model = Model([x_in, yl_in, yr_in], [yl, yr])
model.add_loss(cross_entropy)
model.compile(optimizer=Adam(1e-3))
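# The mask_right=True cross-decoder attentions keep the two directions
# causally consistent: position t of the L2R decoder may only attend to the
# first t states of the R2L decoder (and vice versa), so during step-by-step
# decoding each direction sees what the other has produced so far, but never
# the token it is currently predicting.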
def gen_sent(s, topk=3, maxlen=64):
    """bidirectional beam search decoding:
    keep only the topk best candidates per direction; topk=1 is greedy search
    """
    xid = np.array([str2id(s)] * topk**2)  # input ids
    yl_id = np.array([[2]] * topk)  # L2R decoding starts with <start>, id 2
    yr_id = np.array([[3]] * topk)  # R2L decoding starts with <end>, id 3
    l_scores, r_scores = [0] * topk, [0] * topk  # candidate scores
    l_order, r_order = [], []  # pairing order for the topk * topk combinations
    for i in range(topk):
        for j in range(topk):
            l_order.append(i)
            r_order.append(j)
    for i in range(maxlen):  # cap the output at maxlen characters
        # score both decoding directions at once
        l_proba, r_proba = model.predict([xid, yl_id[l_order], yr_id[r_order]])
        l_proba = l_proba[:, i, 3:]  # ignore <padding>, <unk>, <start>
        r_proba = np.concatenate([r_proba[:, i, 2: 3], r_proba[:, i, 4:]], 1)  # ignore <padding>, <unk>, <end>
        # note: column 0 of the sliced r_proba is the <start> column; the
        # "+ 3" below records it as id 3, matching the == 3 stop check
        # (id2str drops ids 2 and 3 either way)
        l_proba = l_proba.reshape((topk, topk, -1)).mean(1)  # average over the candidate R2L sequences to score the L2R direction
        r_proba = r_proba.reshape((topk, topk, -1)).mean(0)  # average over the candidate L2R sequences to score the R2L direction
        l_log_proba = np.log(l_proba + 1e-6)  # log-probabilities for additive scoring
        r_log_proba = np.log(r_proba + 1e-6)  # log-probabilities for additive scoring
        l_arg_topk = l_log_proba.argsort(axis=1)[:, -topk:]  # topk per candidate
        r_arg_topk = r_log_proba.argsort(axis=1)[:, -topk:]  # topk per candidate
        _yl_id, _yr_id = [], []  # buffers for the new candidate sequences
        _l_scores, _r_scores = [], []  # buffers for the new candidate scores
        if i == 0:
            for j in range(topk):
                _yl_id.append(list(yl_id[j]) + [l_arg_topk[0][j] + 3])
                _l_scores.append(l_log_proba[0][l_arg_topk[0][j]])
                _yr_id.append(list(yr_id[j]) + [r_arg_topk[0][j] + 3])
                _r_scores.append(r_log_proba[0][r_arg_topk[0][j]])
        else:
            for j in range(topk):
                for k in range(topk):  # enumerate the topk * topk combinations
                    _yl_id.append(list(yl_id[j]) + [l_arg_topk[j][k] + 3])
                    _l_scores.append(l_scores[j] + l_log_proba[j][l_arg_topk[j][k]])
                    _yr_id.append(list(yr_id[j]) + [r_arg_topk[j][k] + 3])
                    _r_scores.append(r_scores[j] + r_log_proba[j][r_arg_topk[j][k]])
            _l_arg_topk = np.argsort(_l_scores)[-topk:]  # keep the new topk
            _r_arg_topk = np.argsort(_r_scores)[-topk:]  # keep the new topk
            _yl_id = [_yl_id[k] for k in _l_arg_topk]
            _l_scores = [_l_scores[k] for k in _l_arg_topk]
            _yr_id = [_yr_id[k] for k in _r_arg_topk]
            _r_scores = [_r_scores[k] for k in _r_arg_topk]
        yl_id = np.array(_yl_id)
        yr_id = np.array(_yr_id)
        l_scores = np.array(_l_scores)
        r_scores = np.array(_r_scores)
        l_best_one = l_scores.argmax()
        r_best_one = r_scores.argmax()
        if yl_id[l_best_one][-1] == 3 and l_scores[l_best_one] >= r_scores[r_best_one]:
            return id2str(yl_id[l_best_one])
        if yr_id[r_best_one][-1] == 3 and r_scores[r_best_one] >= l_scores[l_best_one]:
            return id2str(yr_id[r_best_one][::-1])
    # if neither direction ended within maxlen characters, return the best anyway
    l_best_one = l_scores.argmax()
    r_best_one = r_scores.argmax()
    if l_scores[l_best_one] >= r_scores[r_best_one]:
        return id2str(yl_id[l_best_one])
    else:
        return id2str(yr_id[r_best_one][::-1])


s1 = u'夏天来临,皮肤在强烈紫外线的照射下,晒伤不可避免,因此,晒后及时修复显得尤为重要,否则可能会造成长期伤害。专家表示,选择晒后护肤品要慎重,芦荟凝胶是最安全,有效的一种选择,晒伤严重者,还请及时就医。'
s2 = u'8月28日,网络爆料称,华住集团旗下连锁酒店用户数据疑似发生泄露。从卖家发布的内容看,数据包含华住旗下汉庭、禧玥、桔子、宜必思等10余个品牌酒店的住客信息。泄露的信息包括华住官网注册资料、酒店入住登记的身份信息及酒店开房记录,住客姓名、手机号、邮箱、身份证号、登录账号密码等。卖家对这个约5亿条数据打包出售。第三方安全平台威胁猎人对信息出售者提供的三万条数据进行验证,认为数据真实性非常高。当天下午,华住集团发声明称,已在内部迅速开展核查,并第一时间报警。当晚,上海警方消息称,接到华住集团报案,警方已经介入调查。'

class Evaluate(Callback):
    def __init__(self):
        self.lowest = 1e10
    def on_epoch_end(self, epoch, logs=None):
        # print a couple of examples during training to watch the titles improve
        print gen_sent(s1)
        print gen_sent(s2)
        # keep the best weights so far
        if logs['loss'] <= self.lowest:
            self.lowest = logs['loss']
            model.save_weights('./best_model.weights')


evaluator = Evaluate()

model.fit_generator(data_generator(),
                    steps_per_epoch=1000,
                    epochs=epochs,
                    callbacks=[evaluator])
--------------------------------------------------------------------------------