├── README.md
└── el.py

/README.md:
--------------------------------------------------------------------------------
# el-2019-baseline
A baseline for the 2019 Baidu entity linking competition (CCKS 2019, https://biendata.com/competition/ccks_2019_el/ ).

Note: the official version is now maintained at https://github.com/bojone/el-2019

## Model
A BiLSTM tags the entity mentions, which are then matched against entity IDs.

1. The tagging scheme is the "half-pointer, half-tagging" structure that I have introduced before ( https://kexue.fm/archives/5409 , https://github.com/bojone/kg-2019-baseline ). I designed this scheme myself; having read many papers on entity recognition, I have not seen a similar approach. So if you build modifications on top of this model and end up winning a prize or publishing a paper, please cite it (not that I expect too much):

```
@misc{
  jianlin2019bdel,
  title={Hybrid Structure of Pointer and Tagging for Entity Recognition and Linking: A Baseline},
  author={Jianlin Su},
  year={2019},
  publisher={GitHub},
  howpublished={\url{https://github.com/bojone/el-2019-baseline}},
}
```
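To make the scheme concrete, here is a minimal sketch of the target encoding (the helper name `encode_mention` and the example sentence are made up for illustration; `el.py` below builds the same `s1`/`s2` vectors inline):

```
import numpy as np

def encode_mention(text, mention, offset):
    # Two 0/1 vectors over the characters of `text`:
    # s1 marks where a mention starts, s2 marks where it ends.
    s1 = np.zeros(len(text))
    s2 = np.zeros(len(text))
    s1[offset] = 1                       # first character of the mention
    s2[offset + len(mention) - 1] = 1    # last character of the mention
    return s1, s2

# u'浙江大学' occupies offsets 0-3 of the 7-character sentence:
s1, s2 = encode_mention(u'浙江大学在杭州', u'浙江大学', 0)
# s1 = [1, 0, 0, 0, 0, 0, 0], s2 = [0, 0, 0, 1, 0, 0, 0]
```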
2. After the mentions are recognized, the model then resolves the concrete entity ID. The method: first, all of an entity's "predicate, object" pairs in the knowledge base are concatenated into one piece of text, which serves as the description of that entity ID; then the query sentence, the mention annotation, and the entity-ID description are fed into a binary classifier that decides whether they match.
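For example, given a hypothetical KB record (the field names match the `kb_data` format read by `el.py` below; the values are invented):

```
# A hypothetical kb_data record.
record = {
    'subject_id': '10001',
    'subject': u'比亚迪',
    'alias': [u'byd'],
    'data': [
        {'predicate': u'类型', 'object': u'汽车公司'},
        {'predicate': u'总部', 'object': u'深圳'},
    ],
}

# All "predicate:object" pairs, one per line, become the ID's description:
desc = '\n'.join(u'%s:%s' % (i['predicate'], i['object']) for i in record['data'])
# desc == u'类型:汽车公司\n总部:深圳'
```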
## Usage
Just run `python el.py`. On a GTX 1060, one epoch takes about 4 minutes to train and 6 minutes to validate, so each epoch takes roughly 10 minutes in total.

## Results
After about 5 epochs, F1 on the locally held-out validation set should reach 0.61~0.62, and in my runs the final F1 got to 0.65+ (f1: 0.6576, precision: 0.7285, recall: 0.5993). The model with the best F1 is saved automatically.

Training is stochastic, so it is best to run it several times and keep the best model.

## Environment
Python 2.7 + Keras 2.2.4 + TensorFlow 1.8. The part that matters most is Python 2.7: if you use Python 3, you will need to change a few lines. Which lines? Work it out yourself; I am not your debugger.

Welcome to the Keras camp. Life is short, I use Keras~

## Disclaimer
Feel free to test and modify this code, but it is a fairly early model of mine, and some of the practices in this file have been abandoned in my latest version. So if you later find something unreasonable here, please don't accuse me of deliberately leading everyone astray.

You are welcome to discuss things with me, but please try to raise meaningful questions rather than debugging requests. (If you are not familiar with Keras, spend a week teaching yourself Keras first.)

In particular: this baseline is meant for competition participants to experiment with. If you have missed the registration deadline but still want the training data, please find a way to request it from the organizers yourself; I do not provide a data download service.

## Links
- https://kexue.fm
- https://keras.io

/el.py:
--------------------------------------------------------------------------------
#! -*- coding: utf-8 -*-
# A baseline for the 2019 Baidu entity linking competition
# (CCKS 2019, https://biendata.com/competition/ccks_2019_el/).

import json
from tqdm import tqdm
import os
import numpy as np
from random import choice
from itertools import groupby


mode = 0          # which of the 9 folds to hold out as the dev set
min_count = 2     # characters rarer than this map to "unknown"
char_size = 128   # embedding and hidden size


# Load the knowledge base: for each entity ID keep its aliases (lowercased)
# and a description built from all of its "predicate:object" pairs.
id2kb = {}
with open('../ccks2019_el/kb_data') as f:
    for l in tqdm(f):
        _ = json.loads(l)
        subject_id = _['subject_id']
        subject_alias = list(set([_['subject']] + _.get('alias', [])))
        subject_alias = [alias.lower() for alias in subject_alias]
        subject_desc = '\n'.join(u'%s:%s' % (i['predicate'], i['object']) for i in _['data'])
        subject_desc = subject_desc.lower()
        if subject_desc:
            id2kb[subject_id] = {'subject_alias': subject_alias, 'subject_desc': subject_desc}


# Invert the alias table: map each alias string to its candidate entity IDs.
kb2id = {}
for i, j in id2kb.items():
    for k in j['subject_alias']:
        if k not in kb2id:
            kb2id[k] = []
        kb2id[k].append(i)


# Load the training set, keeping only mentions that are linked (kb_id != 'NIL').
train_data = []
with open('../ccks2019_el/train.json') as f:
    for l in tqdm(f):
        _ = json.loads(l)
        train_data.append({
            'text': _['text'].lower(),
            'mention_data': [(x['mention'].lower(), int(x['offset']), x['kb_id'])
                for x in _['mention_data'] if x['kb_id'] != 'NIL'
            ]
        })


# Build (or reload) the character vocabulary from the KB descriptions and
# the training texts. IDs 0 and 1 are reserved.
if not os.path.exists('../all_chars_me.json'):
    chars = {}
    for d in tqdm(iter(id2kb.values())):
        for c in d['subject_desc']:
            chars[c] = chars.get(c, 0) + 1
    for d in tqdm(iter(train_data)):
        for c in d['text']:
            chars[c] = chars.get(c, 0) + 1
    chars = {i: j for i, j in chars.items() if j >= min_count}
    id2char = {i + 2: j for i, j in enumerate(chars)}  # 0: padding (masked), 1: unknown
    char2id = {j: i for i, j in id2char.items()}
    json.dump([id2char, char2id], open('../all_chars_me.json', 'w'))
else:
    id2char, char2id = json.load(open('../all_chars_me.json'))


# Fix a random permutation of the training set so the train/dev split is
# reproducible across runs.
if not os.path.exists('../random_order_train.json'):
    random_order = range(len(train_data))
    np.random.shuffle(random_order)
    json.dump(
        random_order,
        open('../random_order_train.json', 'w'),
        indent=4
    )
else:
    random_order = json.load(open('../random_order_train.json'))


# 1/9 of the data is held out as the dev set, the other 8/9 for training.
dev_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 == mode]
train_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 != mode]


def seq_padding(X, padding=0):
    """Pad a batch of sequences with `padding` up to the longest length."""
    L = [len(x) for x in X]
    ML = max(L)
    return np.array([
        np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X
    ])


class data_generator:
    def __init__(self, data, batch_size=64):
        self.data = data
        self.batch_size = batch_size
        self.steps = len(self.data) // self.batch_size
        if len(self.data) % self.batch_size != 0:
            self.steps += 1
    def __len__(self):
        return self.steps
    def __iter__(self):
        while True:
            idxs = range(len(self.data))
            np.random.shuffle(idxs)
            X1, X2, S1, S2, Y, T = [], [], [], [], [], []
            for i in idxs:
                d = self.data[i]
                text = d['text']
                x1 = [char2id.get(c, 1) for c in text]  # 1 = unknown char
                # Half-pointer, half-tagging targets: s1 marks mention
                # starts, s2 marks mention ends.
                s1, s2 = np.zeros(len(text)), np.zeros(len(text))
                mds = {}
                for md in d['mention_data']:
                    if md[0] in kb2id:
                        j1 = md[1]
                        j2 = j1 + len(md[0])
                        s1[j1] = 1
                        s2[j2 - 1] = 1
                        mds[(j1, j2)] = (md[0], md[2])
                if mds:
                    # Sample one mention and one candidate entity ID for the
                    # linking task; t = 1 iff the candidate is the gold entity.
                    j1, j2 = choice(mds.keys())
                    y = np.zeros(len(text))
                    y[j1: j2] = 1  # mark the sampled mention span
                    x2 = choice(kb2id[mds[(j1, j2)][0]])
                    if x2 == mds[(j1, j2)][1]:
                        t = [1]
                    else:
                        t = [0]
                    x2 = id2kb[x2]['subject_desc']
                    x2 = [char2id.get(c, 1) for c in x2]
                    X1.append(x1)
                    X2.append(x2)
                    S1.append(s1)
                    S2.append(s2)
                    Y.append(y)
                    T.append(t)
                    if len(X1) == self.batch_size or i == idxs[-1]:
                        X1 = seq_padding(X1)
                        X2 = seq_padding(X2)
                        S1 = seq_padding(S1)
                        S2 = seq_padding(S2)
                        Y = seq_padding(Y)
                        T = seq_padding(T)
                        yield [X1, X2, S1, S2, Y, T], None
                        X1, X2, S1, S2, Y, T = [], [], [], [], [], []
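# Illustration of the batch layout produced above (hypothetical shapes):
# X1/X2 are padded char-id matrices for the query text and the sampled
# candidate's description, S1/S2 are the start/end tagging targets, Y marks
# the sampled mention span, and T is the 0/1 match label, e.g.
#
#     gen = data_generator(train_data, batch_size=4)
#     [X1, X2, S1, S2, Y, T], _ = next(iter(gen))
#     # X1: (4, L1), X2: (4, L2), S1/S2/Y: (4, L1), T: (4, 1)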
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.callbacks import Callback
from keras.optimizers import Adam


def seq_maxpool(x):
    """seq has shape [None, seq_len, s_size] and mask has shape
    [None, seq_len, 1]; push the masked positions down to -1e10 so they
    never win, then max-pool over the time dimension.
    """
    seq, mask = x
    seq -= (1 - mask) * 1e10
    return K.max(seq, 1)


x1_in = Input(shape=(None,))  # sentence to tag (char ids)
x2_in = Input(shape=(None,))  # entity description text (char ids)
s1_in = Input(shape=(None,))  # entity left boundary (label)
s2_in = Input(shape=(None,))  # entity right boundary (label)
y_in = Input(shape=(None,))   # marker of the sampled mention span
t_in = Input(shape=(1,))      # whether sentence and entity match (label)


x1, x2, s1, s2, y, t = x1_in, x2_in, s1_in, s2_in, y_in, t_in
# Masks: 1 for real characters, 0 for padding (id 0).
x1_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x1)
x2_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x2)

embedding = Embedding(len(id2char) + 2, char_size)  # +2 for the reserved ids; shared by both inputs


# Encode the query sentence with a two-layer BiLSTM, re-applying the mask
# after every layer.
x1 = embedding(x1)
x1 = Dropout(0.2)(x1)
x1 = Lambda(lambda x: x[0] * x[1])([x1, x1_mask])
x1 = Bidirectional(CuDNNLSTM(char_size // 2, return_sequences=True))(x1)
x1 = Lambda(lambda x: x[0] * x[1])([x1, x1_mask])
x1 = Bidirectional(CuDNNLSTM(char_size // 2, return_sequences=True))(x1)
x1 = Lambda(lambda x: x[0] * x[1])([x1, x1_mask])

# Pointer heads: per-character probabilities of being a mention start/end.
h = Conv1D(char_size, 3, activation='relu', padding='same')(x1)
ps1 = Dense(1, activation='sigmoid')(h)
ps2 = Dense(1, activation='sigmoid')(h)

s_model = Model(x1_in, [ps1, ps2])  # mention tagging model


# Append the mention-span marker y as an extra feature channel for linking.
y = Lambda(lambda x: K.expand_dims(x, 2))(y)
x1 = Concatenate()([x1, y])
x1 = Conv1D(char_size, 3, padding='same')(x1)

# Encode the candidate entity description the same way.
x2 = embedding(x2)
x2 = Dropout(0.2)(x2)
x2 = Lambda(lambda x: x[0] * x[1])([x2, x2_mask])
x2 = Bidirectional(CuDNNLSTM(char_size // 2, return_sequences=True))(x2)
x2 = Lambda(lambda x: x[0] * x[1])([x2, x2_mask])
x2 = Bidirectional(CuDNNLSTM(char_size // 2, return_sequences=True))(x2)
x2 = Lambda(lambda x: x[0] * x[1])([x2, x2_mask])

# Max-pool both sides and score the match with an interaction feature.
x1 = Lambda(seq_maxpool)([x1, x1_mask])
x2 = Lambda(seq_maxpool)([x2, x2_mask])
x12 = Multiply()([x1, x2])
x = Concatenate()([x1, x2, x12])
x = Dense(char_size, activation='relu')(x)
pt = Dense(1, activation='sigmoid')(x)

t_model = Model([x1_in, x2_in, y_in], pt)  # entity linking model


train_model = Model([x1_in, x2_in, s1_in, s2_in, y_in, t_in],
                    [ps1, ps2, pt])

s1 = K.expand_dims(s1, 2)
s2 = K.expand_dims(s2, 2)

# Masked binary cross-entropy for the two pointer heads, plus plain binary
# cross-entropy for the linking head.
s1_loss = K.binary_crossentropy(s1, ps1)
s1_loss = K.sum(s1_loss * x1_mask) / K.sum(x1_mask)
s2_loss = K.binary_crossentropy(s2, ps2)
s2_loss = K.sum(s2_loss * x1_mask) / K.sum(x1_mask)
pt_loss = K.mean(K.binary_crossentropy(t, pt))

loss = s1_loss + s2_loss + pt_loss

train_model.add_loss(loss)
train_model.compile(optimizer=Adam(1e-3))
train_model.summary()


def extract_items(text_in):
    """Tag mentions with s_model, then pick the best entity ID for each
    mention by scoring all of its KB candidates with t_model."""
    _x1 = [char2id.get(c, 1) for c in text_in]
    _x1 = np.array([_x1])
    _k1, _k2 = s_model.predict(_x1)
    _k1, _k2 = _k1[0, :, 0], _k2[0, :, 0]
    _k1, _k2 = np.where(_k1 > 0.5)[0], np.where(_k2 > 0.5)[0]
    _subjects = []
    for i in _k1:
        j = _k2[_k2 >= i]  # match each start with the nearest end after it
        if len(j) > 0:
            j = j[0]
            _subject = text_in[i: j + 1]
            _subjects.append((_subject, i, j + 1))
    if _subjects:
        R = []
        _X2, _Y = [], []
        _S, _IDXS = [], {}
        for _s in _subjects:
            _y = np.zeros(len(text_in))
            _y[_s[1]: _s[2]] = 1
            _IDXS[_s] = kb2id.get(_s[0], [])
            for i in _IDXS[_s]:
                _x2 = id2kb[i]['subject_desc']
                _x2 = [char2id.get(c, 1) for c in _x2]
                _X2.append(_x2)
                _Y.append(_y)
                _S.append(_s)
        if _X2:
            _X2 = seq_padding(_X2)
            _Y = seq_padding(_Y)
            _X1 = np.repeat(_x1, len(_X2), 0)
            scores = t_model.predict([_X1, _X2, _Y])[:, 0]
            # Candidates of the same mention are consecutive in _S, so
            # groupby collects them; keep the highest-scoring entity ID.
            for k, v in groupby(zip(_S, scores), key=lambda s: s[0]):
                v = np.array([j[1] for j in v])
                kbid = _IDXS[k][np.argmax(v)]
                R.append((k[0], k[1], kbid))
        return R
    else:
        return []


class Evaluate(Callback):
    def __init__(self):
        self.F1 = []
        self.best = 0.
    def on_epoch_end(self, epoch, logs=None):
        f1, precision, recall = self.evaluate()
        self.F1.append(f1)
        if f1 > self.best:
            self.best = f1
            train_model.save_weights('best_model.weights')
        print 'f1: %.4f, precision: %.4f, recall: %.4f, best f1: %.4f\n' % (f1, precision, recall, self.best)
    def evaluate(self):
        # Micro-averaged F1 over (mention, offset, kb_id) triples.
        A, B, C = 1e-10, 1e-10, 1e-10
        for d in tqdm(iter(dev_data)):
            R = set(extract_items(d['text']))
            T = set(d['mention_data'])
            A += len(R & T)
            B += len(R)
            C += len(T)
        return 2 * A / (B + C), A / B, A / C


evaluator = Evaluate()
train_D = data_generator(train_data)

train_model.fit_generator(train_D.__iter__(),
                          steps_per_epoch=len(train_D),
                          epochs=40,
                          callbacks=[evaluator]
                          )
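# A sketch of how to run inference after training (not part of the original
# training loop): reload the best checkpoint saved by Evaluate, then call
# extract_items on a raw lowercased sentence (the example sentence is invented).
#
#     train_model.load_weights('best_model.weights')
#     print extract_items(u'比亚迪是一家汽车公司')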