├── README.md ├── code ├── main.sh ├── metric_utils.py ├── run_NEZHA.py ├── run_UER.py ├── sequence_labeling.py ├── special_tokens.py ├── word_embedding_matrix.npy └── word_embedding_matrix_v2.npy ├── data └── External │ └── UER-large │ └── checkpoint ├── requirments.txt └── test ├── accum_optimizer.py ├── metric_utils.py ├── sequence_labeling.py ├── special_tokens.py ├── test.py ├── vocab.txt ├── vocab_chinese.txt └── vocab_clue.txt /README.md: -------------------------------------------------------------------------------- 1 | # 思路 2 | 3 | ## 环境 4 | - Red Hat 4.8.5-16 5 | - python 3.7.3 6 | - cudatoolkit 10.0.130 7 | - cudnn 7.6.0 8 | 9 | ## 方案 10 | - 根据数据传递性 数据增强 11 | - 特征融合(预训练模型 + 腾讯词向量 + fasttext词向量) 12 | - 预训练模型NEZHA(https://github.com/huawei-noah/Pretrained-Language-Model/tree/master/NEZHA) 13 | - 预训练模型UER(https://github.com/dbiir/UER-py) 14 | - 腾讯词向量(https://ai.tencent.com/ailab/nlp/embedding.html) 15 | - fasttext词向量(https://fasttext.cc/) 16 | - 对抗学习(FGM) 17 | - 模型融合 两个5fold模型ensemble 18 | 19 | ## 数据和预训练模自行下载 20 | 21 | ## 运行 22 | ```shell 23 | > cd code 24 | > sh main.sh 25 | ``` 26 | -------------------------------------------------------------------------------- /code/main.sh: -------------------------------------------------------------------------------- 1 | python run_UER.py 2 | python run_NEZHA.py 3 | cd ../test 4 | python test.py 5 | 6 | -------------------------------------------------------------------------------- /code/metric_utils.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import re 3 | import string 4 | 5 | 6 | def normalize_answer_v2(s): 7 | """Lower text and remove punctuation, articles and extra whitespace.""" 8 | def remove_space(text): 9 | regex = re.compile('\s+', re.UNICODE) 10 | return re.sub(regex, '', text) 11 | 12 | def remove_punc(text): 13 | punctuations = '~.!?' + '﹔·!?。。' + string.punctuation 14 | exclude = set(punctuations) 15 | return ''.join(ch for ch in text if ch not in exclude) 16 | 17 | def lower(text): 18 | return text.lower() 19 | 20 | return remove_space(s) 21 | 22 | 23 | def get_tokens(s): 24 | if not s: 25 | return [] 26 | return list(normalize_answer_v2(s)) 27 | 28 | 29 | def compute_exact(a_gold, a_pred): 30 | return int(normalize_answer_v2(a_gold) == normalize_answer_v2(a_pred)) 31 | 32 | 33 | def compute_f1(a_gold, a_pred): 34 | gold_toks = get_tokens(a_gold) 35 | pred_toks = get_tokens(a_pred) 36 | common = collections.Counter(gold_toks) & collections.Counter(pred_toks) 37 | num_same = sum(common.values()) 38 | if len(gold_toks) == 0 or len(pred_toks) == 0: 39 | # If either is no-answer, then F1 is 1 if they agree, 0 otherwise 40 | return int(gold_toks == pred_toks) 41 | if num_same == 0: 42 | return 0 43 | precision = 1.0 * num_same / len(pred_toks) 44 | recall = 1.0 * num_same / len(gold_toks) 45 | f1 = (2 * precision * recall) / (precision + recall) 46 | return f1 -------------------------------------------------------------------------------- /code/run_NEZHA.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[1]: 5 | 6 | 7 | import collections 8 | import gc 9 | import json 10 | import os 11 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 12 | from random import choice, seed, randint, random 13 | import pandas as pd 14 | import numpy as np 15 | import tensorflow as tf 16 | import keras.backend as K 17 | import keras 18 | from keras.models import Sequential, Model 19 | from keras.layers import Input, CuDNNGRU as GRU, CuDNNLSTM as LSTM, Dropout, BatchNormalization 20 | from keras.layers import Dense, Concatenate, Activation, Embedding, SpatialDropout1D, Bidirectional, Lambda, Conv1D 21 | from keras.layers import Add, Average 22 | from keras.optimizers import Nadam, Adam, Adamax 23 | from keras.activations import absolute_import 24 | from keras.legacy import interfaces 25 | # from keras.preprocessing.sequence import pad_sequaencesget_ 26 | from keras.callbacks import Callback 27 | from keras.utils import to_categorical 28 | from sklearn.model_selection import KFold as KF 29 | from sklearn.model_selection import StratifiedKFold as SKF 30 | from keras_bert.loader import load_trained_model_from_checkpoint 31 | from keras_bert import AdamWarmup, calc_train_steps 32 | from keras.engine import Layer 33 | from keras.engine import InputSpec 34 | from keras.objectives import categorical_crossentropy 35 | from keras.objectives import sparse_categorical_crossentropy 36 | from keras import activations, initializers, regularizers, constraints 37 | from keras.models import load_model 38 | from keras_bert import get_custom_objects 39 | from tqdm import tqdm 40 | from sklearn.metrics import roc_auc_score, accuracy_score 41 | from special_tokens import CHINESE_MAP 42 | from metric_utils import compute_f1, compute_exact 43 | from collections import OrderedDict, Counter 44 | from sklearn.metrics import classification_report 45 | from time import time 46 | 47 | 48 | # In[2]: 49 | 50 | 51 | BERT_PRETRAINED_DIR = "../data/External/NEZHA_large/NEZHA-Large/" 52 | TRN_FILENAME = "../data/train_20200228.csv" 53 | DEV_FILENAME = "../data/dev_20200228.csv" 54 | SAVE_DIR = "../user_data/" 55 | PREFIX = "nezha_v13_augm" 56 | if "large-clue" in BERT_PRETRAINED_DIR or "large-pair" in BERT_PRETRAINED_DIR: 57 | W2V_FILE = "./word_embedding_matrix_v2" 58 | else: 59 | W2V_FILE = "./word_embedding_matrix" 60 | MAX_EPOCH = 15 61 | RUN_EPOCH = 10 62 | MAX_LEN = 60 63 | MAX_DOC_LEN = MAX_LEN // 2 64 | THRE = 0.5 65 | B_SIZE = 32 66 | ACCUM_STEP = int(32 // B_SIZE) 67 | FOLD_ID = list(range(5, 10)) 68 | FOLD_NUM = 25 69 | SEED = 2020 70 | 71 | SHUFFLE = True 72 | DOC_STRIDE = 128 73 | cfg = {} 74 | 75 | cfg["base_dir"] = BERT_PRETRAINED_DIR 76 | cfg["span_mode"] = True 77 | cfg["lr"] = 6e-6 78 | cfg['min_lr'] = 6e-8 79 | cfg["ch_type"] = "tx_ft" 80 | cfg["trainable"] = True 81 | cfg["bert_trainable"] = True 82 | cfg["accum_step"] = ACCUM_STEP 83 | cfg["cls_num"] = 4 84 | cfg["unit1"] = 128 85 | cfg["unit2"] = 128 86 | cfg["unit3"] = 512 87 | cfg["conv_num"] = 128 88 | cfg['maxlen'] = MAX_LEN 89 | cfg["adv_training"] = True 90 | cfg["W2V_FILE"] = W2V_FILE 91 | cfg["use_embed"] = True 92 | cfg["use_embed_v2"] = True 93 | PREFIX += "_seed" + str(SEED) 94 | cfg["verbose"] = PREFIX 95 | PREFIX = PREFIX + "_embed_v2" if cfg["use_embed_v2"] else PREFIX 96 | 97 | train_data = pd.read_csv(TRN_FILENAME) 98 | train_data.fillna("", inplace=True) 99 | dev_data = pd.read_csv(DEV_FILENAME) 100 | dev_data.fillna("", inplace=True) 101 | all_data = pd.concat([train_data, dev_data], axis=0, ignore_index=True) 102 | 103 | def get_data(df_data): 104 | 105 | df_gb = df_data.groupby('query1') 106 | res = {} 107 | for index, data in df_gb: 108 | query2s = data["query2"] 109 | lables = data["label"] 110 | ele = {} 111 | pos_qs = [] 112 | neg_qs = [] 113 | for q, lable in zip(query2s, lables): 114 | if lable == 1: 115 | pos_qs.append(q) 116 | elif lable == 0: 117 | neg_qs.append(q) 118 | else: 119 | print("wrong data", index, q, lable) 120 | ele["pos"] = pos_qs 121 | ele["neg"] = neg_qs 122 | res[index] = ele 123 | return res 124 | 125 | # train_data_dict = get_data(train_data) 126 | 127 | 128 | # In[3]: 129 | 130 | 131 | def get_vocab(base_dir=BERT_PRETRAINED_DIR, albert=False): 132 | if albert or "albert"in cfg["verbose"].lower(): 133 | dict_path = os.path.join(base_dir, 'vocab_chinese.txt') 134 | else: 135 | dict_path = os.path.join(base_dir, 'vocab.txt') 136 | with open(dict_path, mode="r", encoding="utf8") as f: 137 | lines = f.readlines() 138 | lines = [l.strip() for l in lines] 139 | 140 | word_index = {v: k for k, v in enumerate(lines)} 141 | for k, v in CHINESE_MAP.items(): 142 | assert v in word_index 143 | if k in word_index: 144 | print("[!] CHINESE_MAP k = {} is in word_index, DON'T using `{}` to replace".format(k, v)) 145 | continue 146 | # word_index[k] = word_index[v] 147 | del word_index[v] 148 | return word_index 149 | 150 | 151 | def get_label(): 152 | labels = ["0", "1"] 153 | label2id = {k: v for v, k in enumerate(labels)} 154 | id2label = {v: k for k, v in label2id.items()} 155 | return label2id, id2label, labels 156 | 157 | 158 | def get_coefs(word, *arr): 159 | return word, np.asarray(arr, dtype=np.float16) 160 | 161 | 162 | def load_embed(path, dim=300, word_index=None): 163 | embedding_index = {} 164 | with open(path, mode="r", encoding="utf8") as f: 165 | lines = f.readlines() 166 | for l in lines: 167 | l = l.strip().split() 168 | word, arr = l[0], l[1:] 169 | if len(arr) != dim: 170 | print("[!] l = {}".format(l)) 171 | continue 172 | if word_index and word not in word_index: 173 | continue 174 | word, arr = get_coefs(word, arr) 175 | embedding_index[word] = arr 176 | return embedding_index 177 | 178 | 179 | def build_matrix(path, word_index=None, max_features=None, dim=300): 180 | embedding_index = load_embed(path, dim=dim, word_index=word_index) 181 | max_features = len(word_index) + 1 if max_features is None else max_features 182 | embedding_matrix = np.zeros((max_features + 1, dim)) 183 | unknown_words = [] 184 | 185 | for word, i in word_index.items(): 186 | if i <= max_features: 187 | try: 188 | embedding_matrix[i] = embedding_index[word] 189 | except KeyError: 190 | unknown_words.append(word) 191 | return embedding_matrix, unknown_words 192 | 193 | 194 | def load_word_embed(word_embed_f1="../../../chinese_embedding/Tencent_AILab_ChineseEmbedding.txt", 195 | word_embed_f2="../../../chinese_embedding/cc.zh.300.vec", 196 | save_filename=W2V_FILE, 197 | word_index=None): 198 | if os.path.exists(save_filename + ".npy"): 199 | word_embedding_matrix = np.load(save_filename + ".npy").astype("float32") 200 | else: 201 | if "tx" in cfg["ch_type"]: 202 | tx_embed, tx_unk = build_matrix(word_embed_f1, word_index=word_index, dim=200) 203 | else: 204 | tx_embed = np.zeros(shape=(len(word_index) + 2, 0)) 205 | tx_unk = [] 206 | if "ft" in cfg["ch_type"]: 207 | ft_embed, ft_unk = build_matrix(word_embed_f2, word_index=word_index, dim=300) 208 | else: 209 | ft_embed = np.zeros(shape=(len(word_index) + 2, 0)) 210 | ft_unk = [] 211 | 212 | word_embedding_matrix = np.concatenate([tx_embed, ft_embed], axis=-1).astype("float32") 213 | print(word_embedding_matrix.shape, len(tx_unk), len(ft_unk)) 214 | np.save(save_filename, word_embedding_matrix ) 215 | return word_embedding_matrix 216 | 217 | 218 | word_index = get_vocab() 219 | label2id, id2label, labels = get_label() 220 | word_embedding_matrix = load_word_embed(word_index=word_index) 221 | 222 | NUM_CLASS = len(label2id) 223 | cfg["x_pad"] = word_index["[PAD]"] 224 | cfg["num_class"] = NUM_CLASS 225 | cfg["filename"] = "{}_{}_{}_{}".format(PREFIX, cfg["ch_type"], FOLD_NUM, cfg["lr"]) 226 | cfg["filename"] = cfg["filename"] + "_adv_training" if cfg["adv_training"] else cfg["filename"] 227 | cfg["filename"] = cfg["filename"] + "_embed" if cfg["use_embed"] else cfg["filename"] 228 | cfg["filename"] = cfg["filename"] + "_v2" if cfg["use_embed_v2"]and cfg["use_embed"] else cfg["filename"] 229 | print(label2id, id2label, labels, len(word_index), cfg["filename"]) 230 | 231 | 232 | # In[4]: 233 | 234 | 235 | def build_model(cfg, summary=False, word_embedding_matrix=None, bert_summary=False): 236 | def _get_model(base_dir, cfg_=None): 237 | if "albert"in cfg["verbose"].lower(): 238 | from bert4keras.bert import build_bert_model 239 | config_file = os.path.join(base_dir, 'albert_config.json') 240 | checkpoint_file = os.path.join(base_dir, 'model.ckpt-best') 241 | model = build_bert_model( 242 | config_path=config_file, 243 | checkpoint_path=checkpoint_file, 244 | model='albert', 245 | return_keras_model=True 246 | ) 247 | if cfg_["cls_num"] > 1: 248 | output = Concatenate(axis=-1)([model.get_layer("Encoder-1-FeedForward-Norm").get_output_at(-i) for i in range(1, cfg["cls_num"] + 1)]) 249 | model = Model(model.inputs[: 2], outputs=output) 250 | model.trainable = cfg_["bert_trainable"] 251 | elif "nezha_wwm"in cfg["verbose"].lower(): 252 | from bert4keras.bert import build_bert_model 253 | config_file = os.path.join(base_dir, 'bert_config.json') 254 | checkpoint_file = os.path.join(base_dir, 'model.ckpt-346400') 255 | model = build_bert_model( 256 | config_path=config_file, 257 | checkpoint_path=checkpoint_file, 258 | model='nezha', 259 | return_keras_model=True 260 | ) 261 | if bert_summary: 262 | model.summary() 263 | if cfg_["cls_num"] > 1: 264 | output = Concatenate(axis=-1)([ 265 | model.get_layer("Encoder-{}-FeedForward-Norm".format(24 - i)).output 266 | for i in range(0, cfg["cls_num"])]) 267 | 268 | model = Model(model.inputs[: 2], outputs=output) 269 | model = Model(model.inputs[: 2], outputs=output) 270 | model.trainable = cfg_["bert_trainable"] 271 | elif "nezha"in cfg["verbose"].lower(): 272 | from bert4keras.bert import build_bert_model 273 | config_file = os.path.join(base_dir, 'bert_config.json') 274 | checkpoint_file = os.path.join(base_dir, 'model.ckpt-325810') 275 | model = build_bert_model( 276 | config_path=config_file, 277 | checkpoint_path=checkpoint_file, 278 | model='nezha', 279 | return_keras_model=True, 280 | ) 281 | if bert_summary: 282 | model.summary() 283 | 284 | if cfg_["cls_num"] > 1: 285 | output = Concatenate(axis=-1)( 286 | [model.get_layer("Encoder-{}-FeedForward-Norm".format(24 - i)).output 287 | for i in range(0, cfg["cls_num"])]) 288 | model = Model(model.inputs[: 2], outputs=output) 289 | model.trainable = cfg_["bert_trainable"] 290 | else: 291 | config_file = os.path.join(base_dir, 'bert_config.json') 292 | checkpoint_file = os.path.join(base_dir, 'bert_model.ckpt') 293 | if not os.path.exists(config_file): 294 | config_file = os.path.join(base_dir, 'bert_config_large.json') 295 | checkpoint_file = os.path.join(base_dir, 'roberta_l24_large_model') 296 | model = load_trained_model_from_checkpoint(config_file, 297 | checkpoint_file, 298 | training=False, 299 | trainable=cfg_["bert_trainable"], 300 | output_layer_num=cfg_["cls_num"], 301 | seq_len=cfg_['maxlen']) 302 | 303 | # model = Model(inputs=model.inputs[: 2], outputs=model.layers[-7].output) 304 | print(config_file, checkpoint_file) 305 | return model 306 | 307 | def _get_opt(num_example, warmup_proportion=0.1, lr=2e-5, min_lr=None): 308 | total_steps, warmup_steps = calc_train_steps( 309 | num_example=num_example, 310 | batch_size=B_SIZE, 311 | epochs=MAX_EPOCH, 312 | warmup_proportion=warmup_proportion, 313 | ) 314 | opt = AdamWarmup(total_steps, warmup_steps, lr=lr, min_lr=min_lr) 315 | if cfg.get("accum_step", None) and cfg["accum_step"] > 1: 316 | print("[!] using accum_step = {}".format(cfg["accum_step"])) 317 | from accum_optimizer import AccumOptimizer 318 | opt = AccumOptimizer(opt, steps_per_update=cfg["accum_step"]) 319 | 320 | return opt 321 | 322 | bert_model = _get_model(cfg["base_dir"], cfg) 323 | 324 | if word_embedding_matrix is not None: 325 | embed = Embedding(input_dim=word_embedding_matrix.shape[0], 326 | output_dim=word_embedding_matrix.shape[1], 327 | weights=[word_embedding_matrix], 328 | trainable=cfg["trainable"], 329 | name="char_embed" 330 | ) 331 | 332 | t1_in = Input(shape=(None, )) 333 | t2_in = Input(shape=(None, )) 334 | o1_in = Input(shape=(1, )) 335 | o2_in = Input(shape=(1, )) 336 | 337 | t1, t2, o1, o2 = t1_in, t2_in, o1_in, o2_in 338 | 339 | t = bert_model([t1, t2]) 340 | mask = Lambda(lambda x: K.cast(K.not_equal(x, cfg["x_pad"]), 'float32'))(t1) 341 | ## Char information 342 | if word_embedding_matrix is not None: 343 | word_embed = embed(t1) 344 | if cfg.get("use_embed_v2", False): 345 | _t2 = Lambda(lambda x: K.expand_dims(x, axis=-1))(t2) 346 | word_embed = Concatenate(axis=-1)([word_embed, _t2]) 347 | word_embed = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([word_embed, mask]) 348 | word_embed = Bidirectional(LSTM(cfg["unit1"], return_sequences=True), merge_mode="sum")(word_embed) 349 | word_embed = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([word_embed, mask]) 350 | t = Concatenate(axis=-1)([t, word_embed]) 351 | 352 | t = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([t, mask]) 353 | t = Bidirectional(LSTM(cfg["unit3"], return_sequences=True), merge_mode="concat")(t) 354 | # t = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([t, mask]) 355 | # t = Conv1D(cfg["conv_num"], kernel_size=3, padding="same")(t) 356 | t = Lambda(lambda x: x[:, 0, :], name="extract_layer")(t) 357 | if cfg.get("num_class", 1) == 2: 358 | po1_logit = Dense(1, name="po1_logit")(t) 359 | po1 = Activation('sigmoid', name="po1")(po1_logit) 360 | train_model = Model(inputs=[t1_in, t2_in, o1_in], 361 | outputs=[po1]) 362 | o1_loss = K.binary_crossentropy(o1, po1) 363 | loss = K.mean(o1_loss) 364 | else: 365 | po1_logit = Dense(cfg["num_class"], name="po1_logit")(t) 366 | po1 = Activation('softmax', name="po1")(po1_logit) 367 | train_model = Model(inputs=[t1_in, t2_in, o1_in], 368 | outputs=[po1]) 369 | loss = K.categorical_crossentropy(o1, po1, axis=-1) 370 | loss = K.mean(loss) 371 | 372 | train_model.add_loss(loss) 373 | opt = _get_opt(num_example=cfg["num_example"], lr=cfg["lr"], min_lr=cfg['min_lr']) 374 | train_model.compile(optimizer=opt) 375 | if summary: 376 | train_model.summary() 377 | return train_model 378 | 379 | 380 | # print("----------------build model ---------------") 381 | # model = build_model(cfg, 382 | # summary=True, 383 | # word_embedding_matrix=word_embedding_matrix if cfg["use_embed"] else None, 384 | # bert_summary=True) 385 | 386 | # del model 387 | 388 | 389 | # In[5]: 390 | 391 | 392 | def token2id_X(x, x_dict, x2=None, maxlen=None, maxlen1=None): 393 | if x2: 394 | x1 = x 395 | del x 396 | maxlen -= 3 397 | maxlen1 -= 2 398 | assert maxlen > maxlen1 399 | maxlen2 = maxlen - maxlen1 - 1 400 | x1 = ["[CLS]"] + list(x1)[: maxlen1] + ["[SEP]"] 401 | x1 = [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x1] 402 | seg1= [0 for _ in x1] 403 | 404 | x2 = list(x2)[: maxlen2] + ["[SEP]"] 405 | x2= [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x2] 406 | seg2 = [1 for _ in x2] 407 | x = x1 + x2 408 | seg = seg1 + seg2 409 | 410 | else: 411 | maxlen -= 2 412 | x = ["[CLS]"] + list(x)[: maxlen] + ["[SEP]"] 413 | x = [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x] 414 | seg = [0 for _ in x] 415 | return x, seg 416 | 417 | 418 | def seq_padding(X, maxlen=None, padding_value=None, debug=False): 419 | L = [len(x) for x in X] 420 | if maxlen is None: 421 | maxlen = max(L) 422 | 423 | pad_X = np.array([ 424 | np.concatenate([x, [padding_value] * (maxlen - len(x))]) if len(x) < maxlen else x for x in X 425 | ]) 426 | if debug: 427 | print("[!] before pading {}\n".format(X)) 428 | print("[!] after pading {}\n".format(pad_X)) 429 | return pad_X 430 | 431 | 432 | class data_generator: 433 | 434 | def __init__(self, data, batch_size=B_SIZE, shuffle=SHUFFLE, augm_frac=0.75): 435 | self.data = data 436 | self.batch_size = batch_size 437 | self.steps = cfg["num_example"] // self.batch_size 438 | self.shuffle = shuffle 439 | self.data_dict = get_data(data) 440 | self.augm_frac = augm_frac 441 | if cfg["num_example"] % self.batch_size != 0: 442 | self.steps += 1 443 | 444 | def __len__(self): 445 | return self.steps 446 | 447 | def __iter__(self): 448 | 449 | while True: 450 | idxs = list(range(len(self.data))) 451 | if self.shuffle: 452 | np.random.shuffle(idxs) 453 | T1, T2, O1, O2 = [], [], [], [] 454 | for i in idxs: 455 | d = self.data.iloc[i] 456 | text = d["query1"] 457 | label_text = d["query2"] 458 | o1 = d["label"] 459 | 460 | if random() > self.augm_frac: 461 | data_d = self.data_dict[text] 462 | pos_data = data_d["pos"] 463 | neg_data = data_d["neg"] 464 | if pos_data and neg_data: 465 | if random() > 0.5: 466 | o1 = 1 467 | label_text = choice(pos_data) 468 | if len(pos_data) >= 2: 469 | _pos_data = [e for e in pos_data if e != label_text] 470 | text = choice(_pos_data) 471 | else: 472 | o1 = 0 473 | text = choice(pos_data) 474 | label_text = choice(neg_data) 475 | 476 | if random() > 0.5: 477 | text, label_text = label_text, text 478 | 479 | if o1 == "": 480 | continue 481 | o1 = float(o1) 482 | assert 0 <= o1 <= 1 483 | 484 | O1.append(o1) 485 | t1, t2 = token2id_X(text, x2=label_text, x_dict=word_index, maxlen=MAX_LEN, maxlen1=MAX_DOC_LEN) 486 | assert len(t1) == len(t2) 487 | 488 | T1.append(t1) 489 | T2.append(t2) 490 | 491 | if len(T1) == self.batch_size or i == idxs[-1]: 492 | O1 = np.array(O1).reshape(-1, 1) 493 | T1 = seq_padding(T1, padding_value=cfg["x_pad"]) 494 | T2 = seq_padding(T2, padding_value=0) 495 | assert T1.shape == T2.shape and T1.shape[0] == O1.shape[0] 496 | 497 | yield [T1, T2, O1], None 498 | T1, T2, O1, = [], [], [] 499 | 500 | 501 | 502 | # In[6]: 503 | 504 | 505 | def get_model(model_): 506 | model_inp_ind = [0, 1] 507 | inputs = [model_.inputs[e] for e in model_inp_ind] 508 | sub_model = Model(inputs=inputs, outputs=[model_.get_layer("po1").output]) 509 | return sub_model 510 | 511 | 512 | def find_best_acc_score(y_pred, y_true, use_plt=True, bins=1000): 513 | thres = [i / bins for i in range(1, bins)] 514 | scores = [accuracy_score(y_true, np.array(y_pred > thre, "int32")) for thre in thres] 515 | # if use_plt: 516 | # import matplotlib 517 | # import matplotlib.pyplot as plt 518 | # %matplotlib inline 519 | # plt.plot(scores) 520 | # plt.show() 521 | ind = np.argmax(scores) 522 | max_score = np.max(scores) 523 | assert abs(scores[ind] - max_score) < 1e-15 524 | return max_score, thres[ind] 525 | 526 | 527 | def evaluate(sub_model, data, bs=32): 528 | idxs = list(range(len(data))) 529 | T1, T2, O1, O2 = [], [], [], [] 530 | preds = [] 531 | for i in idxs: 532 | d = data.iloc[i] 533 | text = d["query1"] 534 | label_text = d["query2"] 535 | 536 | t1, t2 = token2id_X(text, x2=label_text, x_dict=word_index, maxlen=MAX_LEN, maxlen1=MAX_DOC_LEN) 537 | assert len(t1) == len(t2) 538 | 539 | T1.append(t1) 540 | T2.append(t2) 541 | 542 | o1 = float(d["label"]) 543 | O1.append(o1) 544 | if len(T1) == bs or i == idxs[-1]: 545 | T1 = seq_padding(T1, padding_value=cfg["x_pad"]) 546 | T2 = seq_padding(T2, padding_value=0) 547 | assert T1.shape == T2.shape 548 | pred = sub_model.predict([T1, T2]) 549 | preds.append(pred) 550 | T1, T2 = [], [] 551 | 552 | preds = np.concatenate(preds, axis=0).reshape(-1) 553 | O1 = np.array(O1).reshape(-1) 554 | O1 = O1.astype("int32") 555 | auc = roc_auc_score(O1, preds) 556 | best_res = find_best_acc_score(preds, O1) 557 | print("[!] best accurary&threshold = {}".format(best_res)) 558 | print("[!] best threshold classification_report") 559 | print(classification_report(O1, np.array(preds > best_res[1], "int32"), digits=6)) 560 | print("-" * 80) 561 | print("[!] np.mean(preds) = {}".format(np.mean(preds))) 562 | print("[!] classification_report") 563 | print(classification_report(O1, np.array(preds > 0.5, "int32"), digits=6)) 564 | acc = accuracy_score(O1, np.array(preds > 0.5, "int32")) 565 | return auc, acc 566 | 567 | 568 | class Evaluate(Callback): 569 | def __init__(self, data, filename=None): 570 | self.F1 = [] 571 | self.best = 0. 572 | self.filename = filename 573 | self.data = data 574 | 575 | def on_epoch_begin(self, epoch, logs=None): 576 | if epoch == 0: 577 | print("[!] test load&save model") 578 | f = self.filename + ".h5" 579 | f = os.path.join(SAVE_DIR, f) 580 | self.model.save(f, include_optimizer=False, overwrite=False) 581 | if "albert" in cfg["verbose"]: 582 | model_ = load_model(f) 583 | elif "nezha" in cfg["verbose"]: 584 | model_ = load_model(f) 585 | else: 586 | model_ = load_model(f, custom_objects=get_custom_objects()) 587 | 588 | def on_epoch_end(self, epoch, logs=None): 589 | if epoch + 1 < 1: 590 | return 591 | # if epoch + 1 in [3, 6, 9, 10, 12, 15, 18, 20]: 592 | # f = self.filename + "_{}.h5".format(epoch + 1) 593 | # f = os.path.join(SAVE_DIR, f) 594 | # self.model.save(f, include_optimizer=False) 595 | 596 | sub_model = get_model(self.model) 597 | f1, class_f1 = evaluate(sub_model, data=self.data) 598 | self.F1.append(f1) 599 | if f1 > self.best: 600 | f = self.filename + ".h5" 601 | f = os.path.join(SAVE_DIR, f) 602 | self.model.save(f, include_optimizer=False) 603 | 604 | if f1 > self.best: 605 | self.best = f1 606 | print("[!] epoch = {}, new best_auc = {}".format(epoch + 1, f1)) 607 | print('[!] epoch = {}, auc = {}, best auc {}'.format(epoch + 1, f1, self.best)) 608 | print('[!] epoch = {}, acc = {}\n'.format(epoch + 1, class_f1)) 609 | 610 | 611 | # In[7]: 612 | 613 | 614 | def search_layer(inputs, name, exclude_from=None): 615 | """根据inputs和name来搜索层 616 | 说明:inputs为某个层或某个层的输出;name为目标层的名字。 617 | 实现:根据inputs一直往上递归搜索,直到发现名字为name的层为止; 618 | 如果找不到,那就返回None。 619 | """ 620 | if exclude_from is None: 621 | exclude_from = set() 622 | 623 | if isinstance(inputs, keras.layers.Layer): 624 | layer = inputs 625 | else: 626 | layer = inputs._keras_history[0] 627 | 628 | if layer.name == name: 629 | return layer 630 | elif layer in exclude_from: 631 | return None 632 | else: 633 | exclude_from.add(layer) 634 | if isinstance(layer, keras.models.Model): 635 | model = layer 636 | for layer in model.layers: 637 | if layer.name == name: 638 | return layer 639 | inbound_layers = layer._inbound_nodes[0].inbound_layers 640 | if not isinstance(inbound_layers, list): 641 | inbound_layers = [inbound_layers] 642 | if len(inbound_layers) > 0: 643 | for layer in inbound_layers: 644 | layer = search_layer(layer, name, exclude_from) 645 | if layer is not None: 646 | return layer 647 | 648 | def adversarial_training(model, embedding_names, epsilon=1): 649 | """给模型添加对抗训练 650 | 其中model是需要添加对抗训练的keras模型,embedding_names 651 | 则是model里边Embedding层的名字。要在模型compile之后使用。 652 | """ 653 | if model.train_function is None: # 如果还没有训练函数 654 | model._make_train_function() # 手动make 655 | old_train_function = model.train_function # 备份旧的训练函数 656 | 657 | # 查找Embedding层 658 | embedding_layers = [] 659 | for embedding_name in embedding_names: 660 | for output in model.outputs: 661 | embedding_layer = search_layer(output, embedding_name) 662 | if embedding_layer is not None: 663 | embedding_layers.append(embedding_layer) 664 | break 665 | for embedding_layer in embedding_layers: 666 | if embedding_layer is None: 667 | raise Exception('Embedding layer not found') 668 | 669 | # 求Embedding梯度 670 | embeddings = [embedding_layer.embeddings for embedding_layer in embedding_layers] # Embedding矩阵 671 | gradients = K.gradients(model.total_loss, embeddings) # Embedding梯度 672 | # gradients = K.zeros_like(embeddings) + gradients[0] # 转为dense tensor 673 | gradients = [K.zeros_like(embedding) + gradient for embedding, gradient in zip(embeddings, gradients)] 674 | 675 | # 封装为函数 676 | inputs = (model._feed_inputs + 677 | model._feed_targets + 678 | model._feed_sample_weights) # 所有输入层 679 | embedding_gradients = K.function( 680 | inputs=inputs, 681 | outputs=gradients, 682 | name='embedding_gradients', 683 | ) # 封装为函数 684 | 685 | def train_function(inputs): # 重新定义训练函数 686 | # grads = embedding_gradients(inputs)[0] # Embedding梯度 687 | # delta = epsilon * grads / (np.sqrt((grads**2).sum()) + 1e-8) # 计算扰动 688 | grads = embedding_gradients(inputs) # Embedding梯度 689 | deltas = [epsilon * grad / (np.sqrt((grad**2).sum()) + 1e-8) for grad in grads] # 计算扰动 690 | # 注入扰动 691 | # K.set_value(embeddings, K.eval(embeddings) + delta) 692 | for embedding, delta in zip(embeddings, deltas): 693 | K.set_value(embedding, K.eval(embedding) + delta) 694 | 695 | outputs = old_train_function(inputs) # 梯度下降 696 | # 删除扰动 697 | # K.set_value(embeddings, K.eval(embeddings) - delta) # 删除扰动 698 | for embedding, delta in zip(embeddings, deltas): 699 | K.set_value(embedding, K.eval(embedding) - delta) 700 | return outputs 701 | 702 | model.train_function = train_function # 覆盖原训练函数 703 | 704 | adv_layer_names = ['Embedding-Token', 'char_embed'] 705 | 706 | if -1 in FOLD_ID: 707 | fold_id = -1 708 | cfg["num_example"] = len(train_data) 709 | print("-" * 81) 710 | print("[!] start fold_id =", fold_id, train_data.shape, dev_data.shape) 711 | print(cfg) 712 | K.clear_session() 713 | gc.collect() 714 | train_D = data_generator(train_data) 715 | seed(SEED + fold_id) 716 | np.random.seed(SEED + fold_id) 717 | tf.random.set_random_seed(SEED + fold_id) 718 | model = build_model(cfg, summary=True, word_embedding_matrix=word_embedding_matrix if cfg["use_embed"] else None) 719 | if cfg["adv_training"]: 720 | print("[!] using adv_training") 721 | adversarial_training(model, adv_layer_names, 0.5) 722 | evaluator = Evaluate(filename=cfg["filename"] + "_fold{}".format(fold_id), data=dev_data) 723 | model.fit_generator(train_D.__iter__(), 724 | steps_per_epoch=len(train_D), 725 | epochs=RUN_EPOCH, 726 | callbacks=[evaluator], 727 | shuffle=True 728 | ) 729 | del model, train_data, dev_data 730 | gc.collect() 731 | print("[!] finish fold_id =", fold_id) 732 | print("-" * 81) 733 | 734 | 735 | skf = SKF(FOLD_NUM, shuffle=False, random_state=SEED) 736 | 737 | print(all_data.shape) 738 | _t0 = time() 739 | for fold_id, (trn_ind, val_ind) in enumerate(skf.split(range(len(all_data)), all_data["label"])): 740 | if fold_id not in FOLD_ID: 741 | continue 742 | t0 = time() 743 | dev_data = all_data.iloc[val_ind].reset_index(drop=True) 744 | train_data = all_data.iloc[trn_ind].reset_index(drop=True) 745 | cfg["num_example"] = len(train_data) 746 | print("-" * 81) 747 | print("[!] start fold_id =", fold_id, train_data.shape, dev_data.shape) 748 | print(cfg) 749 | K.clear_session() 750 | gc.collect() 751 | train_D = data_generator(train_data) 752 | seed(SEED + fold_id) 753 | np.random.seed(SEED + fold_id) 754 | tf.random.set_random_seed(SEED + fold_id) 755 | model = build_model(cfg, summary=True, word_embedding_matrix=word_embedding_matrix if cfg["use_embed"] else None) 756 | if cfg["adv_training"]: 757 | print("[!] using adv_training") 758 | adversarial_training(model, adv_layer_names, 0.5) 759 | evaluator = Evaluate(filename=cfg["filename"] + "_fold{}".format(fold_id), data=dev_data) 760 | model.fit_generator(train_D.__iter__(), 761 | steps_per_epoch=len(train_D), 762 | epochs=RUN_EPOCH, 763 | callbacks=[evaluator], 764 | shuffle=True 765 | ) 766 | print(evaluator.F1, max(evaluator.F1)) 767 | print("[{}] finish fold_id =".format(time() - t0), fold_id) 768 | print("-" * 81) 769 | del model, train_data, dev_data, evaluator 770 | gc.collect() 771 | print("[{}] finish =".format(time() - _t0)) 772 | -------------------------------------------------------------------------------- /code/run_UER.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[1]: 5 | 6 | 7 | import collections 8 | import gc 9 | import json 10 | import os 11 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 12 | from random import choice, seed, randint, random 13 | import pandas as pd 14 | import numpy as np 15 | import tensorflow as tf 16 | import keras.backend as K 17 | import keras 18 | from keras.models import Sequential, Model 19 | from keras.layers import Input, CuDNNGRU as GRU, CuDNNLSTM as LSTM, Dropout, BatchNormalization 20 | from keras.layers import Dense, Concatenate, Activation, Embedding, SpatialDropout1D, Bidirectional, Lambda, Conv1D 21 | from keras.layers import Add, Average 22 | from keras.optimizers import Nadam, Adam, Adamax 23 | from keras.activations import absolute_import 24 | from keras.legacy import interfaces 25 | # from keras.preprocessing.sequence import pad_sequaencesget_ 26 | from keras.callbacks import Callback 27 | from keras.utils import to_categorical 28 | from sklearn.model_selection import KFold as KF 29 | from sklearn.model_selection import StratifiedKFold as SKF 30 | from keras_bert.loader import load_trained_model_from_checkpoint 31 | from keras_bert import AdamWarmup, calc_train_steps 32 | from keras.engine import Layer 33 | from keras.engine import InputSpec 34 | from keras.objectives import categorical_crossentropy 35 | from keras.objectives import sparse_categorical_crossentropy 36 | from keras import activations, initializers, regularizers, constraints 37 | from keras.models import load_model 38 | from keras_bert import get_custom_objects 39 | from tqdm import tqdm 40 | from sklearn.metrics import roc_auc_score, accuracy_score 41 | from special_tokens import CHINESE_MAP 42 | from metric_utils import compute_f1, compute_exact 43 | from collections import OrderedDict, Counter 44 | from sklearn.metrics import classification_report 45 | from time import time 46 | 47 | 48 | # In[2]: 49 | 50 | 51 | BERT_PRETRAINED_DIR = "../data/External/UER-large/" 52 | TRN_FILENAME = "../data/train_20200228.csv" 53 | DEV_FILENAME = "../data/dev_20200228.csv" 54 | SAVE_DIR = "../user_data/" 55 | PREFIX = "USE_v12_augm" 56 | if "large-clue" in BERT_PRETRAINED_DIR or "large-pair" in BERT_PRETRAINED_DIR: 57 | W2V_FILE = "./word_embedding_matrix_v2" 58 | else: 59 | W2V_FILE = "./word_embedding_matrix" 60 | MAX_EPOCH = 15 61 | RUN_EPOCH = 10 62 | MAX_LEN = 60 63 | MAX_DOC_LEN = MAX_LEN // 2 64 | THRE = 0.5 65 | B_SIZE = 32 66 | ACCUM_STEP = int(32 // B_SIZE) 67 | FOLD_ID = list(range(10, 15)) 68 | FOLD_NUM = 25 69 | SEED = 2020 70 | 71 | SHUFFLE = True 72 | DOC_STRIDE = 128 73 | cfg = {} 74 | 75 | cfg["base_dir"] = BERT_PRETRAINED_DIR 76 | cfg["span_mode"] = True 77 | cfg["lr"] = 9e-6 78 | cfg['min_lr'] = 6e-8 79 | cfg["ch_type"] = "tx_ft" 80 | cfg["trainable"] = True 81 | cfg["bert_trainable"] = True 82 | cfg["accum_step"] = ACCUM_STEP 83 | cfg["cls_num"] = 4 84 | cfg["unit1"] = 128 85 | cfg["unit2"] = 128 86 | cfg["unit3"] = 512 87 | cfg["conv_num"] = 128 88 | cfg['maxlen'] = MAX_LEN 89 | cfg["adv_training"] = True 90 | cfg["W2V_FILE"] = W2V_FILE 91 | cfg["use_embed"] = True 92 | cfg["use_embed_v2"] = True 93 | PREFIX += "_seed" + str(SEED) 94 | cfg["verbose"] = PREFIX 95 | PREFIX = PREFIX + "_embed_v2" if cfg["use_embed_v2"] else PREFIX 96 | 97 | train_data = pd.read_csv(TRN_FILENAME) 98 | train_data.fillna("", inplace=True) 99 | dev_data = pd.read_csv(DEV_FILENAME) 100 | dev_data.fillna("", inplace=True) 101 | all_data = pd.concat([train_data, dev_data], axis=0, ignore_index=True) 102 | 103 | def get_data(df_data): 104 | 105 | df_gb = df_data.groupby('query1') 106 | res = {} 107 | for index, data in df_gb: 108 | query2s = data["query2"] 109 | lables = data["label"] 110 | ele = {} 111 | pos_qs = [] 112 | neg_qs = [] 113 | for q, lable in zip(query2s, lables): 114 | if lable == 1: 115 | pos_qs.append(q) 116 | elif lable == 0: 117 | neg_qs.append(q) 118 | else: 119 | print("wrong data", index, q, lable) 120 | ele["pos"] = pos_qs 121 | ele["neg"] = neg_qs 122 | res[index] = ele 123 | return res 124 | 125 | # train_data_dict = get_data(train_data) 126 | 127 | 128 | # In[3]: 129 | 130 | 131 | def get_vocab(base_dir=BERT_PRETRAINED_DIR, albert=False): 132 | if albert or "albert"in cfg["verbose"].lower(): 133 | dict_path = os.path.join(base_dir, 'vocab_chinese.txt') 134 | else: 135 | dict_path = os.path.join(base_dir, 'vocab.txt') 136 | with open(dict_path, mode="r", encoding="utf8") as f: 137 | lines = f.readlines() 138 | lines = [l.strip() for l in lines] 139 | 140 | word_index = {v: k for k, v in enumerate(lines)} 141 | for k, v in CHINESE_MAP.items(): 142 | assert v in word_index 143 | if k in word_index: 144 | print("[!] CHINESE_MAP k = {} is in word_index, DON'T using `{}` to replace".format(k, v)) 145 | continue 146 | # word_index[k] = word_index[v] 147 | del word_index[v] 148 | return word_index 149 | 150 | 151 | def get_label(): 152 | labels = ["0", "1"] 153 | label2id = {k: v for v, k in enumerate(labels)} 154 | id2label = {v: k for k, v in label2id.items()} 155 | return label2id, id2label, labels 156 | 157 | 158 | def get_coefs(word, *arr): 159 | return word, np.asarray(arr, dtype=np.float16) 160 | 161 | 162 | def load_embed(path, dim=300, word_index=None): 163 | embedding_index = {} 164 | with open(path, mode="r", encoding="utf8") as f: 165 | lines = f.readlines() 166 | for l in lines: 167 | l = l.strip().split() 168 | word, arr = l[0], l[1:] 169 | if len(arr) != dim: 170 | print("[!] l = {}".format(l)) 171 | continue 172 | if word_index and word not in word_index: 173 | continue 174 | word, arr = get_coefs(word, arr) 175 | embedding_index[word] = arr 176 | return embedding_index 177 | 178 | 179 | def build_matrix(path, word_index=None, max_features=None, dim=300): 180 | embedding_index = load_embed(path, dim=dim, word_index=word_index) 181 | max_features = len(word_index) + 1 if max_features is None else max_features 182 | embedding_matrix = np.zeros((max_features + 1, dim)) 183 | unknown_words = [] 184 | 185 | for word, i in word_index.items(): 186 | if i <= max_features: 187 | try: 188 | embedding_matrix[i] = embedding_index[word] 189 | except KeyError: 190 | unknown_words.append(word) 191 | return embedding_matrix, unknown_words 192 | 193 | 194 | def load_word_embed(word_embed_f1="../../../chinese_embedding/Tencent_AILab_ChineseEmbedding.txt", 195 | word_embed_f2="../../../chinese_embedding/cc.zh.300.vec", 196 | save_filename=W2V_FILE, 197 | word_index=None): 198 | if os.path.exists(save_filename + ".npy"): 199 | word_embedding_matrix = np.load(save_filename + ".npy").astype("float32") 200 | else: 201 | if "tx" in cfg["ch_type"]: 202 | tx_embed, tx_unk = build_matrix(word_embed_f1, word_index=word_index, dim=200) 203 | else: 204 | tx_embed = np.zeros(shape=(len(word_index) + 2, 0)) 205 | tx_unk = [] 206 | if "ft" in cfg["ch_type"]: 207 | ft_embed, ft_unk = build_matrix(word_embed_f2, word_index=word_index, dim=300) 208 | else: 209 | ft_embed = np.zeros(shape=(len(word_index) + 2, 0)) 210 | ft_unk = [] 211 | 212 | word_embedding_matrix = np.concatenate([tx_embed, ft_embed], axis=-1).astype("float32") 213 | print(word_embedding_matrix.shape, len(tx_unk), len(ft_unk)) 214 | np.save(save_filename, word_embedding_matrix ) 215 | return word_embedding_matrix 216 | 217 | 218 | word_index = get_vocab() 219 | label2id, id2label, labels = get_label() 220 | word_embedding_matrix = load_word_embed(word_index=word_index) 221 | 222 | NUM_CLASS = len(label2id) 223 | cfg["x_pad"] = word_index["[PAD]"] 224 | cfg["num_class"] = NUM_CLASS 225 | cfg["filename"] = "{}_{}_{}_{}".format(PREFIX, cfg["ch_type"], FOLD_NUM, cfg["lr"]) 226 | cfg["filename"] = cfg["filename"] + "_adv_training" if cfg["adv_training"] else cfg["filename"] 227 | cfg["filename"] = cfg["filename"] + "_embed" if cfg["use_embed"] else cfg["filename"] 228 | cfg["filename"] = cfg["filename"] + "_v2" if cfg["use_embed_v2"]and cfg["use_embed"] else cfg["filename"] 229 | print(label2id, id2label, labels, len(word_index), cfg["filename"]) 230 | 231 | 232 | # In[4]: 233 | 234 | 235 | def build_model(cfg, summary=False, word_embedding_matrix=None): 236 | def _get_model(base_dir, cfg_=None): 237 | if "albert"in cfg["verbose"].lower(): 238 | from bert4keras.bert import build_bert_model 239 | config_file = os.path.join(base_dir, 'albert_config.json') 240 | checkpoint_file = os.path.join(base_dir, 'model.ckpt-best') 241 | model = build_bert_model( 242 | config_path=config_file, 243 | checkpoint_path=checkpoint_file, 244 | model='albert', 245 | return_keras_model=True 246 | ) 247 | if cfg_["cls_num"] > 1: 248 | output = Concatenate(axis=-1)([model.get_layer("Encoder-1-FeedForward-Norm").get_output_at(-i) for i in range(1, cfg["cls_num"] + 1)]) 249 | model = Model(model.inputs[: 2], outputs=output) 250 | model.trainable = cfg_["bert_trainable"] 251 | else: 252 | config_file = os.path.join(base_dir, 'bert_config.json') 253 | checkpoint_file = os.path.join(base_dir, 'bert_model.ckpt') 254 | if not os.path.exists(config_file): 255 | config_file = os.path.join(base_dir, 'bert_config_large.json') 256 | checkpoint_file = os.path.join(base_dir, 'roberta_l24_large_model') 257 | model = load_trained_model_from_checkpoint(config_file, 258 | checkpoint_file, 259 | training=False, 260 | trainable=cfg_["bert_trainable"], 261 | output_layer_num=cfg_["cls_num"], 262 | seq_len=cfg_['maxlen']) 263 | 264 | # model = Model(inputs=model.inputs[: 2], outputs=model.layers[-7].output) 265 | 266 | return model 267 | 268 | def _get_opt(num_example, warmup_proportion=0.1, lr=2e-5, min_lr=None): 269 | total_steps, warmup_steps = calc_train_steps( 270 | num_example=num_example, 271 | batch_size=B_SIZE, 272 | epochs=MAX_EPOCH, 273 | warmup_proportion=warmup_proportion, 274 | ) 275 | opt = AdamWarmup(total_steps, warmup_steps, lr=lr, min_lr=min_lr) 276 | if cfg.get("accum_step", None) and cfg["accum_step"] > 1: 277 | print("[!] using accum_step = {}".format(cfg["accum_step"])) 278 | from accum_optimizer import AccumOptimizer 279 | opt = AccumOptimizer(opt, steps_per_update=cfg["accum_step"]) 280 | 281 | return opt 282 | 283 | bert_model = _get_model(cfg["base_dir"], cfg) 284 | 285 | if word_embedding_matrix is not None: 286 | embed = Embedding(input_dim=word_embedding_matrix.shape[0], 287 | output_dim=word_embedding_matrix.shape[1], 288 | weights=[word_embedding_matrix], 289 | trainable=cfg["trainable"], 290 | name="char_embed" 291 | ) 292 | 293 | t1_in = Input(shape=(None, )) 294 | t2_in = Input(shape=(None, )) 295 | o1_in = Input(shape=(1, )) 296 | o2_in = Input(shape=(1, )) 297 | 298 | t1, t2, o1, o2 = t1_in, t2_in, o1_in, o2_in 299 | 300 | t = bert_model([t1, t2]) 301 | mask = Lambda(lambda x: K.cast(K.not_equal(x, cfg["x_pad"]), 'float32'))(t1) 302 | ## Char information 303 | if word_embedding_matrix is not None: 304 | word_embed = embed(t1) 305 | if cfg.get("use_embed_v2", False): 306 | _t2 = Lambda(lambda x: K.expand_dims(x, axis=-1))(t2) 307 | word_embed = Concatenate(axis=-1)([word_embed, _t2]) 308 | word_embed = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([word_embed, mask]) 309 | word_embed = Bidirectional(LSTM(cfg["unit1"], return_sequences=True), merge_mode="sum")(word_embed) 310 | word_embed = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([word_embed, mask]) 311 | t = Concatenate(axis=-1)([t, word_embed]) 312 | 313 | t = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([t, mask]) 314 | t = Bidirectional(LSTM(cfg["unit3"], return_sequences=True), merge_mode="concat")(t) 315 | # t = Lambda(lambda x: x[0] * K.expand_dims(x[1], axis=-1))([t, mask]) 316 | # t = Conv1D(cfg["conv_num"], kernel_size=3, padding="same")(t) 317 | t = Lambda(lambda x: x[:, 0, :], name="extract_layer")(t) 318 | if cfg.get("num_class", 1) == 2: 319 | po1_logit = Dense(1, name="po1_logit")(t) 320 | po1 = Activation('sigmoid', name="po1")(po1_logit) 321 | train_model = Model(inputs=[t1_in, t2_in, o1_in], 322 | outputs=[po1]) 323 | o1_loss = K.binary_crossentropy(o1, po1) 324 | loss = K.mean(o1_loss) 325 | else: 326 | po1_logit = Dense(cfg["num_class"], name="po1_logit")(t) 327 | po1 = Activation('softmax', name="po1")(po1_logit) 328 | train_model = Model(inputs=[t1_in, t2_in, o1_in], 329 | outputs=[po1]) 330 | loss = K.categorical_crossentropy(o1, po1, axis=-1) 331 | loss = K.mean(loss) 332 | 333 | train_model.add_loss(loss) 334 | opt = _get_opt(num_example=cfg["num_example"], lr=cfg["lr"], min_lr=cfg['min_lr']) 335 | train_model.compile(optimizer=opt) 336 | if summary: 337 | train_model.summary() 338 | return train_model 339 | 340 | 341 | # print("----------------build model ---------------") 342 | # model = build_model(cfg, summary=True, word_embedding_matrix=word_embedding_matrix if cfg["use_embed"] else None) 343 | # del model 344 | 345 | 346 | # In[5]: 347 | 348 | 349 | def token2id_X(x, x_dict, x2=None, maxlen=None, maxlen1=None): 350 | if x2: 351 | x1 = x 352 | del x 353 | maxlen -= 3 354 | maxlen1 -= 2 355 | assert maxlen > maxlen1 356 | maxlen2 = maxlen - maxlen1 - 1 357 | x1 = ["[CLS]"] + list(x1)[: maxlen1] + ["[SEP]"] 358 | x1 = [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x1] 359 | seg1= [0 for _ in x1] 360 | 361 | x2 = list(x2)[: maxlen2] + ["[SEP]"] 362 | x2= [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x2] 363 | seg2 = [1 for _ in x2] 364 | x = x1 + x2 365 | seg = seg1 + seg2 366 | 367 | else: 368 | maxlen -= 2 369 | x = ["[CLS]"] + list(x)[: maxlen] + ["[SEP]"] 370 | x = [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x] 371 | seg = [0 for _ in x] 372 | return x, seg 373 | 374 | 375 | def seq_padding(X, maxlen=None, padding_value=None, debug=False): 376 | L = [len(x) for x in X] 377 | if maxlen is None: 378 | maxlen = max(L) 379 | 380 | pad_X = np.array([ 381 | np.concatenate([x, [padding_value] * (maxlen - len(x))]) if len(x) < maxlen else x for x in X 382 | ]) 383 | if debug: 384 | print("[!] before pading {}\n".format(X)) 385 | print("[!] after pading {}\n".format(pad_X)) 386 | return pad_X 387 | 388 | 389 | class data_generator: 390 | 391 | def __init__(self, data, batch_size=B_SIZE, shuffle=SHUFFLE, augm_frac=0.75): 392 | self.data = data 393 | self.batch_size = batch_size 394 | self.steps = cfg["num_example"] // self.batch_size 395 | self.shuffle = shuffle 396 | self.data_dict = get_data(data) 397 | self.augm_frac = augm_frac 398 | if cfg["num_example"] % self.batch_size != 0: 399 | self.steps += 1 400 | 401 | def __len__(self): 402 | return self.steps 403 | 404 | def __iter__(self): 405 | 406 | while True: 407 | idxs = list(range(len(self.data))) 408 | if self.shuffle: 409 | np.random.shuffle(idxs) 410 | T1, T2, O1, O2 = [], [], [], [] 411 | for i in idxs: 412 | d = self.data.iloc[i] 413 | text = d["query1"] 414 | label_text = d["query2"] 415 | o1 = d["label"] 416 | 417 | if random() > self.augm_frac: 418 | data_d = self.data_dict[text] 419 | pos_data = data_d["pos"] 420 | neg_data = data_d["neg"] 421 | if pos_data and neg_data: 422 | if random() > 0.5: 423 | o1 = 1 424 | label_text = choice(pos_data) 425 | if len(pos_data) >= 2: 426 | _pos_data = [e for e in pos_data if e != label_text] 427 | text = choice(_pos_data) 428 | else: 429 | o1 = 0 430 | text = choice(pos_data) 431 | label_text = choice(neg_data) 432 | 433 | if random() > 0.5: 434 | text, label_text = label_text, text 435 | 436 | if o1 == "": 437 | continue 438 | o1 = float(o1) 439 | assert 0 <= o1 <= 1 440 | 441 | O1.append(o1) 442 | t1, t2 = token2id_X(text, x2=label_text, x_dict=word_index, maxlen=MAX_LEN, maxlen1=MAX_DOC_LEN) 443 | assert len(t1) == len(t2) 444 | 445 | T1.append(t1) 446 | T2.append(t2) 447 | 448 | if len(T1) == self.batch_size or i == idxs[-1]: 449 | O1 = np.array(O1).reshape(-1, 1) 450 | T1 = seq_padding(T1, padding_value=cfg["x_pad"]) 451 | T2 = seq_padding(T2, padding_value=0) 452 | assert T1.shape == T2.shape and T1.shape[0] == O1.shape[0] 453 | 454 | yield [T1, T2, O1], None 455 | T1, T2, O1, = [], [], [] 456 | 457 | 458 | 459 | # In[6]: 460 | 461 | 462 | def get_model(model_): 463 | model_inp_ind = [0, 1] 464 | inputs = [model_.inputs[e] for e in model_inp_ind] 465 | sub_model = Model(inputs=inputs, outputs=[model_.get_layer("po1").output]) 466 | return sub_model 467 | 468 | 469 | def find_best_acc_score(y_pred, y_true, use_plt=True, bins=1000): 470 | thres = [i / bins for i in range(1, bins)] 471 | scores = [accuracy_score(y_true, np.array(y_pred > thre, "int32")) for thre in thres] 472 | # if use_plt: 473 | # import matplotlib 474 | # import matplotlib.pyplot as plt 475 | # %matplotlib inline 476 | # plt.plot(scores) 477 | # plt.show() 478 | ind = np.argmax(scores) 479 | max_score = np.max(scores) 480 | assert abs(scores[ind] - max_score) < 1e-15 481 | return max_score, thres[ind] 482 | 483 | 484 | def evaluate(sub_model, data, bs=32): 485 | idxs = list(range(len(data))) 486 | T1, T2, O1, O2 = [], [], [], [] 487 | preds = [] 488 | for i in idxs: 489 | d = data.iloc[i] 490 | text = d["query1"] 491 | label_text = d["query2"] 492 | 493 | t1, t2 = token2id_X(text, x2=label_text, x_dict=word_index, maxlen=MAX_LEN, maxlen1=MAX_DOC_LEN) 494 | assert len(t1) == len(t2) 495 | 496 | T1.append(t1) 497 | T2.append(t2) 498 | 499 | o1 = float(d["label"]) 500 | O1.append(o1) 501 | if len(T1) == bs or i == idxs[-1]: 502 | T1 = seq_padding(T1, padding_value=cfg["x_pad"]) 503 | T2 = seq_padding(T2, padding_value=0) 504 | assert T1.shape == T2.shape 505 | pred = sub_model.predict([T1, T2]) 506 | preds.append(pred) 507 | T1, T2 = [], [] 508 | 509 | preds = np.concatenate(preds, axis=0).reshape(-1) 510 | O1 = np.array(O1).reshape(-1) 511 | O1 = O1.astype("int32") 512 | auc = roc_auc_score(O1, preds) 513 | best_res = find_best_acc_score(preds, O1) 514 | print("[!] best accurary&threshold = {}".format(best_res)) 515 | print("[!] best threshold classification_report") 516 | print(classification_report(O1, np.array(preds > best_res[1], "int32"), digits=6)) 517 | print("-" * 80) 518 | print("[!] np.mean(preds) = {}".format(np.mean(preds))) 519 | print("[!] classification_report") 520 | print(classification_report(O1, np.array(preds > 0.5, "int32"), digits=6)) 521 | acc = accuracy_score(O1, np.array(preds > 0.5, "int32")) 522 | return auc, acc 523 | 524 | 525 | class Evaluate(Callback): 526 | def __init__(self, data, filename=None): 527 | self.F1 = [] 528 | self.best = 0. 529 | self.filename = filename 530 | self.data = data 531 | 532 | def on_epoch_begin(self, epoch, logs=None): 533 | if epoch == 0: 534 | print("[!] test load&save model") 535 | f = self.filename + ".h5" 536 | f = os.path.join(SAVE_DIR, f) 537 | self.model.save(f, include_optimizer=False, overwrite=False) 538 | if "albert" in cfg["verbose"]: 539 | model_ = load_model(f) 540 | else: 541 | model_ = load_model(f, custom_objects=get_custom_objects()) 542 | 543 | def on_epoch_end(self, epoch, logs=None): 544 | if epoch + 1 < 1: 545 | return 546 | # if epoch + 1 in [3, 6, 9, 10, 12, 15, 18, 20]: 547 | # f = self.filename + "_{}.h5".format(epoch + 1) 548 | # f = os.path.join(SAVE_DIR, f) 549 | # self.model.save(f, include_optimizer=False) 550 | 551 | sub_model = get_model(self.model) 552 | f1, class_f1 = evaluate(sub_model, data=self.data) 553 | self.F1.append(f1) 554 | if f1 > self.best: 555 | f = self.filename + ".h5" 556 | f = os.path.join(SAVE_DIR, f) 557 | self.model.save(f, include_optimizer=False) 558 | 559 | if f1 > self.best: 560 | self.best = f1 561 | print("[!] epoch = {}, new best_auc = {}".format(epoch + 1, f1)) 562 | print('[!] epoch = {}, auc = {}, best auc {}'.format(epoch + 1, f1, self.best)) 563 | print('[!] epoch = {}, acc = {}\n'.format(epoch + 1, class_f1)) 564 | 565 | 566 | # In[7]: 567 | 568 | 569 | def search_layer(inputs, name, exclude_from=None): 570 | """根据inputs和name来搜索层 571 | 说明:inputs为某个层或某个层的输出;name为目标层的名字。 572 | 实现:根据inputs一直往上递归搜索,直到发现名字为name的层为止; 573 | 如果找不到,那就返回None。 574 | """ 575 | if exclude_from is None: 576 | exclude_from = set() 577 | 578 | if isinstance(inputs, keras.layers.Layer): 579 | layer = inputs 580 | else: 581 | layer = inputs._keras_history[0] 582 | 583 | if layer.name == name: 584 | return layer 585 | elif layer in exclude_from: 586 | return None 587 | else: 588 | exclude_from.add(layer) 589 | if isinstance(layer, keras.models.Model): 590 | model = layer 591 | for layer in model.layers: 592 | if layer.name == name: 593 | return layer 594 | inbound_layers = layer._inbound_nodes[0].inbound_layers 595 | if not isinstance(inbound_layers, list): 596 | inbound_layers = [inbound_layers] 597 | if len(inbound_layers) > 0: 598 | for layer in inbound_layers: 599 | layer = search_layer(layer, name, exclude_from) 600 | if layer is not None: 601 | return layer 602 | 603 | def adversarial_training(model, embedding_names, epsilon=1): 604 | """给模型添加对抗训练 605 | 其中model是需要添加对抗训练的keras模型,embedding_names 606 | 则是model里边Embedding层的名字。要在模型compile之后使用。 607 | """ 608 | if model.train_function is None: # 如果还没有训练函数 609 | model._make_train_function() # 手动make 610 | old_train_function = model.train_function # 备份旧的训练函数 611 | 612 | # 查找Embedding层 613 | embedding_layers = [] 614 | for embedding_name in embedding_names: 615 | for output in model.outputs: 616 | embedding_layer = search_layer(output, embedding_name) 617 | if embedding_layer is not None: 618 | embedding_layers.append(embedding_layer) 619 | break 620 | for embedding_layer in embedding_layers: 621 | if embedding_layer is None: 622 | raise Exception('Embedding layer not found') 623 | 624 | # 求Embedding梯度 625 | embeddings = [embedding_layer.embeddings for embedding_layer in embedding_layers] # Embedding矩阵 626 | gradients = K.gradients(model.total_loss, embeddings) # Embedding梯度 627 | # gradients = K.zeros_like(embeddings) + gradients[0] # 转为dense tensor 628 | gradients = [K.zeros_like(embedding) + gradient for embedding, gradient in zip(embeddings, gradients)] 629 | 630 | # 封装为函数 631 | inputs = (model._feed_inputs + 632 | model._feed_targets + 633 | model._feed_sample_weights) # 所有输入层 634 | embedding_gradients = K.function( 635 | inputs=inputs, 636 | outputs=gradients, 637 | name='embedding_gradients', 638 | ) # 封装为函数 639 | 640 | def train_function(inputs): # 重新定义训练函数 641 | # grads = embedding_gradients(inputs)[0] # Embedding梯度 642 | # delta = epsilon * grads / (np.sqrt((grads**2).sum()) + 1e-8) # 计算扰动 643 | grads = embedding_gradients(inputs) # Embedding梯度 644 | deltas = [epsilon * grad / (np.sqrt((grad**2).sum()) + 1e-8) for grad in grads] # 计算扰动 645 | # 注入扰动 646 | # K.set_value(embeddings, K.eval(embeddings) + delta) 647 | for embedding, delta in zip(embeddings, deltas): 648 | K.set_value(embedding, K.eval(embedding) + delta) 649 | 650 | outputs = old_train_function(inputs) # 梯度下降 651 | # 删除扰动 652 | # K.set_value(embeddings, K.eval(embeddings) - delta) # 删除扰动 653 | for embedding, delta in zip(embeddings, deltas): 654 | K.set_value(embedding, K.eval(embedding) - delta) 655 | return outputs 656 | 657 | model.train_function = train_function # 覆盖原训练函数 658 | 659 | 660 | adv_layer_names = ['Embedding-Token', 'char_embed'] 661 | 662 | if -1 in FOLD_ID: 663 | fold_id = -1 664 | cfg["num_example"] = len(train_data) 665 | print("-" * 81) 666 | print("[!] start fold_id =", fold_id, train_data.shape, dev_data.shape) 667 | print(cfg) 668 | K.clear_session() 669 | gc.collect() 670 | train_D = data_generator(train_data) 671 | seed(SEED + fold_id) 672 | np.random.seed(SEED + fold_id) 673 | tf.random.set_random_seed(SEED + fold_id) 674 | model = build_model(cfg, summary=True, word_embedding_matrix=word_embedding_matrix if cfg["use_embed"] else None) 675 | if cfg["adv_training"]: 676 | print("[!] using adv_training") 677 | adversarial_training(model, adv_layer_names, 0.5) 678 | evaluator = Evaluate(filename=cfg["filename"] + "_fold{}".format(fold_id), data=dev_data) 679 | model.fit_generator(train_D.__iter__(), 680 | steps_per_epoch=len(train_D), 681 | epochs=RUN_EPOCH, 682 | callbacks=[evaluator], 683 | shuffle=True 684 | ) 685 | del model, train_data, dev_data 686 | gc.collect() 687 | print("[!] finish fold_id =", fold_id) 688 | print("-" * 81) 689 | 690 | 691 | skf = SKF(FOLD_NUM, shuffle=False, random_state=SEED) 692 | 693 | print(all_data.shape) 694 | _t0 = time() 695 | for fold_id, (trn_ind, val_ind) in enumerate(skf.split(range(len(all_data)), all_data["label"])): 696 | if fold_id not in FOLD_ID: 697 | continue 698 | t0 = time() 699 | dev_data = all_data.iloc[val_ind].reset_index(drop=True) 700 | train_data = all_data.iloc[trn_ind].reset_index(drop=True) 701 | cfg["num_example"] = len(train_data) 702 | print("-" * 81) 703 | print("[!] start fold_id =", fold_id, train_data.shape, dev_data.shape) 704 | print(cfg) 705 | K.clear_session() 706 | gc.collect() 707 | train_D = data_generator(train_data) 708 | seed(SEED + fold_id) 709 | np.random.seed(SEED + fold_id) 710 | tf.random.set_random_seed(SEED + fold_id) 711 | model = build_model(cfg, summary=True, word_embedding_matrix=word_embedding_matrix if cfg["use_embed"] else None) 712 | if cfg["adv_training"]: 713 | print("[!] using adv_training") 714 | adversarial_training(model, adv_layer_names, 0.5) 715 | evaluator = Evaluate(filename=cfg["filename"] + "_fold{}".format(fold_id), data=dev_data) 716 | model.fit_generator(train_D.__iter__(), 717 | steps_per_epoch=len(train_D), 718 | epochs=RUN_EPOCH, 719 | callbacks=[evaluator], 720 | shuffle=True 721 | ) 722 | print(evaluator.F1, max(evaluator.F1)) 723 | print("[{}] finish fold_id =".format(time() - t0), fold_id) 724 | print("-" * 81) 725 | del model, train_data, dev_data, evaluator 726 | gc.collect() 727 | print("[{}] finish =".format(time() - _t0)) 728 | 729 | 730 | # In[9]: 731 | 732 | 733 | sub_model = get_model(model) 734 | evaluate(sub_model=sub_model, data=dev_data) 735 | 736 | 737 | 738 | 739 | -------------------------------------------------------------------------------- /code/sequence_labeling.py: -------------------------------------------------------------------------------- 1 | """Metrics to assess performance on sequence labeling task given prediction 2 | Functions named as ``*_score`` return a scalar value to maximize: the higher 3 | the better 4 | see `https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py` 5 | """ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | from collections import defaultdict 12 | 13 | import numpy as np 14 | 15 | 16 | def get_entities(seq, suffix=False): 17 | """Gets entities from sequence. 18 | 19 | Args: 20 | seq (list): sequence of labels. 21 | 22 | Returns: 23 | list: list of (chunk_type, chunk_start, chunk_end). 24 | 25 | Example: 26 | >>> from seqeval.metrics.sequence_labeling import get_entities 27 | >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC'] 28 | >>> get_entities(seq) 29 | [('PER', 0, 1), ('LOC', 3, 3)] 30 | """ 31 | # for nested list 32 | if any(isinstance(s, list) for s in seq): 33 | seq = [item for sublist in seq for item in sublist + ['O']] 34 | 35 | prev_tag = 'O' 36 | prev_type = '' 37 | begin_offset = 0 38 | chunks = [] 39 | for i, chunk in enumerate(seq + ['O']): 40 | if suffix: 41 | tag = chunk[-1] 42 | type_ = chunk.split('-')[0] 43 | else: 44 | tag = chunk[0] 45 | type_ = chunk.split('-')[-1] 46 | 47 | if end_of_chunk(prev_tag, tag, prev_type, type_): 48 | chunks.append((prev_type, begin_offset, i-1)) 49 | if start_of_chunk(prev_tag, tag, prev_type, type_): 50 | begin_offset = i 51 | prev_tag = tag 52 | prev_type = type_ 53 | 54 | return chunks 55 | 56 | 57 | def end_of_chunk(prev_tag, tag, prev_type, type_): 58 | """Checks if a chunk ended between the previous and current word. 59 | 60 | Args: 61 | prev_tag: previous chunk tag. 62 | tag: current chunk tag. 63 | prev_type: previous type. 64 | type_: current type. 65 | 66 | Returns: 67 | chunk_end: boolean. 68 | """ 69 | chunk_end = False 70 | 71 | if prev_tag == 'E': chunk_end = True 72 | if prev_tag == 'S': chunk_end = True 73 | 74 | if prev_tag == 'B' and tag == 'B': chunk_end = True 75 | if prev_tag == 'B' and tag == 'S': chunk_end = True 76 | if prev_tag == 'B' and tag == 'O': chunk_end = True 77 | if prev_tag == 'I' and tag == 'B': chunk_end = True 78 | if prev_tag == 'I' and tag == 'S': chunk_end = True 79 | if prev_tag == 'I' and tag == 'O': chunk_end = True 80 | 81 | if prev_tag != 'O' and prev_tag != '.' and prev_type != type_: 82 | chunk_end = True 83 | 84 | return chunk_end 85 | 86 | 87 | def start_of_chunk(prev_tag, tag, prev_type, type_): 88 | """Checks if a chunk started between the previous and current word. 89 | 90 | Args: 91 | prev_tag: previous chunk tag. 92 | tag: current chunk tag. 93 | prev_type: previous type. 94 | type_: current type. 95 | 96 | Returns: 97 | chunk_start: boolean. 98 | """ 99 | chunk_start = False 100 | 101 | if tag == 'B': chunk_start = True 102 | if tag == 'S': chunk_start = True 103 | 104 | if prev_tag == 'E' and tag == 'E': chunk_start = True 105 | if prev_tag == 'E' and tag == 'I': chunk_start = True 106 | if prev_tag == 'S' and tag == 'E': chunk_start = True 107 | if prev_tag == 'S' and tag == 'I': chunk_start = True 108 | if prev_tag == 'O' and tag == 'E': chunk_start = True 109 | if prev_tag == 'O' and tag == 'I': chunk_start = True 110 | 111 | if tag != 'O' and tag != '.' and prev_type != type_: 112 | chunk_start = True 113 | 114 | return chunk_start 115 | 116 | 117 | def f1_score(y_true, y_pred, average='micro', suffix=False): 118 | """Compute the F1 score. 119 | 120 | The F1 score can be interpreted as a weighted average of the precision and 121 | recall, where an F1 score reaches its best value at 1 and worst score at 0. 122 | The relative contribution of precision and recall to the F1 score are 123 | equal. The formula for the F1 score is:: 124 | 125 | F1 = 2 * (precision * recall) / (precision + recall) 126 | 127 | Args: 128 | y_true : 2d array. Ground truth (correct) target values. 129 | y_pred : 2d array. Estimated targets as returned by a tagger. 130 | 131 | Returns: 132 | score : float. 133 | 134 | Example: 135 | >>> from seqeval.metrics import f1_score 136 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 137 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 138 | >>> f1_score(y_true, y_pred) 139 | 0.50 140 | """ 141 | true_entities = set(get_entities(y_true, suffix)) 142 | pred_entities = set(get_entities(y_pred, suffix)) 143 | 144 | nb_correct = len(true_entities & pred_entities) 145 | nb_pred = len(pred_entities) 146 | nb_true = len(true_entities) 147 | 148 | p = nb_correct / nb_pred if nb_pred > 0 else 0 149 | r = nb_correct / nb_true if nb_true > 0 else 0 150 | score = 2 * p * r / (p + r) if p + r > 0 else 0 151 | 152 | return score 153 | 154 | 155 | def accuracy_score(y_true, y_pred): 156 | """Accuracy classification score. 157 | 158 | In multilabel classification, this function computes subset accuracy: 159 | the set of labels predicted for a sample must *exactly* match the 160 | corresponding set of labels in y_true. 161 | 162 | Args: 163 | y_true : 2d array. Ground truth (correct) target values. 164 | y_pred : 2d array. Estimated targets as returned by a tagger. 165 | 166 | Returns: 167 | score : float. 168 | 169 | Example: 170 | >>> from seqeval.metrics import accuracy_score 171 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 172 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 173 | >>> accuracy_score(y_true, y_pred) 174 | 0.80 175 | """ 176 | if any(isinstance(s, list) for s in y_true): 177 | y_true = [item for sublist in y_true for item in sublist] 178 | y_pred = [item for sublist in y_pred for item in sublist] 179 | 180 | nb_correct = sum(y_t==y_p for y_t, y_p in zip(y_true, y_pred)) 181 | nb_true = len(y_true) 182 | 183 | score = nb_correct / nb_true 184 | 185 | return score 186 | 187 | 188 | def precision_score(y_true, y_pred, average='micro', suffix=False): 189 | """Compute the precision. 190 | 191 | The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of 192 | true positives and ``fp`` the number of false positives. The precision is 193 | intuitively the ability of the classifier not to label as positive a sample. 194 | 195 | The best value is 1 and the worst value is 0. 196 | 197 | Args: 198 | y_true : 2d array. Ground truth (correct) target values. 199 | y_pred : 2d array. Estimated targets as returned by a tagger. 200 | 201 | Returns: 202 | score : float. 203 | 204 | Example: 205 | >>> from seqeval.metrics import precision_score 206 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 207 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 208 | >>> precision_score(y_true, y_pred) 209 | 0.50 210 | """ 211 | true_entities = set(get_entities(y_true, suffix)) 212 | pred_entities = set(get_entities(y_pred, suffix)) 213 | 214 | nb_correct = len(true_entities & pred_entities) 215 | nb_pred = len(pred_entities) 216 | 217 | score = nb_correct / nb_pred if nb_pred > 0 else 0 218 | 219 | return score 220 | 221 | 222 | def recall_score(y_true, y_pred, average='micro', suffix=False): 223 | """Compute the recall. 224 | 225 | The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of 226 | true positives and ``fn`` the number of false negatives. The recall is 227 | intuitively the ability of the classifier to find all the positive samples. 228 | 229 | The best value is 1 and the worst value is 0. 230 | 231 | Args: 232 | y_true : 2d array. Ground truth (correct) target values. 233 | y_pred : 2d array. Estimated targets as returned by a tagger. 234 | 235 | Returns: 236 | score : float. 237 | 238 | Example: 239 | >>> from seqeval.metrics import recall_score 240 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 241 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 242 | >>> recall_score(y_true, y_pred) 243 | 0.50 244 | """ 245 | true_entities = set(get_entities(y_true, suffix)) 246 | pred_entities = set(get_entities(y_pred, suffix)) 247 | 248 | nb_correct = len(true_entities & pred_entities) 249 | nb_true = len(true_entities) 250 | 251 | score = nb_correct / nb_true if nb_true > 0 else 0 252 | 253 | return score 254 | 255 | 256 | def performance_measure(y_true, y_pred): 257 | """ 258 | Compute the performance metrics: TP, FP, FN, TN 259 | 260 | Args: 261 | y_true : 2d array. Ground truth (correct) target values. 262 | y_pred : 2d array. Estimated targets as returned by a tagger. 263 | 264 | Returns: 265 | performance_dict : dict 266 | 267 | Example: 268 | >>> from seqeval.metrics import performance_measure 269 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'O', 'B-ORG'], ['B-PER', 'I-PER', 'O']] 270 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'], ['B-PER', 'I-PER', 'O']] 271 | >>> performance_measure(y_true, y_pred) 272 | (3, 3, 1, 4) 273 | """ 274 | performace_dict = dict() 275 | if any(isinstance(s, list) for s in y_true): 276 | y_true = [item for sublist in y_true for item in sublist] 277 | y_pred = [item for sublist in y_pred for item in sublist] 278 | performace_dict['TP'] = sum(y_t == y_p for y_t, y_p in zip(y_true, y_pred) 279 | if ((y_t != 'O') or (y_p != 'O'))) 280 | performace_dict['FP'] = sum(y_t != y_p for y_t, y_p in zip(y_true, y_pred)) 281 | performace_dict['FN'] = sum(((y_t != 'O') and (y_p == 'O')) 282 | for y_t, y_p in zip(y_true, y_pred)) 283 | performace_dict['TN'] = sum((y_t == y_p == 'O') 284 | for y_t, y_p in zip(y_true, y_pred)) 285 | 286 | return performace_dict 287 | 288 | 289 | def classification_report(y_true, y_pred, digits=2, suffix=False): 290 | """Build a text report showing the main classification metrics. 291 | 292 | Args: 293 | y_true : 2d array. Ground truth (correct) target values. 294 | y_pred : 2d array. Estimated targets as returned by a classifier. 295 | digits : int. Number of digits for formatting output floating point values. 296 | 297 | Returns: 298 | report : string. Text summary of the precision, recall, F1 score for each class. 299 | 300 | Examples: 301 | >>> from seqeval.metrics import classification_report 302 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 303 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 304 | >>> print(classification_report(y_true, y_pred)) 305 | precision recall f1-score support 306 | 307 | MISC 0.00 0.00 0.00 1 308 | PER 1.00 1.00 1.00 1 309 | 310 | micro avg 0.50 0.50 0.50 2 311 | macro avg 0.50 0.50 0.50 2 312 | 313 | """ 314 | true_entities = set(get_entities(y_true, suffix)) 315 | pred_entities = set(get_entities(y_pred, suffix)) 316 | 317 | name_width = 0 318 | d1 = defaultdict(set) 319 | d2 = defaultdict(set) 320 | for e in true_entities: 321 | d1[e[0]].add((e[1], e[2])) 322 | name_width = max(name_width, len(e[0])) 323 | for e in pred_entities: 324 | d2[e[0]].add((e[1], e[2])) 325 | 326 | last_line_heading = 'macro avg' 327 | width = max(name_width, len(last_line_heading), digits) 328 | 329 | headers = ["precision", "recall", "f1-score", "support"] 330 | head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers) 331 | report = head_fmt.format(u'', *headers, width=width) 332 | report += u'\n\n' 333 | 334 | row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n' 335 | 336 | ps, rs, f1s, s = [], [], [], [] 337 | for type_name, true_entities in d1.items(): 338 | pred_entities = d2[type_name] 339 | nb_correct = len(true_entities & pred_entities) 340 | nb_pred = len(pred_entities) 341 | nb_true = len(true_entities) 342 | 343 | p = nb_correct / nb_pred if nb_pred > 0 else 0 344 | r = nb_correct / nb_true if nb_true > 0 else 0 345 | f1 = 2 * p * r / (p + r) if p + r > 0 else 0 346 | 347 | report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits) 348 | 349 | ps.append(p) 350 | rs.append(r) 351 | f1s.append(f1) 352 | s.append(nb_true) 353 | 354 | report += u'\n' 355 | 356 | # compute averages 357 | report += row_fmt.format('micro avg', 358 | precision_score(y_true, y_pred, suffix=suffix), 359 | recall_score(y_true, y_pred, suffix=suffix), 360 | f1_score(y_true, y_pred, suffix=suffix), 361 | np.sum(s), 362 | width=width, digits=digits) 363 | report += row_fmt.format(last_line_heading, 364 | np.average(ps, weights=s), 365 | np.average(rs, weights=s), 366 | np.average(f1s, weights=s), 367 | np.sum(s), 368 | width=width, digits=digits) 369 | 370 | return report -------------------------------------------------------------------------------- /code/special_tokens.py: -------------------------------------------------------------------------------- 1 | CHINESE_MAP = {'\t': '[unused2]', 2 | '\n': '[unused3]', 3 | ' ': '[unused1]', 4 | 'A': '[unused8]', 5 | 'B': '[unused9]', 6 | 'C': '[unused10]', 7 | 'D': '[unused11]', 8 | 'E': '[unused12]', 9 | 'F': '[unused13]', 10 | 'G': '[unused14]', 11 | 'H': '[unused15]', 12 | 'I': '[unused16]', 13 | 'J': '[unused17]', 14 | 'K': '[unused18]', 15 | 'L': '[unused19]', 16 | 'M': '[unused20]', 17 | 'N': '[unused21]', 18 | 'O': '[unused22]', 19 | 'P': '[unused23]', 20 | 'Q': '[unused24]', 21 | 'R': '[unused25]', 22 | 'S': '[unused26]', 23 | 'T': '[unused27]', 24 | 'U': '[unused28]', 25 | 'V': '[unused29]', 26 | 'W': '[unused30]', 27 | 'X': '[unused31]', 28 | 'Y': '[unused32]', 29 | 'Z': '[unused33]', 30 | '‘': '[unused6]', 31 | '’': '[unused7]', 32 | '“': '[unused4]', 33 | '”': '[unused5]', 34 | 'Ⅰ': '[unused34]', 35 | 'Ⅱ': '[unused35]', 36 | 'Ⅲ': '[unused36]', 37 | 'Ⅳ': '[unused37]', 38 | 'Ⅴ': '[unused38]', 39 | 'Ⅵ': '[unused39]', 40 | 'Ⅶ': '[unused40]', 41 | 'Ⅷ': '[unused41]', 42 | 'Ⅸ': '[unused42]', 43 | 'Ⅹ': '[unused43]', 44 | 'A': '[unused44]', 45 | 'B': '[unused45]', 46 | 'C': '[unused46]', 47 | 'D': '[unused47]', 48 | 'E': '[unused48]', 49 | 'F': '[unused49]', 50 | 'G': '[unused50]', 51 | 'H': '[unused51]', 52 | 'I': '[unused52]', 53 | 'J': '[unused53]', 54 | 'K': '[unused54]', 55 | 'L': '[unused55]', 56 | 'M': '[unused56]', 57 | 'N': '[unused57]', 58 | 'O': '[unused58]', 59 | 'P': '[unused59]', 60 | 'Q': '[unused60]', 61 | 'R': '[unused61]', 62 | 'S': '[unused62]', 63 | 'T': '[unused63]', 64 | 'U': '[unused64]', 65 | 'V': '[unused65]', 66 | 'W': '[unused66]', 67 | 'X': '[unused67]', 68 | 'Y': '[unused68]', 69 | 'Z': '[unused69]'} -------------------------------------------------------------------------------- /code/word_embedding_matrix.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lsq357/rank6/3285b46e113690b61b3b705e5aac93447eba1de2/code/word_embedding_matrix.npy -------------------------------------------------------------------------------- /code/word_embedding_matrix_v2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lsq357/rank6/3285b46e113690b61b3b705e5aac93447eba1de2/code/word_embedding_matrix_v2.npy -------------------------------------------------------------------------------- /data/External/UER-large/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "bert_model.ckpt" 2 | all_model_checkpoint_paths: "bert_model.ckpt" 3 | -------------------------------------------------------------------------------- /requirments.txt: -------------------------------------------------------------------------------- 1 | Package Version 2 | ---------------------------------- ----------- 3 | absl-py 0.7.1 4 | alabaster 0.7.12 5 | anaconda-client 1.7.2 6 | anaconda-navigator 1.9.7 7 | anaconda-project 0.8.3 8 | apex 0.1 9 | appdirs 1.4.3 10 | asn1crypto 0.24.0 11 | astor 0.7.1 12 | astroid 2.2.5 13 | astropy 3.2.1 14 | atomicwrites 1.3.0 15 | attrs 19.1.0 16 | Babel 2.7.0 17 | backcall 0.1.0 18 | backports.functools-lru-cache 1.5 19 | backports.os 0.1.1 20 | backports.shutil-get-terminal-size 1.0.0 21 | backports.tempfile 1.0 22 | backports.weakref 1.0.post1 23 | beautifulsoup4 4.7.1 24 | bert4keras 0.4.8 25 | bitarray 0.9.3 26 | bkcharts 0.2 27 | bleach 3.1.0 28 | bokeh 1.2.0 29 | boto 2.49.0 30 | boto3 1.9.162 31 | botocore 1.12.194 32 | Bottleneck 1.2.1 33 | bson 0.5.8 34 | bz2file 0.98 35 | certifi 2019.6.16 36 | cffi 1.12.3 37 | chardet 3.0.4 38 | Click 7.0 39 | cloudpickle 1.2.1 40 | clyent 1.2.2 41 | cn2an 0.3.7 42 | colorama 0.4.1 43 | compress-pickle 1.0.1 44 | conda 4.7.12 45 | conda-build 3.18.8 46 | conda-package-handling 1.3.11 47 | conda-verify 3.4.2 48 | contextlib2 0.5.5 49 | cryptography 2.7 50 | cycler 0.10.0 51 | Cython 0.29.12 52 | cytoolz 0.10.0 53 | dask 2.1.0 54 | decorator 4.4.0 55 | defusedxml 0.6.0 56 | descartes 1.1.0 57 | dill 0.3.1.1 58 | distributed 2.1.0 59 | distro 1.4.0 60 | docutils 0.14 61 | edit-distance 1.0.3 62 | entrypoints 0.3 63 | et-xmlfile 1.0.1 64 | fastcache 1.1.0 65 | filelock 3.0.12 66 | Flask 1.1.1 67 | foolnltk 0.1.6 68 | future 0.17.1 69 | fuzzywuzzy 0.17.0 70 | gast 0.2.2 71 | gensim 3.8.1 72 | gevent 1.4.0 73 | glob2 0.7 74 | gmpy2 2.0.8 75 | googledrivedownloader 0.4 76 | greenlet 0.4.15 77 | grpcio 1.16.1 78 | h5py 2.9.0 79 | heapdict 1.0.0 80 | html5lib 1.0.1 81 | hyperopt 0.2.1 82 | idna 2.8 83 | imageio 2.5.0 84 | imagesize 1.1.0 85 | imgaug 0.3.0 86 | importlib-metadata 0.17 87 | ipykernel 5.1.1 88 | ipython 7.6.1 89 | ipython-genutils 0.2.0 90 | ipywidgets 7.5.0 91 | isodate 0.6.0 92 | isort 4.3.21 93 | itsdangerous 1.1.0 94 | jdcal 1.4.1 95 | jedi 0.13.3 96 | jeepney 0.4 97 | jieba 0.39 98 | Jinja2 2.10.1 99 | jmespath 0.9.4 100 | joblib 0.13.2 101 | JPype1 0.7.1 102 | json5 0.8.4 103 | jsonschema 3.0.1 104 | jupyter 1.0.0 105 | jupyter-client 5.3.1 106 | jupyter-console 6.0.0 107 | jupyter-core 4.5.0 108 | jupyterlab 1.0.2 109 | jupyterlab-server 1.0.0 110 | Keras 2.2.4 111 | keras-adaptive-softmax 0.6.0 112 | Keras-Applications 1.0.8 113 | keras-bert 0.70.2 114 | keras-embed-sim 0.7.0 115 | keras-layer-normalization 0.12.0 116 | keras-multi-head 0.22.0 117 | keras-pos-embd 0.11.0 118 | keras-position-wise-feed-forward 0.6.0 119 | Keras-Preprocessing 1.1.0 120 | keras-self-attention 0.41.0 121 | keras-trans-mask 0.3.0 122 | keras-transformer 0.30.0 123 | keras-transformer-xl 0.11.0 124 | keras-xlnet 0.18.0 125 | keyring 18.0.0 126 | kiwisolver 1.1.0 127 | lazy-object-proxy 1.4.1 128 | libarchive-c 2.8 129 | lief 0.9.0 130 | lightgbm 2.2.1 131 | llvmlite 0.29.0 132 | locket 0.2.0 133 | lxml 4.3.4 134 | Markdown 3.1.1 135 | MarkupSafe 1.1.1 136 | matplotlib 3.2.1 137 | mccabe 0.6.1 138 | mistune 0.8.4 139 | mizani 0.6.0 140 | mkl-fft 1.0.12 141 | mkl-random 1.0.2 142 | mkl-service 2.0.2 143 | mock 3.0.5 144 | more-itertools 7.0.0 145 | mpmath 1.1.0 146 | msgpack 0.6.1 147 | multipledispatch 0.6.0 148 | munch 2.3.2 149 | navigator-updater 0.2.1 150 | nbconvert 5.5.0 151 | nbformat 4.4.0 152 | networkx 2.2 153 | nltk 3.4.4 154 | nose 1.3.7 155 | notebook 6.0.0 156 | numba 0.44.1 157 | numexpr 2.6.9 158 | numpy 1.16.4 159 | numpydoc 0.9.1 160 | olefile 0.46 161 | opencc-python-reimplemented 0.1.5 162 | opencv-python 4.1.0.25 163 | opencv-python-headless 4.1.1.26 164 | openpyxl 2.6.2 165 | packaging 19.0 166 | palettable 3.3.0 167 | pandarallel 1.4.1 168 | pandas 0.24.2 169 | pandocfilters 1.4.2 170 | parso 0.5.0 171 | partd 1.0.0 172 | path.py 12.0.1 173 | pathlib2 2.3.4 174 | patsy 0.5.1 175 | pdfminer.six 20200104 176 | pdfminer3k 1.3.1 177 | pdfplumber 0.5.16 178 | pep8 1.7.1 179 | pexpect 4.7.0 180 | pickleshare 0.7.5 181 | Pillow 7.0.0 182 | pip 19.1.1 183 | pkginfo 1.5.0.1 184 | pkuseg 0.0.22 185 | plotnine 0.6.0 186 | pluggy 0.12.0 187 | ply 3.11 188 | plyfile 0.7.1 189 | portalocker 1.5.0 190 | pretrainedmodels 0.7.4 191 | prometheus-client 0.7.1 192 | prompt-toolkit 2.0.9 193 | protobuf 3.8.0 194 | psutil 5.6.3 195 | ptyprocess 0.6.0 196 | py 1.8.0 197 | pycodestyle 2.5.0 198 | pycosat 0.6.3 199 | pycparser 2.19 200 | pycrypto 2.6.1 201 | pycryptodome 3.9.6 202 | pycurl 7.43.0.3 203 | PyExecJS 1.5.1 204 | pyflakes 2.1.1 205 | Pygments 2.4.2 206 | pylint 2.3.1 207 | pyodbc 4.0.26 208 | pyOpenSSL 19.0.0 209 | pyparsing 2.4.0 210 | pypinyin 0.36.0 211 | pyrsistent 0.14.11 212 | PySocks 1.7.0 213 | pytest 5.0.1 214 | pytest-arraydiff 0.3 215 | pytest-astropy 0.5.0 216 | pytest-doctestplus 0.3.0 217 | pytest-openfiles 0.3.2 218 | pytest-remotedata 0.3.1 219 | python-dateutil 2.8.0 220 | python-docx 0.8.10 221 | python-Levenshtein 0.12.0 222 | python-pdfbox 0.1.7.1 223 | pytorch-transformers 1.1.0 224 | pytz 2019.1 225 | PyWavelets 1.0.3 226 | PyYAML 5.1.1 227 | pyzmq 18.0.0 228 | QtAwesome 0.5.7 229 | qtconsole 4.5.1 230 | QtPy 1.8.0 231 | rdflib 4.2.2 232 | regex 2019.11.1 233 | requests 2.22.0 234 | rope 0.14.0 235 | ruamel-yaml 0.15.46 236 | s3transfer 0.2.0 237 | sacrebleu 1.3.7 238 | sacremoses 0.0.35 239 | scikit-image 0.15.0 240 | scikit-learn 0.21.2 241 | scipy 1.3.0 242 | seaborn 0.9.0 243 | SecretStorage 3.1.1 244 | Send2Trash 1.5.0 245 | sentencepiece 0.1.82 246 | setuptools 41.0.1 247 | Shapely 1.6.4.post2 248 | simplegeneric 0.8.1 249 | singledispatch 3.4.0.3 250 | six 1.12.0 251 | smart-open 1.8.4 252 | snowballstemmer 1.9.0 253 | sortedcollections 1.1.2 254 | sortedcontainers 2.1.0 255 | soupsieve 1.8 256 | Sphinx 2.1.2 257 | sphinxcontrib-applehelp 1.0.1 258 | sphinxcontrib-devhelp 1.0.1 259 | sphinxcontrib-htmlhelp 1.0.2 260 | sphinxcontrib-jsmath 1.0.1 261 | sphinxcontrib-qthelp 1.0.2 262 | sphinxcontrib-serializinghtml 1.1.3 263 | sphinxcontrib-websupport 1.1.2 264 | spyder 3.3.6 265 | spyder-kernels 0.5.1 266 | SQLAlchemy 1.3.5 267 | statsmodels 0.10.0 268 | sympy 1.4 269 | tables 3.5.2 270 | tabula-py 2.0.4 271 | tblib 1.4.0 272 | tensorboard 1.13.1 273 | tensorboardX 1.9 274 | tensorflow 1.13.1 275 | tensorflow-estimator 1.13.0 276 | termcolor 1.1.0 277 | terminado 0.8.2 278 | testpath 0.4.2 279 | textrank4zh 0.3 280 | thulac 0.2.0 281 | tika 1.23.1 282 | toolz 0.10.0 283 | torch 1.2.0 284 | torch-geometric 1.4.2 285 | torchvision 0.4.0a0 286 | tornado 6.0.3 287 | tqdm 4.32.1 288 | traitlets 4.3.2 289 | transformers 2.2.2 290 | typing 3.7.4 291 | unicodecsv 0.14.1 292 | urllib3 1.24.2 293 | v 0.0.0 294 | Wand 0.5.8 295 | wcwidth 0.1.7 296 | webencodings 0.5.1 297 | Werkzeug 0.15.4 298 | wheel 0.33.4 299 | widgetsnbextension 3.5.0 300 | wrapt 1.11.2 301 | wurlitzer 1.0.2 302 | xlrd 1.2.0 303 | XlsxWriter 1.1.8 304 | xlwt 1.3.0 305 | xmldict 0.4.1 306 | xmltodict 0.12.0 307 | zhon 1.1.5 308 | zict 1.0.0 309 | zipp 0.5.1 310 | -------------------------------------------------------------------------------- /test/accum_optimizer.py: -------------------------------------------------------------------------------- 1 | #! -*- coding: utf-8 -*- 2 | from keras.optimizers import Optimizer 3 | import keras.backend as K 4 | 5 | 6 | class AccumOptimizer(Optimizer): 7 | """继承Optimizer类,包装原有优化器,实现梯度累积。 8 | # 参数 9 | optimizer:优化器实例,支持目前所有的keras优化器; 10 | steps_per_update:累积的步数。 11 | # 返回 12 | 一个新的keras优化器 13 | Inheriting Optimizer class, wrapping the original optimizer 14 | to achieve a new corresponding optimizer of gradient accumulation. 15 | # Arguments 16 | optimizer: an instance of keras optimizer (supporting 17 | all keras optimizers currently available); 18 | steps_per_update: the steps of gradient accumulation 19 | # Returns 20 | a new keras optimizer. 21 | """ 22 | def __init__(self, optimizer, steps_per_update=1, **kwargs): 23 | super(AccumOptimizer, self).__init__(**kwargs) 24 | self.optimizer = optimizer 25 | with K.name_scope(self.__class__.__name__): 26 | self.steps_per_update = steps_per_update 27 | self.iterations = K.variable(0, dtype='int64', name='iterations') 28 | self.cond = K.equal(self.iterations % self.steps_per_update, 0) 29 | self.lr = self.optimizer.lr 30 | self.optimizer.lr = K.switch(self.cond, self.optimizer.lr, 0.) 31 | for attr in ['momentum', 'rho', 'beta_1', 'beta_2']: 32 | if hasattr(self.optimizer, attr): 33 | value = getattr(self.optimizer, attr) 34 | setattr(self, attr, value) 35 | setattr(self.optimizer, attr, K.switch(self.cond, value, 1 - 1e-7)) 36 | for attr in self.optimizer.get_config(): 37 | if not hasattr(self, attr): 38 | value = getattr(self.optimizer, attr) 39 | setattr(self, attr, value) 40 | # 覆盖原有的获取梯度方法,指向累积梯度 41 | # Cover the original get_gradients method with accumulative gradients. 42 | def get_gradients(loss, params): 43 | return [ag / self.steps_per_update for ag in self.accum_grads] 44 | self.optimizer.get_gradients = get_gradients 45 | def get_updates(self, loss, params): 46 | self.updates = [ 47 | K.update_add(self.iterations, 1), 48 | K.update_add(self.optimizer.iterations, K.cast(self.cond, 'int64')), 49 | ] 50 | # 累积梯度 (gradient accumulation) 51 | self.accum_grads = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params] 52 | grads = self.get_gradients(loss, params) 53 | for g, ag in zip(grads, self.accum_grads): 54 | self.updates.append(K.update(ag, K.switch(self.cond, ag * 0, ag + g))) 55 | # 继承optimizer的更新 (inheriting updates of original optimizer) 56 | self.updates.extend(self.optimizer.get_updates(loss, params)[1:]) 57 | self.weights.extend(self.optimizer.weights) 58 | return self.updates 59 | def get_config(self): 60 | iterations = K.eval(self.iterations) 61 | K.set_value(self.iterations, 0) 62 | config = self.optimizer.get_config() 63 | K.set_value(self.iterations, iterations) 64 | return config -------------------------------------------------------------------------------- /test/metric_utils.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import re 3 | import string 4 | 5 | 6 | def normalize_answer_v2(s): 7 | """Lower text and remove punctuation, articles and extra whitespace.""" 8 | def remove_space(text): 9 | regex = re.compile('\s+', re.UNICODE) 10 | return re.sub(regex, '', text) 11 | 12 | def remove_punc(text): 13 | punctuations = '~.!?' + '﹔·!?。。' + string.punctuation 14 | exclude = set(punctuations) 15 | return ''.join(ch for ch in text if ch not in exclude) 16 | 17 | def lower(text): 18 | return text.lower() 19 | 20 | return remove_space(s) 21 | 22 | 23 | def get_tokens(s): 24 | if not s: 25 | return [] 26 | return list(normalize_answer_v2(s)) 27 | 28 | 29 | def compute_exact(a_gold, a_pred): 30 | return int(normalize_answer_v2(a_gold) == normalize_answer_v2(a_pred)) 31 | 32 | 33 | def compute_f1(a_gold, a_pred): 34 | gold_toks = get_tokens(a_gold) 35 | pred_toks = get_tokens(a_pred) 36 | common = collections.Counter(gold_toks) & collections.Counter(pred_toks) 37 | num_same = sum(common.values()) 38 | if len(gold_toks) == 0 or len(pred_toks) == 0: 39 | # If either is no-answer, then F1 is 1 if they agree, 0 otherwise 40 | return int(gold_toks == pred_toks) 41 | if num_same == 0: 42 | return 0 43 | precision = 1.0 * num_same / len(pred_toks) 44 | recall = 1.0 * num_same / len(gold_toks) 45 | f1 = (2 * precision * recall) / (precision + recall) 46 | return f1 -------------------------------------------------------------------------------- /test/sequence_labeling.py: -------------------------------------------------------------------------------- 1 | """Metrics to assess performance on sequence labeling task given prediction 2 | Functions named as ``*_score`` return a scalar value to maximize: the higher 3 | the better 4 | see `https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py` 5 | """ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | from collections import defaultdict 12 | 13 | import numpy as np 14 | 15 | 16 | def get_entities(seq, suffix=False): 17 | """Gets entities from sequence. 18 | 19 | Args: 20 | seq (list): sequence of labels. 21 | 22 | Returns: 23 | list: list of (chunk_type, chunk_start, chunk_end). 24 | 25 | Example: 26 | >>> from seqeval.metrics.sequence_labeling import get_entities 27 | >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC'] 28 | >>> get_entities(seq) 29 | [('PER', 0, 1), ('LOC', 3, 3)] 30 | """ 31 | # for nested list 32 | if any(isinstance(s, list) for s in seq): 33 | seq = [item for sublist in seq for item in sublist + ['O']] 34 | 35 | prev_tag = 'O' 36 | prev_type = '' 37 | begin_offset = 0 38 | chunks = [] 39 | for i, chunk in enumerate(seq + ['O']): 40 | if suffix: 41 | tag = chunk[-1] 42 | type_ = chunk.split('-')[0] 43 | else: 44 | tag = chunk[0] 45 | type_ = chunk.split('-')[-1] 46 | 47 | if end_of_chunk(prev_tag, tag, prev_type, type_): 48 | chunks.append((prev_type, begin_offset, i-1)) 49 | if start_of_chunk(prev_tag, tag, prev_type, type_): 50 | begin_offset = i 51 | prev_tag = tag 52 | prev_type = type_ 53 | 54 | return chunks 55 | 56 | 57 | def end_of_chunk(prev_tag, tag, prev_type, type_): 58 | """Checks if a chunk ended between the previous and current word. 59 | 60 | Args: 61 | prev_tag: previous chunk tag. 62 | tag: current chunk tag. 63 | prev_type: previous type. 64 | type_: current type. 65 | 66 | Returns: 67 | chunk_end: boolean. 68 | """ 69 | chunk_end = False 70 | 71 | if prev_tag == 'E': chunk_end = True 72 | if prev_tag == 'S': chunk_end = True 73 | 74 | if prev_tag == 'B' and tag == 'B': chunk_end = True 75 | if prev_tag == 'B' and tag == 'S': chunk_end = True 76 | if prev_tag == 'B' and tag == 'O': chunk_end = True 77 | if prev_tag == 'I' and tag == 'B': chunk_end = True 78 | if prev_tag == 'I' and tag == 'S': chunk_end = True 79 | if prev_tag == 'I' and tag == 'O': chunk_end = True 80 | 81 | if prev_tag != 'O' and prev_tag != '.' and prev_type != type_: 82 | chunk_end = True 83 | 84 | return chunk_end 85 | 86 | 87 | def start_of_chunk(prev_tag, tag, prev_type, type_): 88 | """Checks if a chunk started between the previous and current word. 89 | 90 | Args: 91 | prev_tag: previous chunk tag. 92 | tag: current chunk tag. 93 | prev_type: previous type. 94 | type_: current type. 95 | 96 | Returns: 97 | chunk_start: boolean. 98 | """ 99 | chunk_start = False 100 | 101 | if tag == 'B': chunk_start = True 102 | if tag == 'S': chunk_start = True 103 | 104 | if prev_tag == 'E' and tag == 'E': chunk_start = True 105 | if prev_tag == 'E' and tag == 'I': chunk_start = True 106 | if prev_tag == 'S' and tag == 'E': chunk_start = True 107 | if prev_tag == 'S' and tag == 'I': chunk_start = True 108 | if prev_tag == 'O' and tag == 'E': chunk_start = True 109 | if prev_tag == 'O' and tag == 'I': chunk_start = True 110 | 111 | if tag != 'O' and tag != '.' and prev_type != type_: 112 | chunk_start = True 113 | 114 | return chunk_start 115 | 116 | 117 | def f1_score(y_true, y_pred, average='micro', suffix=False): 118 | """Compute the F1 score. 119 | 120 | The F1 score can be interpreted as a weighted average of the precision and 121 | recall, where an F1 score reaches its best value at 1 and worst score at 0. 122 | The relative contribution of precision and recall to the F1 score are 123 | equal. The formula for the F1 score is:: 124 | 125 | F1 = 2 * (precision * recall) / (precision + recall) 126 | 127 | Args: 128 | y_true : 2d array. Ground truth (correct) target values. 129 | y_pred : 2d array. Estimated targets as returned by a tagger. 130 | 131 | Returns: 132 | score : float. 133 | 134 | Example: 135 | >>> from seqeval.metrics import f1_score 136 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 137 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 138 | >>> f1_score(y_true, y_pred) 139 | 0.50 140 | """ 141 | true_entities = set(get_entities(y_true, suffix)) 142 | pred_entities = set(get_entities(y_pred, suffix)) 143 | 144 | nb_correct = len(true_entities & pred_entities) 145 | nb_pred = len(pred_entities) 146 | nb_true = len(true_entities) 147 | 148 | p = nb_correct / nb_pred if nb_pred > 0 else 0 149 | r = nb_correct / nb_true if nb_true > 0 else 0 150 | score = 2 * p * r / (p + r) if p + r > 0 else 0 151 | 152 | return score 153 | 154 | 155 | def accuracy_score(y_true, y_pred): 156 | """Accuracy classification score. 157 | 158 | In multilabel classification, this function computes subset accuracy: 159 | the set of labels predicted for a sample must *exactly* match the 160 | corresponding set of labels in y_true. 161 | 162 | Args: 163 | y_true : 2d array. Ground truth (correct) target values. 164 | y_pred : 2d array. Estimated targets as returned by a tagger. 165 | 166 | Returns: 167 | score : float. 168 | 169 | Example: 170 | >>> from seqeval.metrics import accuracy_score 171 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 172 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 173 | >>> accuracy_score(y_true, y_pred) 174 | 0.80 175 | """ 176 | if any(isinstance(s, list) for s in y_true): 177 | y_true = [item for sublist in y_true for item in sublist] 178 | y_pred = [item for sublist in y_pred for item in sublist] 179 | 180 | nb_correct = sum(y_t==y_p for y_t, y_p in zip(y_true, y_pred)) 181 | nb_true = len(y_true) 182 | 183 | score = nb_correct / nb_true 184 | 185 | return score 186 | 187 | 188 | def precision_score(y_true, y_pred, average='micro', suffix=False): 189 | """Compute the precision. 190 | 191 | The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of 192 | true positives and ``fp`` the number of false positives. The precision is 193 | intuitively the ability of the classifier not to label as positive a sample. 194 | 195 | The best value is 1 and the worst value is 0. 196 | 197 | Args: 198 | y_true : 2d array. Ground truth (correct) target values. 199 | y_pred : 2d array. Estimated targets as returned by a tagger. 200 | 201 | Returns: 202 | score : float. 203 | 204 | Example: 205 | >>> from seqeval.metrics import precision_score 206 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 207 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 208 | >>> precision_score(y_true, y_pred) 209 | 0.50 210 | """ 211 | true_entities = set(get_entities(y_true, suffix)) 212 | pred_entities = set(get_entities(y_pred, suffix)) 213 | 214 | nb_correct = len(true_entities & pred_entities) 215 | nb_pred = len(pred_entities) 216 | 217 | score = nb_correct / nb_pred if nb_pred > 0 else 0 218 | 219 | return score 220 | 221 | 222 | def recall_score(y_true, y_pred, average='micro', suffix=False): 223 | """Compute the recall. 224 | 225 | The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of 226 | true positives and ``fn`` the number of false negatives. The recall is 227 | intuitively the ability of the classifier to find all the positive samples. 228 | 229 | The best value is 1 and the worst value is 0. 230 | 231 | Args: 232 | y_true : 2d array. Ground truth (correct) target values. 233 | y_pred : 2d array. Estimated targets as returned by a tagger. 234 | 235 | Returns: 236 | score : float. 237 | 238 | Example: 239 | >>> from seqeval.metrics import recall_score 240 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 241 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 242 | >>> recall_score(y_true, y_pred) 243 | 0.50 244 | """ 245 | true_entities = set(get_entities(y_true, suffix)) 246 | pred_entities = set(get_entities(y_pred, suffix)) 247 | 248 | nb_correct = len(true_entities & pred_entities) 249 | nb_true = len(true_entities) 250 | 251 | score = nb_correct / nb_true if nb_true > 0 else 0 252 | 253 | return score 254 | 255 | 256 | def performance_measure(y_true, y_pred): 257 | """ 258 | Compute the performance metrics: TP, FP, FN, TN 259 | 260 | Args: 261 | y_true : 2d array. Ground truth (correct) target values. 262 | y_pred : 2d array. Estimated targets as returned by a tagger. 263 | 264 | Returns: 265 | performance_dict : dict 266 | 267 | Example: 268 | >>> from seqeval.metrics import performance_measure 269 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'O', 'B-ORG'], ['B-PER', 'I-PER', 'O']] 270 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'], ['B-PER', 'I-PER', 'O']] 271 | >>> performance_measure(y_true, y_pred) 272 | (3, 3, 1, 4) 273 | """ 274 | performace_dict = dict() 275 | if any(isinstance(s, list) for s in y_true): 276 | y_true = [item for sublist in y_true for item in sublist] 277 | y_pred = [item for sublist in y_pred for item in sublist] 278 | performace_dict['TP'] = sum(y_t == y_p for y_t, y_p in zip(y_true, y_pred) 279 | if ((y_t != 'O') or (y_p != 'O'))) 280 | performace_dict['FP'] = sum(y_t != y_p for y_t, y_p in zip(y_true, y_pred)) 281 | performace_dict['FN'] = sum(((y_t != 'O') and (y_p == 'O')) 282 | for y_t, y_p in zip(y_true, y_pred)) 283 | performace_dict['TN'] = sum((y_t == y_p == 'O') 284 | for y_t, y_p in zip(y_true, y_pred)) 285 | 286 | return performace_dict 287 | 288 | 289 | def classification_report(y_true, y_pred, digits=2, suffix=False): 290 | """Build a text report showing the main classification metrics. 291 | 292 | Args: 293 | y_true : 2d array. Ground truth (correct) target values. 294 | y_pred : 2d array. Estimated targets as returned by a classifier. 295 | digits : int. Number of digits for formatting output floating point values. 296 | 297 | Returns: 298 | report : string. Text summary of the precision, recall, F1 score for each class. 299 | 300 | Examples: 301 | >>> from seqeval.metrics import classification_report 302 | >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 303 | >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] 304 | >>> print(classification_report(y_true, y_pred)) 305 | precision recall f1-score support 306 | 307 | MISC 0.00 0.00 0.00 1 308 | PER 1.00 1.00 1.00 1 309 | 310 | micro avg 0.50 0.50 0.50 2 311 | macro avg 0.50 0.50 0.50 2 312 | 313 | """ 314 | true_entities = set(get_entities(y_true, suffix)) 315 | pred_entities = set(get_entities(y_pred, suffix)) 316 | 317 | name_width = 0 318 | d1 = defaultdict(set) 319 | d2 = defaultdict(set) 320 | for e in true_entities: 321 | d1[e[0]].add((e[1], e[2])) 322 | name_width = max(name_width, len(e[0])) 323 | for e in pred_entities: 324 | d2[e[0]].add((e[1], e[2])) 325 | 326 | last_line_heading = 'macro avg' 327 | width = max(name_width, len(last_line_heading), digits) 328 | 329 | headers = ["precision", "recall", "f1-score", "support"] 330 | head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers) 331 | report = head_fmt.format(u'', *headers, width=width) 332 | report += u'\n\n' 333 | 334 | row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n' 335 | 336 | ps, rs, f1s, s = [], [], [], [] 337 | for type_name, true_entities in d1.items(): 338 | pred_entities = d2[type_name] 339 | nb_correct = len(true_entities & pred_entities) 340 | nb_pred = len(pred_entities) 341 | nb_true = len(true_entities) 342 | 343 | p = nb_correct / nb_pred if nb_pred > 0 else 0 344 | r = nb_correct / nb_true if nb_true > 0 else 0 345 | f1 = 2 * p * r / (p + r) if p + r > 0 else 0 346 | 347 | report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits) 348 | 349 | ps.append(p) 350 | rs.append(r) 351 | f1s.append(f1) 352 | s.append(nb_true) 353 | 354 | report += u'\n' 355 | 356 | # compute averages 357 | report += row_fmt.format('micro avg', 358 | precision_score(y_true, y_pred, suffix=suffix), 359 | recall_score(y_true, y_pred, suffix=suffix), 360 | f1_score(y_true, y_pred, suffix=suffix), 361 | np.sum(s), 362 | width=width, digits=digits) 363 | report += row_fmt.format(last_line_heading, 364 | np.average(ps, weights=s), 365 | np.average(rs, weights=s), 366 | np.average(f1s, weights=s), 367 | np.sum(s), 368 | width=width, digits=digits) 369 | 370 | return report -------------------------------------------------------------------------------- /test/special_tokens.py: -------------------------------------------------------------------------------- 1 | CHINESE_MAP = {'\t': '[unused2]', 2 | '\n': '[unused3]', 3 | ' ': '[unused1]', 4 | 'A': '[unused8]', 5 | 'B': '[unused9]', 6 | 'C': '[unused10]', 7 | 'D': '[unused11]', 8 | 'E': '[unused12]', 9 | 'F': '[unused13]', 10 | 'G': '[unused14]', 11 | 'H': '[unused15]', 12 | 'I': '[unused16]', 13 | 'J': '[unused17]', 14 | 'K': '[unused18]', 15 | 'L': '[unused19]', 16 | 'M': '[unused20]', 17 | 'N': '[unused21]', 18 | 'O': '[unused22]', 19 | 'P': '[unused23]', 20 | 'Q': '[unused24]', 21 | 'R': '[unused25]', 22 | 'S': '[unused26]', 23 | 'T': '[unused27]', 24 | 'U': '[unused28]', 25 | 'V': '[unused29]', 26 | 'W': '[unused30]', 27 | 'X': '[unused31]', 28 | 'Y': '[unused32]', 29 | 'Z': '[unused33]', 30 | '‘': '[unused6]', 31 | '’': '[unused7]', 32 | '“': '[unused4]', 33 | '”': '[unused5]', 34 | 'Ⅰ': '[unused34]', 35 | 'Ⅱ': '[unused35]', 36 | 'Ⅲ': '[unused36]', 37 | 'Ⅳ': '[unused37]', 38 | 'Ⅴ': '[unused38]', 39 | 'Ⅵ': '[unused39]', 40 | 'Ⅶ': '[unused40]', 41 | 'Ⅷ': '[unused41]', 42 | 'Ⅸ': '[unused42]', 43 | 'Ⅹ': '[unused43]', 44 | 'A': '[unused44]', 45 | 'B': '[unused45]', 46 | 'C': '[unused46]', 47 | 'D': '[unused47]', 48 | 'E': '[unused48]', 49 | 'F': '[unused49]', 50 | 'G': '[unused50]', 51 | 'H': '[unused51]', 52 | 'I': '[unused52]', 53 | 'J': '[unused53]', 54 | 'K': '[unused54]', 55 | 'L': '[unused55]', 56 | 'M': '[unused56]', 57 | 'N': '[unused57]', 58 | 'O': '[unused58]', 59 | 'P': '[unused59]', 60 | 'Q': '[unused60]', 61 | 'R': '[unused61]', 62 | 'S': '[unused62]', 63 | 'T': '[unused63]', 64 | 'U': '[unused64]', 65 | 'V': '[unused65]', 66 | 'W': '[unused66]', 67 | 'X': '[unused67]', 68 | 'Y': '[unused68]', 69 | 'Z': '[unused69]'} -------------------------------------------------------------------------------- /test/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | import collections 4 | import gc 5 | import json 6 | import os 7 | from glob import glob 8 | from time import time 9 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 10 | from random import choice, seed, randint, random 11 | import pandas as pd 12 | import numpy as np 13 | import tensorflow as tf 14 | import keras.backend as K 15 | import keras 16 | from keras.models import Sequential, Model 17 | from keras.layers import Input, CuDNNGRU as GRU, CuDNNLSTM as LSTM, Dropout, BatchNormalization 18 | from keras.layers import Dense, Concatenate, Activation, Embedding, SpatialDropout1D, Bidirectional, Lambda, Conv1D 19 | from keras.layers import Add, Average 20 | from keras.optimizers import Nadam, Adam, Adamax 21 | from keras.activations import absolute_import 22 | from keras.legacy import interfaces 23 | from keras.preprocessing.sequence import pad_sequences 24 | from keras.callbacks import Callback 25 | from keras.utils import to_categorical 26 | from keras_bert.loader import load_trained_model_from_checkpoint 27 | from keras_bert import AdamWarmup, calc_train_steps 28 | from keras.engine import Layer 29 | from keras.engine import InputSpec 30 | from keras.objectives import categorical_crossentropy 31 | from keras.objectives import sparse_categorical_crossentropy 32 | from keras import activations, initializers, regularizers, constraints 33 | from keras.models import load_model 34 | from keras_bert import get_custom_objects 35 | from tqdm import tqdm 36 | from special_tokens import CHINESE_MAP 37 | from metric_utils import compute_f1, compute_exact 38 | from collections import OrderedDict, Counter 39 | from bert4keras.layers import ZeroMasking 40 | 41 | 42 | DEBUG = False 43 | # BERT_PRETRAINED_DIR = "../../../chinese_bert/chinese_roberta_wwm_large_ext_L-24_H-1024_A-16/" 44 | # TRN_FILENAME = "../data/train_20200228.csv" 45 | TEST_FILENAME = "../data/test.csv" 46 | save_filename = "../prediction_result/result_{}.csv" 47 | # DEV_FILENAME = "../data/dev_20200228.csv" 48 | 49 | 50 | MODEL_DIR = "../user_data/" 51 | PREFIX = "1.25" 52 | MAX_EPOCH = 15 53 | MAX_LEN = 60 54 | MAX_DOC_LEN = MAX_LEN // 2 55 | THRE = 0.5 56 | B_SIZE = 32 57 | ACCUM_STEP = int(32 // B_SIZE) 58 | FOLD_ID = [-1] 59 | FOLD_NUM = 20 60 | SEED = 2020 61 | PREFIX += "_seed" + str(SEED) 62 | SHUFFLE = True 63 | DOC_STRIDE = 128 64 | cfg = {} 65 | cfg["verbose"] = PREFIX 66 | cfg["span_mode"] = True 67 | cfg["lr"] = 5e-6 68 | cfg['min_lr'] = 6e-8 69 | cfg["ch_type"] = "tx_ft" 70 | cfg["trainable"] = True 71 | cfg["bert_trainable"] = True 72 | cfg["accum_step"] = ACCUM_STEP 73 | cfg["cls_num"] = 4 74 | cfg["unit1"] = 128 75 | cfg["unit2"] = 128 76 | cfg["unit3"] = 512 77 | cfg["conv_num"] = 128 78 | cfg['maxlen'] = MAX_LEN 79 | cfg["adv_training"] = False 80 | 81 | # train_data = pd.read_csv(TRN_FILENAME) 82 | # train_data.fillna("", inplace=True) 83 | # dev_data = pd.read_csv(DEV_FILENAME) 84 | # dev_data.fillna("", inplace=True) 85 | # all_data = pd.concat([train_data, dev_data], axis=0, ignore_index=True) 86 | 87 | def get_data(df_data): 88 | 89 | df_gb = df_data.groupby('query1') 90 | res = {} 91 | for index, data in df_gb: 92 | query2s = data["query2"] 93 | lables = data["label"] 94 | ele = {} 95 | pos_qs = [] 96 | neg_qs = [] 97 | for q, lable in zip(query2s, lables): 98 | if lable == 1: 99 | pos_qs.append(q) 100 | elif lable == 0: 101 | neg_qs.append(q) 102 | else: 103 | print("wrong data", index, q, lable) 104 | ele["pos"] = pos_qs 105 | ele["neg"] = neg_qs 106 | res[index] = ele 107 | return res 108 | 109 | 110 | # In[3]: 111 | 112 | 113 | def get_vocab(base_dir, albert=False, clue=False): 114 | if albert or "albert"in cfg["verbose"].lower(): 115 | dict_path = os.path.join(base_dir, 'vocab_chinese.txt') 116 | elif clue: 117 | dict_path = os.path.join(base_dir, 'vocab_clue.txt') 118 | else: 119 | dict_path = os.path.join(base_dir, 'vocab.txt') 120 | print(dict_path) 121 | with open(dict_path, mode="r", encoding="utf8") as f: 122 | lines = f.readlines() 123 | lines = [l.strip() for l in lines] 124 | 125 | word_index = {v: k for k, v in enumerate(lines)} 126 | for k, v in CHINESE_MAP.items(): 127 | assert v in word_index 128 | if k in word_index: 129 | print("[!] CHINESE_MAP k = {} is in word_index, DON'T using `{}` to replace".format(k, v)) 130 | continue 131 | del word_index[v] 132 | return word_index 133 | 134 | 135 | def token2id_X(x, x_dict, x2=None, maxlen=None, maxlen1=None): 136 | if x2: 137 | x1 = x 138 | del x 139 | maxlen -= 3 140 | maxlen1 -= 2 141 | assert maxlen > maxlen1 142 | maxlen2 = maxlen - maxlen1 - 1 143 | x1 = ["[CLS]"] + list(x1)[: maxlen1] + ["[SEP]"] 144 | x1 = [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x1] 145 | seg1= [0 for _ in x1] 146 | 147 | x2 = list(x2)[: maxlen2] + ["[SEP]"] 148 | x2= [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x2] 149 | seg2 = [1 for _ in x2] 150 | x = x1 + x2 151 | seg = seg1 + seg2 152 | 153 | else: 154 | maxlen -= 2 155 | x = ["[CLS]"] + list(x)[: maxlen] + ["[SEP]"] 156 | x = [x_dict[e] if e in x_dict else x_dict["[UNK]"] for e in x] 157 | seg = [0 for _ in x] 158 | return x, seg 159 | 160 | 161 | def seq_padding(X, maxlen=None, padding_value=None, debug=False): 162 | L = [len(x) for x in X] 163 | if maxlen is None: 164 | maxlen = max(L) 165 | 166 | pad_X = np.array([ 167 | np.concatenate([x, [padding_value] * (maxlen - len(x))]) if len(x) < maxlen else x for x in X 168 | ]) 169 | if debug: 170 | print("[!] before pading {}\n".format(X)) 171 | print("[!] after pading {}\n".format(pad_X)) 172 | return pad_X 173 | 174 | 175 | def get_model(model_): 176 | model_inp_ind = [0, 1] 177 | inputs = [model_.inputs[e] for e in model_inp_ind] 178 | sub_model = Model(inputs=inputs, outputs=[model_.get_layer("po1").output]) 179 | return sub_model 180 | 181 | 182 | # In[ ]: 183 | 184 | 185 | def test(sub_model, data, bs=32, x_dict=None): 186 | idxs = list(range(len(data))) 187 | T1, T2, O1, O2 = [], [], [], [] 188 | preds = [] 189 | for i in idxs: 190 | d = data.iloc[i] 191 | text = d["query1"] 192 | label_text = d["query2"] 193 | 194 | t1, t2 = token2id_X(text, x2=label_text, x_dict=word_index, maxlen=MAX_LEN, maxlen1=MAX_DOC_LEN) 195 | assert len(t1) == len(t2) 196 | 197 | T1.append(t1) 198 | T2.append(t2) 199 | 200 | if len(T1) == bs or i == idxs[-1]: 201 | T1 = seq_padding(T1, padding_value=cfg["x_pad"]) 202 | T2 = seq_padding(T2, padding_value=0) 203 | assert T1.shape == T2.shape 204 | pred = sub_model.predict([T1, T2]) 205 | preds.append(pred) 206 | T1, T2 = [], [] 207 | 208 | preds = np.concatenate(preds, axis=0).reshape(-1) 209 | return preds 210 | 211 | 212 | def ensemble_predictions(predictions, weights=None, type_="linear"): 213 | if not weights: 214 | # print("[!] AVE_WGT") 215 | weights = [1./ len(predictions) for _ in range(len(predictions))] 216 | assert len(predictions) == len(weights) 217 | if np.sum(weights) != 1.0: 218 | weights = [w / np.sum(weights) for w in weights] 219 | # print("[!] weights = {}".format(weights)) 220 | assert np.isclose(np.sum(weights), 1.0) 221 | if type_ == "linear": 222 | res = np.average(predictions, weights=weights, axis=0) 223 | elif type_ == "harmonic": 224 | res = np.average([1 / p for p in predictions], weights=weights, axis=0) 225 | return 1 / res 226 | elif type_ == "geometric": 227 | numerator = np.average( 228 | [np.log(p) for p in predictions], weights=weights, axis=0 229 | ) 230 | res = np.exp(numerator / sum(weights)) 231 | return res 232 | elif type_ == "rank": 233 | from scipy.stats import rankdata 234 | res = np.average([rankdata(p) for p in predictions], weights=weights, axis=0) 235 | return res / (len(res) + 1) 236 | return res 237 | 238 | 239 | test_data = pd.read_csv(TEST_FILENAME) 240 | 241 | model_files_v1 = sorted(glob(os.path.join(MODEL_DIR, "*v12*.h5"))) 242 | len_1 = len(model_files_v1) 243 | model_files_v3 = sorted(glob(os.path.join(MODEL_DIR, "*v13*.h5"))) 244 | len_3 = len(model_files_v3) 245 | 246 | 247 | model_files = model_files_v1 + model_files_v3 248 | 249 | print(len_1, len_3) 250 | 251 | if DEBUG: 252 | from random import shuffle, seed 253 | seed(124) 254 | shuffle(model_files) 255 | model_files = model_files[: 2] 256 | 257 | print(PREFIX, TEST_FILENAME, save_filename, model_files) 258 | assert len(model_files) == len(set(model_files)) 259 | assert all([os.path.exists(f) for f in model_files]) 260 | preds = [] 261 | t0 = time() 262 | for f in model_files: 263 | print("-" * 80) 264 | _t0 = time() 265 | print(f) 266 | if "albert" in f: 267 | word_index = get_vocab(base_dir="./", albert=True) 268 | elif "pair" in f or "clue" in f: 269 | word_index = get_vocab(base_dir="./", clue=True) 270 | else: 271 | word_index = get_vocab(base_dir="./") 272 | cfg["x_pad"] = word_index["[PAD]"] 273 | K.clear_session() 274 | print("[!] x_pad = {}".format(cfg["x_pad"])) 275 | if "albert" in f.lower() or "nezha" in f.lower(): 276 | model = load_model(f) 277 | else: 278 | model = load_model(f, custom_objects=get_custom_objects()) 279 | sub_model = get_model(model) 280 | pred = test(sub_model, test_data, x_dict=word_index) 281 | # auc = roc_auc_score(O1, pred) 282 | # acc = accuracy_score(O1, np.array(pred > 0.5, "int32")) 283 | # print("[{}]".format(time() - t0), auc, acc) 284 | print("[{}] f = `{}`, finish".format(time() - _t0, f)) 285 | print(pred.shape) 286 | preds.append(pred) 287 | del model, word_index, pred 288 | gc.collect() 289 | 290 | print("[{}]".format(time() - t0)) 291 | print(len_1, len_3) 292 | pred1 = ensemble_predictions(preds[0: len_1]) 293 | pred3 = ensemble_predictions(preds[len_1: len_1 + len_3]) 294 | 295 | pred = ensemble_predictions([pred1, pred3]) 296 | print(pred1[: 3], pred3[: 3], pred[: 3]) 297 | 298 | for q in [0.382, 0.387, 0.392, 0.397, 0.402, 0.407, 0.412]: 299 | thre = np.quantile(pred, q=1-q) 300 | print("[!]", q, thre, (pred > thre).astype("int32").sum()) 301 | 302 | POS_QUAN = 0.3945 303 | thre = np.quantile(pred, q=1 - POS_QUAN) 304 | test_data["prob"] = pred 305 | test_data["label"] = (pred > thre).astype("int32") 306 | print(test_data.describe()) 307 | print("-" * 81) 308 | print(pred.shape, thre, POS_QUAN, PREFIX, TEST_FILENAME, save_filename, model_files) 309 | print(test_data["label"].value_counts()) 310 | 311 | import datetime 312 | save_filename = save_filename.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')) 313 | test_data[["id", "label"]].to_csv(save_filename, index=False) 314 | print("verbose = {} FINISH".format(PREFIX)) 315 | 316 | -------------------------------------------------------------------------------- /test/vocab_clue.txt: -------------------------------------------------------------------------------- 1 | [PAD] 2 | [unused1] 3 | [unused2] 4 | [unused3] 5 | [unused4] 6 | [unused5] 7 | [unused6] 8 | [unused7] 9 | [unused8] 10 | [unused9] 11 | [unused10] 12 | [unused11] 13 | [unused12] 14 | [unused13] 15 | [unused14] 16 | [unused15] 17 | [unused16] 18 | [unused17] 19 | [unused18] 20 | [unused19] 21 | [unused20] 22 | [unused21] 23 | [unused22] 24 | [unused23] 25 | [unused24] 26 | [unused25] 27 | [unused26] 28 | [unused27] 29 | [unused28] 30 | [unused29] 31 | [unused30] 32 | [unused31] 33 | [unused32] 34 | [unused33] 35 | [unused34] 36 | [unused35] 37 | [unused36] 38 | [unused37] 39 | [unused38] 40 | [unused39] 41 | [unused40] 42 | [unused41] 43 | [unused42] 44 | [unused43] 45 | [unused44] 46 | [unused45] 47 | [unused46] 48 | [unused47] 49 | [unused48] 50 | [unused49] 51 | [unused50] 52 | [unused51] 53 | [unused52] 54 | [unused53] 55 | [unused54] 56 | [unused55] 57 | [unused56] 58 | [unused57] 59 | [unused58] 60 | [unused59] 61 | [unused60] 62 | [unused61] 63 | [unused62] 64 | [unused63] 65 | [unused64] 66 | [unused65] 67 | [unused66] 68 | [unused67] 69 | [unused68] 70 | [unused69] 71 | [unused70] 72 | [unused71] 73 | [unused72] 74 | [unused73] 75 | [unused74] 76 | [unused75] 77 | [unused76] 78 | [unused77] 79 | [unused78] 80 | [unused79] 81 | [unused80] 82 | [unused81] 83 | [unused82] 84 | [unused83] 85 | [unused84] 86 | [unused85] 87 | [unused86] 88 | [unused87] 89 | [unused88] 90 | [unused89] 91 | [unused90] 92 | [unused91] 93 | [unused92] 94 | [unused93] 95 | [unused94] 96 | [unused95] 97 | [unused96] 98 | [unused97] 99 | [unused98] 100 | [unused99] 101 | [UNK] 102 | [CLS] 103 | [SEP] 104 | [MASK] 105 | 106 | 107 | ! 108 | " 109 | “ 110 | ‘ 111 | # 112 | $ 113 | % 114 | & 115 | ' 116 | ( 117 | ) 118 | * 119 | + 120 | , 121 | - 122 | . 123 | / 124 | 0 125 | 1 126 | 2 127 | 3 128 | 4 129 | 5 130 | 6 131 | 7 132 | 8 133 | 9 134 | 10 135 | 11 136 | 12 137 | 13 138 | 14 139 | 15 140 | 16 141 | 17 142 | 18 143 | 19 144 | 20 145 | 21 146 | 22 147 | 23 148 | 24 149 | 25 150 | 26 151 | 27 152 | 28 153 | 29 154 | 30 155 | 31 156 | 32 157 | 33 158 | 34 159 | 35 160 | 36 161 | 37 162 | 38 163 | 39 164 | 40 165 | 41 166 | 42 167 | 43 168 | 44 169 | 45 170 | 46 171 | 47 172 | 48 173 | 49 174 | 50 175 | 51 176 | 52 177 | 53 178 | 54 179 | 55 180 | 56 181 | 57 182 | 58 183 | 59 184 | 60 185 | 61 186 | 62 187 | 63 188 | 64 189 | 65 190 | 66 191 | 67 192 | 68 193 | 69 194 | 70 195 | 71 196 | 72 197 | 73 198 | 74 199 | 75 200 | 76 201 | 77 202 | 78 203 | 79 204 | 80 205 | 81 206 | 82 207 | 83 208 | 84 209 | 85 210 | 86 211 | 87 212 | 88 213 | 89 214 | 90 215 | 91 216 | 92 217 | 93 218 | 94 219 | 95 220 | 96 221 | 97 222 | 98 223 | 99 224 | 100 225 | 2016 226 | 2017 227 | 2018 228 | 2019 229 | 2020 230 | 2021 231 | 2022 232 | : 233 | ; 234 | < 235 | = 236 | > 237 | ? 238 | @ 239 | [ 240 | \ 241 | ] 242 | ^ 243 | _ 244 | a 245 | b 246 | c 247 | d 248 | e 249 | f 250 | g 251 | h 252 | i 253 | j 254 | k 255 | l 256 | m 257 | n 258 | o 259 | p 260 | q 261 | r 262 | s 263 | t 264 | u 265 | v 266 | w 267 | x 268 | y 269 | z 270 | { 271 | | 272 | } 273 | ~ 274 | £ 275 | ¤ 276 | ¥ 277 | § 278 | « 279 | ° 280 | ± 281 | ² 282 | ³ 283 | µ 284 | · 285 | ¹ 286 | º 287 | » 288 | ¼ 289 | × 290 | ß 291 | æ 292 | ÷ 293 | ø 294 | đ 295 | ŋ 296 | ɔ 297 | ə 298 | ɡ 299 | ʰ 300 | ˇ 301 | ˈ 302 | ˊ 303 | ˋ 304 | ˍ 305 | ː 306 | ˙ 307 | ˚ 308 | ˢ 309 | α 310 | β 311 | γ 312 | δ 313 | ε 314 | η 315 | θ 316 | ι 317 | κ 318 | λ 319 | μ 320 | ν 321 | ο 322 | π 323 | ρ 324 | ς 325 | σ 326 | τ 327 | υ 328 | φ 329 | χ 330 | ψ 331 | ω 332 | а 333 | б 334 | в 335 | г 336 | д 337 | е 338 | ж 339 | з 340 | и 341 | к 342 | л 343 | м 344 | н 345 | о 346 | п 347 | р 348 | с 349 | т 350 | у 351 | ф 352 | х 353 | ц 354 | ч 355 | ш 356 | ы 357 | ь 358 | я 359 | і 360 | ا 361 | ب 362 | ة 363 | ت 364 | د 365 | ر 366 | س 367 | ع 368 | ل 369 | م 370 | ن 371 | ه 372 | و 373 | ي 374 | ۩ 375 | ก 376 | ง 377 | น 378 | ม 379 | ย 380 | ร 381 | อ 382 | า 383 | เ 384 | ๑ 385 | ་ 386 | ღ 387 | ᵃ 388 | ᵉ 389 | ᵍ 390 | ᵏ 391 | ᵐ 392 | ᵒ 393 | ᵘ 394 | ‖ 395 | „ 396 | † 397 | • 398 | ‥ 399 | ‧ 400 | ‰ 401 | ′ 402 | ″ 403 | ‹ 404 | › 405 | ※ 406 | ‿ 407 | ⁄ 408 | ⁱ 409 | ⁺ 410 | ⁿ 411 | ₁ 412 | ₂ 413 | ₃ 414 | ₄ 415 | € 416 | ℃ 417 | № 418 | ⅰ 419 | ⅱ 420 | ⅲ 421 | ⅳ 422 | ⅴ 423 | ← 424 | ↑ 425 | → 426 | ↓ 427 | ⇒ 428 | ∀ 429 | − 430 | ∕ 431 | ∙ 432 | √ 433 | ∞ 434 | ∟ 435 | ∠ 436 | ∣ 437 | ∥ 438 | ∩ 439 | ∮ 440 | ∶ 441 | ∼ 442 | ∽ 443 | ≈ 444 | ≒ 445 | ≡ 446 | ≤ 447 | ≥ 448 | ≦ 449 | ≧ 450 | ≪ 451 | ≫ 452 | ⊙ 453 | ⋅ 454 | ⋈ 455 | ⋯ 456 | ⌒ 457 | ① 458 | ② 459 | ③ 460 | ④ 461 | ⑤ 462 | ⑥ 463 | ⑦ 464 | ⑧ 465 | ⑨ 466 | ⑩ 467 | ⑴ 468 | ⑵ 469 | ⑶ 470 | ⑷ 471 | ⑸ 472 | ⒈ 473 | ⒉ 474 | ⒊ 475 | ⒋ 476 | ⓒ 477 | ⓔ 478 | ⓘ 479 | ─ 480 | ━ 481 | │ 482 | ┃ 483 | ┅ 484 | ┆ 485 | ┊ 486 | ┌ 487 | └ 488 | ├ 489 | ┣ 490 | ═ 491 | ║ 492 | ╚ 493 | ╞ 494 | ╠ 495 | ╭ 496 | ╮ 497 | ╯ 498 | ╰ 499 | ╱ 500 | ╳ 501 | ▂ 502 | ▃ 503 | ▅ 504 | ▇ 505 | █ 506 | ▉ 507 | ▋ 508 | ▌ 509 | ▍ 510 | ▎ 511 | ■ 512 | □ 513 | ▬ 514 | ▲ 515 | △ 516 | ► 517 | ▼ 518 | ▽ 519 | ◆ 520 | ◇ 521 | ○ 522 | ◎ 523 | ● 524 | ◕ 525 | ◠ 526 | ◢ 527 | ◤ 528 | ★ 529 | ☆ 530 | ☞ 531 | ☼ 532 | ♡ 533 | ♪ 534 | ♫ 535 | ♬ 536 | ✕ 537 | ✦ 538 | ✪ 539 | ✰ 540 | ✿ 541 | ❀ 542 | ➜ 543 | ➤ 544 | ⦿ 545 | 、 546 | 。 547 | 〃 548 | 々 549 | 〇 550 | 〈 551 | 〉 552 | 《 553 | 》 554 | 「 555 | 」 556 | 『 557 | 』 558 | 【 559 | 】 560 | 〓 561 | 〔 562 | 〕 563 | 〖 564 | 〗 565 | 〜 566 | 〝 567 | 〞 568 | ㄧ 569 | ㆍ 570 | ㈦ 571 | ㊣ 572 | ㎡ 573 | 㗎 574 | 一 575 | 丁 576 | 七 577 | 万 578 | 丈 579 | 三 580 | 上 581 | 下 582 | 不 583 | 与 584 | 丐 585 | 丑 586 | 专 587 | 且 588 | 丕 589 | 世 590 | 丘 591 | 丙 592 | 业 593 | 丛 594 | 东 595 | 丝 596 | 丞 597 | 両 598 | 丢 599 | 两 600 | 严 601 | 丧 602 | 丨 603 | 个 604 | 丫 605 | 中 606 | 丰 607 | 串 608 | 临 609 | 丶 610 | 丸 611 | 丹 612 | 为 613 | 主 614 | 丼 615 | 丽 616 | 举 617 | 丿 618 | 乂 619 | 乃 620 | 久 621 | 么 622 | 义 623 | 之 624 | 乌 625 | 乍 626 | 乎 627 | 乏 628 | 乐 629 | 乒 630 | 乓 631 | 乔 632 | 乖 633 | 乗 634 | 乘 635 | 乙 636 | 乜 637 | 九 638 | 乞 639 | 也 640 | 习 641 | 乡 642 | 书 643 | 乩 644 | 买 645 | 乱 646 | 乳 647 | 亀 648 | 了 649 | 予 650 | 争 651 | 事 652 | 二 653 | 于 654 | 亏 655 | 云 656 | 互 657 | 五 658 | 井 659 | 亘 660 | 亚 661 | 些 662 | 亜 663 | 亟 664 | 亡 665 | 亢 666 | 交 667 | 亥 668 | 亦 669 | 产 670 | 亨 671 | 亩 672 | 享 673 | 京 674 | 亭 675 | 亮 676 | 亲 677 | 亳 678 | 亵 679 | 人 680 | 亿 681 | 什 682 | 仁 683 | 仃 684 | 仄 685 | 仅 686 | 仆 687 | 仇 688 | 今 689 | 介 690 | 仍 691 | 从 692 | 仏 693 | 仑 694 | 仓 695 | 仔 696 | 仕 697 | 他 698 | 仗 699 | 付 700 | 仙 701 | 仝 702 | 仞 703 | 仟 704 | 代 705 | 令 706 | 以 707 | 仨 708 | 仪 709 | 们 710 | 仮 711 | 仰 712 | 仲 713 | 件 714 | 价 715 | 任 716 | 份 717 | 仿 718 | 企 719 | 伉 720 | 伊 721 | 伍 722 | 伎 723 | 伏 724 | 伐 725 | 休 726 | 伕 727 | 众 728 | 优 729 | 伙 730 | 会 731 | 伝 732 | 伞 733 | 伟 734 | 传 735 | 伢 736 | 伤 737 | 伦 738 | 伪 739 | 伫 740 | 伯 741 | 估 742 | 伴 743 | 伶 744 | 伸 745 | 伺 746 | 似 747 | 伽 748 | 佃 749 | 但 750 | 位 751 | 低 752 | 住 753 | 佐 754 | 佑 755 | 体 756 | 佔 757 | 何 758 | 佗 759 | 佘 760 | 佚 761 | 佛 762 | 作 763 | 佝 764 | 佞 765 | 佟 766 | 你 767 | 佢 768 | 佣 769 | 佤 770 | 佥 771 | 佩 772 | 佬 773 | 佯 774 | 佰 775 | 佳 776 | 佶 777 | 佻 778 | 佼 779 | 使 780 | 侃 781 | 侄 782 | 侈 783 | 例 784 | 侍 785 | 侏 786 | 侑 787 | 侗 788 | 供 789 | 依 790 | 侠 791 | 価 792 | 侣 793 | 侥 794 | 侦 795 | 侧 796 | 侨 797 | 侬 798 | 侮 799 | 侯 800 | 侵 801 | 侷 802 | 便 803 | 促 804 | 俄 805 | 俊 806 | 俎 807 | 俏 808 | 俐 809 | 俑 810 | 俗 811 | 俘 812 | 俚 813 | 保 814 | 俞 815 | 俟 816 | 信 817 | 俨 818 | 俩 819 | 俪 820 | 俬 821 | 俭 822 | 修 823 | 俯 824 | 俱 825 | 俳 826 | 俸 827 | 俺 828 | 俾 829 | 倌 830 | 倍 831 | 倏 832 | 倒 833 | 倔 834 | 倖 835 | 倘 836 | 候 837 | 倚 838 | 倜 839 | 借 840 | 倡 841 | 値 842 | 倦 843 | 倩 844 | 倪 845 | 倬 846 | 倭 847 | 倶 848 | 债 849 | 值 850 | 倾 851 | 偃 852 | 假 853 | 偈 854 | 偌 855 | 偎 856 | 偏 857 | 偕 858 | 做 859 | 停 860 | 健 861 | 偶 862 | 偷 863 | 偻 864 | 偿 865 | 傀 866 | 傅 867 | 傍 868 | 傚 869 | 傣 870 | 傥 871 | 储 872 | 傩 873 | 催 874 | 傲 875 | 傻 876 | 働 877 | 像 878 | 僖 879 | 僚 880 | 僧 881 | 僭 882 | 僮 883 | 僱 884 | 僵 885 | 僻 886 | 儆 887 | 儋 888 | 儒 889 | 儡 890 | 儿 891 | 兀 892 | 允 893 | 元 894 | 兄 895 | 充 896 | 兆 897 | 先 898 | 光 899 | 克 900 | 免 901 | 児 902 | 兑 903 | 兔 904 | 兖 905 | 党 906 | 兜 907 | 兢 908 | 入 909 | 全 910 | 八 911 | 公 912 | 六 913 | 兮 914 | 兰 915 | 共 916 | 兲 917 | 关 918 | 兴 919 | 兵 920 | 其 921 | 具 922 | 典 923 | 兹 924 | 养 925 | 兼 926 | 兽 927 | 冀 928 | 内 929 | 円 930 | 冇 931 | 冈 932 | 冉 933 | 册 934 | 再 935 | 冏 936 | 冒 937 | 冕 938 | 冗 939 | 写 940 | 军 941 | 农 942 | 冠 943 | 冢 944 | 冤 945 | 冥 946 | 冨 947 | 冬 948 | 冯 949 | 冰 950 | 冲 951 | 决 952 | 况 953 | 冶 954 | 冷 955 | 冻 956 | 冼 957 | 冽 958 | 冾 959 | 净 960 | 凄 961 | 准 962 | 凇 963 | 凉 964 | 凋 965 | 凌 966 | 减 967 | 凑 968 | 凛 969 | 凝 970 | 几 971 | 凡 972 | 凤 973 | 処 974 | 凪 975 | 凭 976 | 凯 977 | 凰 978 | 凳 979 | 凶 980 | 凸 981 | 凹 982 | 出 983 | 击 984 | 函 985 | 凿 986 | 刀 987 | 刁 988 | 刃 989 | 分 990 | 切 991 | 刈 992 | 刊 993 | 刍 994 | 刎 995 | 刑 996 | 划 997 | 列 998 | 刘 999 | 则 1000 | 刚 1001 | 创 1002 | 初 1003 | 删 1004 | 判 1005 | 刨 1006 | 利 1007 | 别 1008 | 刮 1009 | 到 1010 | 制 1011 | 刷 1012 | 券 1013 | 刹 1014 | 刺 1015 | 刻 1016 | 刽 1017 | 剁 1018 | 剂 1019 | 剃 1020 | 剉 1021 | 削 1022 | 剌 1023 | 前 1024 | 剐 1025 | 剑 1026 | 剔 1027 | 剖 1028 | 剜 1029 | 剣 1030 | 剤 1031 | 剥 1032 | 剧 1033 | 剩 1034 | 剪 1035 | 副 1036 | 割 1037 | 剷 1038 | 剽 1039 | 剿 1040 | 劈 1041 | 力 1042 | 劝 1043 | 办 1044 | 功 1045 | 加 1046 | 务 1047 | 劣 1048 | 动 1049 | 助 1050 | 努 1051 | 劫 1052 | 劭 1053 | 励 1054 | 劲 1055 | 劳 1056 | 労 1057 | 劵 1058 | 効 1059 | 劾 1060 | 势 1061 | 勃 1062 | 勇 1063 | 勉 1064 | 勋 1065 | 勐 1066 | 勒 1067 | 勖 1068 | 勘 1069 | 募 1070 | 勤 1071 | 勧 1072 | 勳 1073 | 勺 1074 | 勾 1075 | 勿 1076 | 匀 1077 | 包 1078 | 匆 1079 | 匈 1080 | 匍 1081 | 匐 1082 | 匕 1083 | 化 1084 | 北 1085 | 匙 1086 | 匝 1087 | 匠 1088 | 匡 1089 | 匣 1090 | 匪 1091 | 匮 1092 | 匹 1093 | 区 1094 | 医 1095 | 匾 1096 | 匿 1097 | 十 1098 | 千 1099 | 卅 1100 | 升 1101 | 午 1102 | 卉 1103 | 半 1104 | 卍 1105 | 华 1106 | 协 1107 | 卑 1108 | 卒 1109 | 卓 1110 | 单 1111 | 卖 1112 | 南 1113 | 単 1114 | 博 1115 | 卜 1116 | 卞 1117 | 卟 1118 | 占 1119 | 卡 1120 | 卢 1121 | 卤 1122 | 卦 1123 | 卧 1124 | 卫 1125 | 卮 1126 | 卯 1127 | 印 1128 | 危 1129 | 即 1130 | 却 1131 | 卵 1132 | 卷 1133 | 卸 1134 | 卿 1135 | 厂 1136 | 厄 1137 | 厅 1138 | 历 1139 | 厉 1140 | 压 1141 | 厌 1142 | 厕 1143 | 厘 1144 | 厚 1145 | 厝 1146 | 原 1147 | 厢 1148 | 厥 1149 | 厦 1150 | 厨 1151 | 厩 1152 | 厮 1153 | 厳 1154 | 去 1155 | 县 1156 | 叁 1157 | 参 1158 | 又 1159 | 叉 1160 | 及 1161 | 友 1162 | 双 1163 | 反 1164 | 収 1165 | 发 1166 | 叔 1167 | 取 1168 | 受 1169 | 变 1170 | 叙 1171 | 叛 1172 | 叟 1173 | 叠 1174 | 叡 1175 | 口 1176 | 古 1177 | 句 1178 | 另 1179 | 叨 1180 | 叩 1181 | 只 1182 | 叫 1183 | 召 1184 | 叭 1185 | 叮 1186 | 可 1187 | 台 1188 | 叱 1189 | 史 1190 | 右 1191 | 叵 1192 | 叶 1193 | 号 1194 | 司 1195 | 叹 1196 | 叻 1197 | 叼 1198 | 叽 1199 | 吁 1200 | 吃 1201 | 各 1202 | 吆 1203 | 合 1204 | 吉 1205 | 吊 1206 | 吋 1207 | 同 1208 | 名 1209 | 后 1210 | 吏 1211 | 吐 1212 | 向 1213 | 吓 1214 | 吕 1215 | 吖 1216 | 吗 1217 | 君 1218 | 吝 1219 | 吞 1220 | 吟 1221 | 吠 1222 | 吡 1223 | 否 1224 | 吧 1225 | 吨 1226 | 吩 1227 | 含 1228 | 听 1229 | 吭 1230 | 吮 1231 | 启 1232 | 吱 1233 | 吴 1234 | 吵 1235 | 吸 1236 | 吹 1237 | 吻 1238 | 吼 1239 | 吽 1240 | 吾 1241 | 呀 1242 | 呃 1243 | 呆 1244 | 呈 1245 | 告 1246 | 呋 1247 | 呎 1248 | 呐 1249 | 呓 1250 | 呕 1251 | 呗 1252 | 员 1253 | 呛 1254 | 呜 1255 | 呢 1256 | 呤 1257 | 呦 1258 | 周 1259 | 呱 1260 | 呲 1261 | 味 1262 | 呵 1263 | 呷 1264 | 呸 1265 | 呻 1266 | 呼 1267 | 命 1268 | 咀 1269 | 咁 1270 | 咂 1271 | 咄 1272 | 咆 1273 | 咋 1274 | 和 1275 | 咎 1276 | 咏 1277 | 咐 1278 | 咒 1279 | 咔 1280 | 咕 1281 | 咖 1282 | 咗 1283 | 咘 1284 | 咙 1285 | 咚 1286 | 咛 1287 | 咣 1288 | 咤 1289 | 咦 1290 | 咧 1291 | 咨 1292 | 咩 1293 | 咪 1294 | 咫 1295 | 咬 1296 | 咭 1297 | 咯 1298 | 咱 1299 | 咲 1300 | 咳 1301 | 咸 1302 | 咻 1303 | 咽 1304 | 咿 1305 | 哀 1306 | 品 1307 | 哂 1308 | 哄 1309 | 哆 1310 | 哇 1311 | 哈 1312 | 哉 1313 | 哋 1314 | 哌 1315 | 响 1316 | 哎 1317 | 哏 1318 | 哐 1319 | 哑 1320 | 哒 1321 | 哔 1322 | 哗 1323 | 哟 1324 | 哥 1325 | 哦 1326 | 哧 1327 | 哨 1328 | 哩 1329 | 哪 1330 | 哭 1331 | 哮 1332 | 哲 1333 | 哺 1334 | 哼 1335 | 哽 1336 | 唁 1337 | 唆 1338 | 唇 1339 | 唉 1340 | 唏 1341 | 唐 1342 | 唑 1343 | 唔 1344 | 唠 1345 | 唤 1346 | 唧 1347 | 唬 1348 | 售 1349 | 唯 1350 | 唰 1351 | 唱 1352 | 唳 1353 | 唷 1354 | 唸 1355 | 唾 1356 | 啃 1357 | 啄 1358 | 商 1359 | 啉 1360 | 啊 1361 | 啕 1362 | 啖 1363 | 啜 1364 | 啡 1365 | 啤 1366 | 啥 1367 | 啦 1368 | 啧 1369 | 啪 1370 | 啫 1371 | 啬 1372 | 啮 1373 | 啰 1374 | 啱 1375 | 啲 1376 | 啵 1377 | 啶 1378 | 啷 1379 | 啸 1380 | 啻 1381 | 啼 1382 | 啾 1383 | 喀 1384 | 喂 1385 | 喃 1386 | 善 1387 | 喆 1388 | 喇 1389 | 喉 1390 | 喊 1391 | 喋 1392 | 喏 1393 | 喔 1394 | 喘 1395 | 喙 1396 | 喜 1397 | 喝 1398 | 喟 1399 | 喧 1400 | 喫 1401 | 喰 1402 | 喱 1403 | 喳 1404 | 喵 1405 | 営 1406 | 喷 1407 | 喹 1408 | 喺 1409 | 喻 1410 | 喽 1411 | 嗅 1412 | 嗑 1413 | 嗒 1414 | 嗓 1415 | 嗔 1416 | 嗖 1417 | 嗜 1418 | 嗝 1419 | 嗟 1420 | 嗡 1421 | 嗣 1422 | 嗤 1423 | 嗦 1424 | 嗨 1425 | 嗪 1426 | 嗬 1427 | 嗯 1428 | 嗰 1429 | 嗲 1430 | 嗳 1431 | 嗷 1432 | 嗽 1433 | 嘀 1434 | 嘅 1435 | 嘈 1436 | 嘉 1437 | 嘌 1438 | 嘎 1439 | 嘘 1440 | 嘚 1441 | 嘛 1442 | 嘞 1443 | 嘟 1444 | 嘢 1445 | 嘣 1446 | 嘤 1447 | 嘧 1448 | 嘭 1449 | 嘱 1450 | 嘲 1451 | 嘴 1452 | 嘶 1453 | 嘹 1454 | 嘻 1455 | 嘿 1456 | 噌 1457 | 噎 1458 | 噔 1459 | 噗 1460 | 噙 1461 | 噜 1462 | 噢 1463 | 噤 1464 | 器 1465 | 噩 1466 | 噪 1467 | 噬 1468 | 噱 1469 | 噶 1470 | 噻 1471 | 噼 1472 | 嚎 1473 | 嚏 1474 | 嚐 1475 | 嚓 1476 | 嚟 1477 | 嚣 1478 | 嚷 1479 | 嚼 1480 | 囉 1481 | 囊 1482 | 囍 1483 | 囔 1484 | 囗 1485 | 囚 1486 | 四 1487 | 囝 1488 | 回 1489 | 囟 1490 | 因 1491 | 囡 1492 | 团 1493 | 団 1494 | 囤 1495 | 囧 1496 | 囫 1497 | 园 1498 | 困 1499 | 囱 1500 | 囲 1501 | 図 1502 | 围 1503 | 囹 1504 | 固 1505 | 国 1506 | 图 1507 | 囿 1508 | 圃 1509 | 圄 1510 | 圆 1511 | 圈 1512 | 圏 1513 | 圜 1514 | 土 1515 | 圣 1516 | 圧 1517 | 在 1518 | 圩 1519 | 圭 1520 | 地 1521 | 圳 1522 | 场 1523 | 圻 1524 | 圾 1525 | 址 1526 | 坂 1527 | 均 1528 | 坊 1529 | 坍 1530 | 坎 1531 | 坏 1532 | 坐 1533 | 坑 1534 | 块 1535 | 坚 1536 | 坛 1537 | 坝 1538 | 坞 1539 | 坟 1540 | 坠 1541 | 坡 1542 | 坤 1543 | 坦 1544 | 坨 1545 | 坪 1546 | 坯 1547 | 坳 1548 | 坵 1549 | 坷 1550 | 垂 1551 | 垃 1552 | 垄 1553 | 型 1554 | 垒 1555 | 垚 1556 | 垛 1557 | 垠 1558 | 垢 1559 | 垣 1560 | 垦 1561 | 垩 1562 | 垫 1563 | 垭 1564 | 垮 1565 | 埂 1566 | 埃 1567 | 埋 1568 | 城 1569 | 埔 1570 | 埕 1571 | 埗 1572 | 域 1573 | 埠 1574 | 埤 1575 | 埵 1576 | 埸 1577 | 培 1578 | 基 1579 | 埼 1580 | 堀 1581 | 堂 1582 | 堃 1583 | 堆 1584 | 堇 1585 | 堑 1586 | 堕 1587 | 堙 1588 | 堡 1589 | 堤 1590 | 堪 1591 | 堰 1592 | 堵 1593 | 堺 1594 | 堿 1595 | 塌 1596 | 塑 1597 | 塔 1598 | 塘 1599 | 塞 1600 | 塩 1601 | 填 1602 | 塬 1603 | 塭 1604 | 塾 1605 | 墀 1606 | 境 1607 | 墅 1608 | 墉 1609 | 墒 1610 | 墓 1611 | 増 1612 | 墘 1613 | 墙 1614 | 增 1615 | 墟 1616 | 墨 1617 | 墩 1618 | 壁 1619 | 壅 1620 | 壆 1621 | 壊 1622 | 壑 1623 | 壕 1624 | 壤 1625 | 士 1626 | 壬 1627 | 壮 1628 | 声 1629 | 売 1630 | 壳 1631 | 壶 1632 | 壹 1633 | 处 1634 | 备 1635 | 変 1636 | 复 1637 | 夏 1638 | 夔 1639 | 夕 1640 | 外 1641 | 夙 1642 | 多 1643 | 夜 1644 | 够 1645 | 夥 1646 | 大 1647 | 天 1648 | 太 1649 | 夫 1650 | 夭 1651 | 央 1652 | 夯 1653 | 失 1654 | 头 1655 | 夷 1656 | 夸 1657 | 夹 1658 | 夺 1659 | 奂 1660 | 奄 1661 | 奇 1662 | 奈 1663 | 奉 1664 | 奋 1665 | 奎 1666 | 奏 1667 | 契 1668 | 奔 1669 | 奕 1670 | 奖 1671 | 套 1672 | 奘 1673 | 奚 1674 | 奠 1675 | 奢 1676 | 奥 1677 | 女 1678 | 奴 1679 | 奶 1680 | 奸 1681 | 她 1682 | 好 1683 | 如 1684 | 妃 1685 | 妄 1686 | 妆 1687 | 妇 1688 | 妈 1689 | 妊 1690 | 妍 1691 | 妒 1692 | 妓 1693 | 妖 1694 | 妘 1695 | 妙 1696 | 妞 1697 | 妣 1698 | 妤 1699 | 妥 1700 | 妨 1701 | 妩 1702 | 妪 1703 | 妮 1704 | 妲 1705 | 妳 1706 | 妹 1707 | 妻 1708 | 妾 1709 | 姆 1710 | 姉 1711 | 姊 1712 | 始 1713 | 姐 1714 | 姑 1715 | 姒 1716 | 姓 1717 | 委 1718 | 姗 1719 | 姚 1720 | 姜 1721 | 姝 1722 | 姣 1723 | 姥 1724 | 姨 1725 | 姪 1726 | 姫 1727 | 姬 1728 | 姹 1729 | 姻 1730 | 姿 1731 | 威 1732 | 娃 1733 | 娄 1734 | 娅 1735 | 娆 1736 | 娇 1737 | 娉 1738 | 娑 1739 | 娓 1740 | 娘 1741 | 娜 1742 | 娟 1743 | 娠 1744 | 娣 1745 | 娥 1746 | 娩 1747 | 娱 1748 | 娲 1749 | 娴 1750 | 娶 1751 | 娼 1752 | 婀 1753 | 婆 1754 | 婉 1755 | 婊 1756 | 婕 1757 | 婚 1758 | 婢 1759 | 婧 1760 | 婪 1761 | 婴 1762 | 婵 1763 | 婶 1764 | 婷 1765 | 婺 1766 | 婿 1767 | 媒 1768 | 媚 1769 | 媛 1770 | 媞 1771 | 媲 1772 | 媳 1773 | 媾 1774 | 嫁 1775 | 嫂 1776 | 嫉 1777 | 嫌 1778 | 嫑 1779 | 嫔 1780 | 嫖 1781 | 嫘 1782 | 嫚 1783 | 嫡 1784 | 嫣 1785 | 嫦 1786 | 嫩 1787 | 嫲 1788 | 嬅 1789 | 嬉 1790 | 嬗 1791 | 嬛 1792 | 嬢 1793 | 嬴 1794 | 嬷 1795 | 嬿 1796 | 孀 1797 | 孃 1798 | 子 1799 | 孑 1800 | 孔 1801 | 孕 1802 | 孖 1803 | 字 1804 | 存 1805 | 孙 1806 | 孚 1807 | 孛 1808 | 孜 1809 | 孝 1810 | 孟 1811 | 孢 1812 | 季 1813 | 孤 1814 | 学 1815 | 孩 1816 | 孪 1817 | 孬 1818 | 孰 1819 | 孱 1820 | 孳 1821 | 孵 1822 | 孺 1823 | 孽 1824 | 宁 1825 | 它 1826 | 宅 1827 | 宇 1828 | 守 1829 | 安 1830 | 宋 1831 | 完 1832 | 宏 1833 | 宓 1834 | 宕 1835 | 宗 1836 | 官 1837 | 宙 1838 | 定 1839 | 宛 1840 | 宜 1841 | 宝 1842 | 实 1843 | 実 1844 | 宠 1845 | 审 1846 | 客 1847 | 宣 1848 | 室 1849 | 宥 1850 | 宦 1851 | 宪 1852 | 宫 1853 | 宰 1854 | 害 1855 | 宴 1856 | 宵 1857 | 家 1858 | 宸 1859 | 容 1860 | 宽 1861 | 宾 1862 | 宿 1863 | 寂 1864 | 寄 1865 | 寅 1866 | 密 1867 | 寇 1868 | 富 1869 | 寐 1870 | 寒 1871 | 寓 1872 | 寛 1873 | 寝 1874 | 寞 1875 | 察 1876 | 寡 1877 | 寥 1878 | 寨 1879 | 寮 1880 | 寰 1881 | 寸 1882 | 对 1883 | 寺 1884 | 寻 1885 | 导 1886 | 対 1887 | 寿 1888 | 封 1889 | 専 1890 | 射 1891 | 将 1892 | 尉 1893 | 尊 1894 | 小 1895 | 少 1896 | 尔 1897 | 尕 1898 | 尖 1899 | 尘 1900 | 尚 1901 | 尝 1902 | 尤 1903 | 尧 1904 | 尬 1905 | 就 1906 | 尴 1907 | 尸 1908 | 尹 1909 | 尺 1910 | 尻 1911 | 尼 1912 | 尽 1913 | 尾 1914 | 尿 1915 | 局 1916 | 屁 1917 | 层 1918 | 屄 1919 | 居 1920 | 屈 1921 | 屉 1922 | 届 1923 | 屋 1924 | 屌 1925 | 屎 1926 | 屏 1927 | 屐 1928 | 屑 1929 | 展 1930 | 属 1931 | 屠 1932 | 屡 1933 | 履 1934 | 屯 1935 | 山 1936 | 屹 1937 | 屿 1938 | 岀 1939 | 岁 1940 | 岂 1941 | 岌 1942 | 岐 1943 | 岑 1944 | 岔 1945 | 岖 1946 | 岗 1947 | 岘 1948 | 岙 1949 | 岚 1950 | 岛 1951 | 岩 1952 | 岫 1953 | 岬 1954 | 岭 1955 | 岱 1956 | 岳 1957 | 岷 1958 | 岸 1959 | 峇 1960 | 峋 1961 | 峒 1962 | 峙 1963 | 峡 1964 | 峤 1965 | 峥 1966 | 峦 1967 | 峨 1968 | 峪 1969 | 峭 1970 | 峯 1971 | 峰 1972 | 峻 1973 | 崁 1974 | 崂 1975 | 崆 1976 | 崇 1977 | 崎 1978 | 崑 1979 | 崔 1980 | 崖 1981 | 崙 1982 | 崛 1983 | 崧 1984 | 崩 1985 | 崭 1986 | 崴 1987 | 崽 1988 | 嵇 1989 | 嵊 1990 | 嵋 1991 | 嵌 1992 | 嵘 1993 | 嵩 1994 | 嵬 1995 | 嵯 1996 | 嶂 1997 | 嶋 1998 | 嶙 1999 | 巅 2000 | 巍 2001 | 巖 2002 | 川 2003 | 州 2004 | 巡 2005 | 巢 2006 | 工 2007 | 左 2008 | 巧 2009 | 巨 2010 | 巩 2011 | 巫 2012 | 差 2013 | 己 2014 | 已 2015 | 巳 2016 | 巴 2017 | 巷 2018 | 巻 2019 | 巽 2020 | 巾 2021 | 巿 2022 | 币 2023 | 市 2024 | 布 2025 | 帅 2026 | 帆 2027 | 师 2028 | 希 2029 | 帐 2030 | 帑 2031 | 帕 2032 | 帖 2033 | 帘 2034 | 帚 2035 | 帛 2036 | 帜 2037 | 帝 2038 | 带 2039 | 帧 2040 | 席 2041 | 帮 2042 | 帯 2043 | 帰 2044 | 帷 2045 | 常 2046 | 帼 2047 | 帽 2048 | 幂 2049 | 幄 2050 | 幅 2051 | 幌 2052 | 幔 2053 | 幕 2054 | 幡 2055 | 幢 2056 | 干 2057 | 平 2058 | 年 2059 | 并 2060 | 幸 2061 | 幻 2062 | 幼 2063 | 幽 2064 | 广 2065 | 庁 2066 | 広 2067 | 庄 2068 | 庆 2069 | 庇 2070 | 床 2071 | 序 2072 | 庐 2073 | 库 2074 | 应 2075 | 底 2076 | 庖 2077 | 店 2078 | 庙 2079 | 庚 2080 | 府 2081 | 庞 2082 | 废 2083 | 庠 2084 | 度 2085 | 座 2086 | 庭 2087 | 庵 2088 | 庶 2089 | 康 2090 | 庸 2091 | 庹 2092 | 庾 2093 | 廃 2094 | 廉 2095 | 廊 2096 | 廓 2097 | 廖 2098 | 延 2099 | 廷 2100 | 建 2101 | 廿 2102 | 开 2103 | 弁 2104 | 异 2105 | 弃 2106 | 弄 2107 | 弈 2108 | 弊 2109 | 弋 2110 | 式 2111 | 弑 2112 | 弓 2113 | 弔 2114 | 引 2115 | 弗 2116 | 弘 2117 | 弛 2118 | 弟 2119 | 张 2120 | 弥 2121 | 弦 2122 | 弧 2123 | 弩 2124 | 弭 2125 | 弯 2126 | 弱 2127 | 弹 2128 | 强 2129 | 弼 2130 | 弾 2131 | 彅 2132 | 归 2133 | 当 2134 | 录 2135 | 彗 2136 | 彝 2137 | 形 2138 | 彤 2139 | 彦 2140 | 彧 2141 | 彩 2142 | 彪 2143 | 彫 2144 | 彬 2145 | 彭 2146 | 彰 2147 | 影 2148 | 彷 2149 | 役 2150 | 彻 2151 | 彼 2152 | 彿 2153 | 往 2154 | 征 2155 | 径 2156 | 待 2157 | 徇 2158 | 很 2159 | 徉 2160 | 徊 2161 | 律 2162 | 徐 2163 | 徒 2164 | 従 2165 | 徕 2166 | 得 2167 | 徘 2168 | 徙 2169 | 徜 2170 | 御 2171 | 徨 2172 | 循 2173 | 徬 2174 | 微 2175 | 徳 2176 | 徴 2177 | 德 2178 | 徼 2179 | 徽 2180 | 心 2181 | 必 2182 | 忆 2183 | 忌 2184 | 忍 2185 | 忏 2186 | 忐 2187 | 忑 2188 | 忒 2189 | 忖 2190 | 志 2191 | 忘 2192 | 忙 2193 | 応 2194 | 忠 2195 | 忡 2196 | 忤 2197 | 忧 2198 | 忪 2199 | 快 2200 | 忱 2201 | 念 2202 | 忻 2203 | 忽 2204 | 忿 2205 | 怀 2206 | 态 2207 | 怂 2208 | 怅 2209 | 怆 2210 | 怎 2211 | 怏 2212 | 怒 2213 | 怔 2214 | 怕 2215 | 怖 2216 | 怙 2217 | 怜 2218 | 思 2219 | 怠 2220 | 怡 2221 | 急 2222 | 怦 2223 | 性 2224 | 怨 2225 | 怪 2226 | 怯 2227 | 怵 2228 | 总 2229 | 怼 2230 | 恁 2231 | 恃 2232 | 恋 2233 | 恍 2234 | 恐 2235 | 恒 2236 | 恕 2237 | 恙 2238 | 恚 2239 | 恢 2240 | 恣 2241 | 恤 2242 | 恨 2243 | 恩 2244 | 恪 2245 | 恫 2246 | 恬 2247 | 恭 2248 | 息 2249 | 恰 2250 | 恳 2251 | 恵 2252 | 恶 2253 | 恸 2254 | 恺 2255 | 恻 2256 | 恼 2257 | 恿 2258 | 悄 2259 | 悉 2260 | 悌 2261 | 悍 2262 | 悔 2263 | 悖 2264 | 悚 2265 | 悟 2266 | 悠 2267 | 患 2268 | 悦 2269 | 您 2270 | 悩 2271 | 悪 2272 | 悬 2273 | 悯 2274 | 悱 2275 | 悲 2276 | 悴 2277 | 悸 2278 | 悻 2279 | 悼 2280 | 悽 2281 | 情 2282 | 惆 2283 | 惇 2284 | 惊 2285 | 惋 2286 | 惑 2287 | 惕 2288 | 惘 2289 | 惚 2290 | 惜 2291 | 惟 2292 | 惠 2293 | 惦 2294 | 惧 2295 | 惨 2296 | 惩 2297 | 惫 2298 | 惬 2299 | 惭 2300 | 惮 2301 | 惯 2302 | 惰 2303 | 想 2304 | 惴 2305 | 惶 2306 | 惹 2307 | 惺 2308 | 愁 2309 | 愆 2310 | 愈 2311 | 愉 2312 | 愍 2313 | 意 2314 | 愕 2315 | 愚 2316 | 感 2317 | 愣 2318 | 愤 2319 | 愧 2320 | 愫 2321 | 愿 2322 | 慈 2323 | 慌 2324 | 慎 2325 | 慑 2326 | 慕 2327 | 慢 2328 | 慧 2329 | 慨 2330 | 慰 2331 | 慵 2332 | 慷 2333 | 慾 2334 | 憋 2335 | 憎 2336 | 憔 2337 | 憧 2338 | 憨 2339 | 憩 2340 | 憬 2341 | 憾 2342 | 懂 2343 | 懈 2344 | 懊 2345 | 懋 2346 | 懑 2347 | 懒 2348 | 懦 2349 | 懵 2350 | 懿 2351 | 戈 2352 | 戊 2353 | 戌 2354 | 戍 2355 | 戎 2356 | 戏 2357 | 成 2358 | 我 2359 | 戒 2360 | 戕 2361 | 或 2362 | 战 2363 | 戚 2364 | 戛 2365 | 戟 2366 | 戡 2367 | 戦 2368 | 截 2369 | 戬 2370 | 戮 2371 | 戳 2372 | 戴 2373 | 户 2374 | 戸 2375 | 戻 2376 | 戾 2377 | 房 2378 | 所 2379 | 扁 2380 | 扇 2381 | 扈 2382 | 扉 2383 | 手 2384 | 才 2385 | 扎 2386 | 扑 2387 | 扒 2388 | 打 2389 | 扔 2390 | 払 2391 | 托 2392 | 扛 2393 | 扣 2394 | 扦 2395 | 执 2396 | 扩 2397 | 扪 2398 | 扫 2399 | 扬 2400 | 扭 2401 | 扮 2402 | 扯 2403 | 扰 2404 | 扱 2405 | 扳 2406 | 扶 2407 | 批 2408 | 扼 2409 | 找 2410 | 承 2411 | 技 2412 | 抄 2413 | 抉 2414 | 把 2415 | 抑 2416 | 抒 2417 | 抓 2418 | 投 2419 | 抖 2420 | 抗 2421 | 折 2422 | 抚 2423 | 抛 2424 | 抜 2425 | 択 2426 | 抟 2427 | 抠 2428 | 抡 2429 | 抢 2430 | 护 2431 | 报 2432 | 抨 2433 | 披 2434 | 抬 2435 | 抱 2436 | 抵 2437 | 抹 2438 | 押 2439 | 抽 2440 | 抿 2441 | 拂 2442 | 拄 2443 | 担 2444 | 拆 2445 | 拇 2446 | 拈 2447 | 拉 2448 | 拌 2449 | 拍 2450 | 拎 2451 | 拐 2452 | 拒 2453 | 拓 2454 | 拔 2455 | 拖 2456 | 拗 2457 | 拘 2458 | 拙 2459 | 拚 2460 | 招 2461 | 拜 2462 | 拟 2463 | 拡 2464 | 拢 2465 | 拣 2466 | 拥 2467 | 拦 2468 | 拧 2469 | 拨 2470 | 择 2471 | 括 2472 | 拭 2473 | 拮 2474 | 拯 2475 | 拱 2476 | 拳 2477 | 拴 2478 | 拷 2479 | 拼 2480 | 拽 2481 | 拾 2482 | 拿 2483 | 持 2484 | 挂 2485 | 指 2486 | 挈 2487 | 按 2488 | 挎 2489 | 挑 2490 | 挖 2491 | 挙 2492 | 挚 2493 | 挛 2494 | 挝 2495 | 挞 2496 | 挟 2497 | 挠 2498 | 挡 2499 | 挣 2500 | 挤 2501 | 挥 2502 | 挨 2503 | 挪 2504 | 挫 2505 | 振 2506 | 挲 2507 | 挹 2508 | 挺 2509 | 挽 2510 | 捂 2511 | 捅 2512 | 捆 2513 | 捉 2514 | 捋 2515 | 捌 2516 | 捍 2517 | 捎 2518 | 捏 2519 | 捐 2520 | 捕 2521 | 捞 2522 | 损 2523 | 捡 2524 | 换 2525 | 捣 2526 | 捧 2527 | 捩 2528 | 据 2529 | 捱 2530 | 捲 2531 | 捶 2532 | 捷 2533 | 捺 2534 | 捻 2535 | 掀 2536 | 掂 2537 | 掇 2538 | 授 2539 | 掉 2540 | 掌 2541 | 掏 2542 | 掐 2543 | 排 2544 | 掖 2545 | 掘 2546 | 掠 2547 | 探 2548 | 掣 2549 | 接 2550 | 控 2551 | 推 2552 | 掩 2553 | 措 2554 | 掬 2555 | 掰 2556 | 掲 2557 | 掳 2558 | 掴 2559 | 掷 2560 | 掸 2561 | 掺 2562 | 揃 2563 | 揄 2564 | 揆 2565 | 揉 2566 | 揍 2567 | 描 2568 | 提 2569 | 插 2570 | 揖 2571 | 握 2572 | 揣 2573 | 揩 2574 | 揪 2575 | 揭 2576 | 援 2577 | 揶 2578 | 揸 2579 | 揹 2580 | 揽 2581 | 搀 2582 | 搁 2583 | 搂 2584 | 搅 2585 | 搏 2586 | 搐 2587 | 搓 2588 | 搔 2589 | 搜 2590 | 搞 2591 | 搡 2592 | 搪 2593 | 搬 2594 | 搭 2595 | 携 2596 | 搽 2597 | 摀 2598 | 摁 2599 | 摄 2600 | 摆 2601 | 摇 2602 | 摈 2603 | 摊 2604 | 摒 2605 | 摔 2606 | 摘 2607 | 摞 2608 | 摧 2609 | 摩 2610 | 摸 2611 | 摹 2612 | 撂 2613 | 撃 2614 | 撅 2615 | 撇 2616 | 撑 2617 | 撒 2618 | 撕 2619 | 撚 2620 | 撞 2621 | 撤 2622 | 撩 2623 | 撬 2624 | 播 2625 | 撮 2626 | 撰 2627 | 撵 2628 | 撷 2629 | 撸 2630 | 撼 2631 | 擀 2632 | 擂 2633 | 擅 2634 | 操 2635 | 擎 2636 | 擒 2637 | 擘 2638 | 擞 2639 | 擡 2640 | 擢 2641 | 擦 2642 | 攀 2643 | 攒 2644 | 攘 2645 | 攞 2646 | 攥 2647 | 攫 2648 | 支 2649 | 收 2650 | 攸 2651 | 改 2652 | 攻 2653 | 放 2654 | 政 2655 | 故 2656 | 效 2657 | 敌 2658 | 敍 2659 | 敎 2660 | 敏 2661 | 救 2662 | 敕 2663 | 敖 2664 | 教 2665 | 敛 2666 | 敝 2667 | 敞 2668 | 敢 2669 | 散 2670 | 敦 2671 | 敬 2672 | 数 2673 | 敲 2674 | 整 2675 | 敷 2676 | 文 2677 | 斋 2678 | 斌 2679 | 斎 2680 | 斐 2681 | 斑 2682 | 斓 2683 | 斗 2684 | 料 2685 | 斛 2686 | 斜 2687 | 斟 2688 | 斡 2689 | 斤 2690 | 斥 2691 | 斧 2692 | 斩 2693 | 斫 2694 | 断 2695 | 斯 2696 | 新 2697 | 方 2698 | 施 2699 | 旁 2700 | 旃 2701 | 旅 2702 | 旋 2703 | 旌 2704 | 旎 2705 | 族 2706 | 旖 2707 | 旗 2708 | 无 2709 | 既 2710 | 日 2711 | 旦 2712 | 旧 2713 | 旨 2714 | 早 2715 | 旬 2716 | 旭 2717 | 旮 2718 | 旱 2719 | 时 2720 | 旷 2721 | 旺 2722 | 旻 2723 | 昀 2724 | 昂 2725 | 昆 2726 | 昇 2727 | 昉 2728 | 昊 2729 | 昌 2730 | 明 2731 | 昏 2732 | 易 2733 | 昔 2734 | 昕 2735 | 昙 2736 | 星 2737 | 映 2738 | 春 2739 | 昧 2740 | 昨 2741 | 昭 2742 | 是 2743 | 昱 2744 | 昴 2745 | 昵 2746 | 昶 2747 | 昼 2748 | 显 2749 | 晁 2750 | 晃 2751 | 晋 2752 | 晌 2753 | 晏 2754 | 晒 2755 | 晓 2756 | 晔 2757 | 晕 2758 | 晖 2759 | 晗 2760 | 晚 2761 | 晞 2762 | 晟 2763 | 晤 2764 | 晦 2765 | 晨 2766 | 晩 2767 | 普 2768 | 景 2769 | 晰 2770 | 晴 2771 | 晶 2772 | 晷 2773 | 智 2774 | 晾 2775 | 暂 2776 | 暄 2777 | 暇 2778 | 暌 2779 | 暐 2780 | 暑 2781 | 暖 2782 | 暗 2783 | 暝 2784 | 暧 2785 | 暨 2786 | 暮 2787 | 暱 2788 | 暴 2789 | 暸 2790 | 暹 2791 | 曙 2792 | 曜 2793 | 曝 2794 | 曦 2795 | 曰 2796 | 曲 2797 | 曳 2798 | 更 2799 | 曹 2800 | 曼 2801 | 曾 2802 | 替 2803 | 最 2804 | 月 2805 | 有 2806 | 朋 2807 | 服 2808 | 朐 2809 | 朔 2810 | 朕 2811 | 朗 2812 | 望 2813 | 朝 2814 | 期 2815 | 朦 2816 | 木 2817 | 未 2818 | 末 2819 | 本 2820 | 札 2821 | 术 2822 | 朱 2823 | 朴 2824 | 朵 2825 | 机 2826 | 朽 2827 | 杀 2828 | 杂 2829 | 权 2830 | 杆 2831 | 杈 2832 | 杉 2833 | 李 2834 | 杏 2835 | 材 2836 | 村 2837 | 杓 2838 | 杖 2839 | 杜 2840 | 杞 2841 | 束 2842 | 杠 2843 | 条 2844 | 来 2845 | 杨 2846 | 杭 2847 | 杯 2848 | 杰 2849 | 杳 2850 | 杵 2851 | 杷 2852 | 杼 2853 | 松 2854 | 板 2855 | 极 2856 | 构 2857 | 枇 2858 | 枉 2859 | 枋 2860 | 析 2861 | 枕 2862 | 林 2863 | 枚 2864 | 果 2865 | 枝 2866 | 枢 2867 | 枣 2868 | 枪 2869 | 枫 2870 | 枭 2871 | 枯 2872 | 枰 2873 | 枱 2874 | 枳 2875 | 架 2876 | 枷 2877 | 枸 2878 | 柄 2879 | 柏 2880 | 某 2881 | 柑 2882 | 柒 2883 | 染 2884 | 柔 2885 | 柘 2886 | 柚 2887 | 柜 2888 | 柞 2889 | 柠 2890 | 柢 2891 | 查 2892 | 柩 2893 | 柬 2894 | 柯 2895 | 柱 2896 | 柳 2897 | 柴 2898 | 査 2899 | 柿 2900 | 栀 2901 | 栃 2902 | 栄 2903 | 栅 2904 | 标 2905 | 栈 2906 | 栉 2907 | 栋 2908 | 栎 2909 | 栏 2910 | 树 2911 | 栓 2912 | 栖 2913 | 栗 2914 | 校 2915 | 栩 2916 | 株 2917 | 样 2918 | 核 2919 | 根 2920 | 格 2921 | 栽 2922 | 栾 2923 | 桀 2924 | 桁 2925 | 桂 2926 | 桃 2927 | 桅 2928 | 框 2929 | 案 2930 | 桉 2931 | 桌 2932 | 桎 2933 | 桐 2934 | 桑 2935 | 桓 2936 | 桔 2937 | 桜 2938 | 桠 2939 | 桡 2940 | 桢 2941 | 档 2942 | 桥 2943 | 桦 2944 | 桧 2945 | 桨 2946 | 桩 2947 | 桶 2948 | 梁 2949 | 梅 2950 | 梆 2951 | 梏 2952 | 梓 2953 | 梗 2954 | 梢 2955 | 梦 2956 | 梧 2957 | 梨 2958 | 梭 2959 | 梯 2960 | 械 2961 | 梳 2962 | 梵 2963 | 梶 2964 | 检 2965 | 棂 2966 | 棉 2967 | 棋 2968 | 棍 2969 | 棒 2970 | 棕 2971 | 棘 2972 | 棚 2973 | 棠 2974 | 棣 2975 | 森 2976 | 棱 2977 | 棵 2978 | 棹 2979 | 棺 2980 | 椁 2981 | 椅 2982 | 椋 2983 | 植 2984 | 椎 2985 | 椒 2986 | 検 2987 | 椪 2988 | 椭 2989 | 椰 2990 | 椹 2991 | 椽 2992 | 椿 2993 | 楂 2994 | 楔 2995 | 楚 2996 | 楝 2997 | 楞 2998 | 楠 2999 | 楣 3000 | 楫 3001 | 楮 3002 | 楷 3003 | 楸 3004 | 楹 3005 | 楼 3006 | 楽 3007 | 概 3008 | 榄 3009 | 榆 3010 | 榈 3011 | 榉 3012 | 榔 3013 | 榕 3014 | 榖 3015 | 榛 3016 | 榜 3017 | 榨 3018 | 榫 3019 | 榭 3020 | 榱 3021 | 榴 3022 | 榷 3023 | 榻 3024 | 槁 3025 | 槃 3026 | 槌 3027 | 槎 3028 | 槐 3029 | 槓 3030 | 様 3031 | 槛 3032 | 槟 3033 | 槭 3034 | 槲 3035 | 槻 3036 | 槽 3037 | 槿 3038 | 樊 3039 | 樑 3040 | 樟 3041 | 模 3042 | 権 3043 | 横 3044 | 樫 3045 | 樯 3046 | 樱 3047 | 樵 3048 | 樽 3049 | 樾 3050 | 橄 3051 | 橇 3052 | 橐 3053 | 橘 3054 | 橙 3055 | 橡 3056 | 橱 3057 | 橹 3058 | 橼 3059 | 檀 3060 | 檄 3061 | 檎 3062 | 檐 3063 | 檗 3064 | 檬 3065 | 欠 3066 | 次 3067 | 欢 3068 | 欣 3069 | 欧 3070 | 欲 3071 | 欸 3072 | 欺 3073 | 款 3074 | 歆 3075 | 歇 3076 | 歉 3077 | 歌 3078 | 歎 3079 | 歓 3080 | 歙 3081 | 歛 3082 | 止 3083 | 正 3084 | 此 3085 | 步 3086 | 武 3087 | 歧 3088 | 歩 3089 | 歪 3090 | 歯 3091 | 歳 3092 | 歴 3093 | 歹 3094 | 死 3095 | 歼 3096 | 殁 3097 | 殃 3098 | 殆 3099 | 殇 3100 | 殉 3101 | 殊 3102 | 残 3103 | 殒 3104 | 殓 3105 | 殖 3106 | 殡 3107 | 殭 3108 | 殴 3109 | 段 3110 | 殷 3111 | 殿 3112 | 毁 3113 | 毂 3114 | 毅 3115 | 毋 3116 | 母 3117 | 毎 3118 | 每 3119 | 毒 3120 | 毓 3121 | 比 3122 | 毕 3123 | 毗 3124 | 毘 3125 | 毙 3126 | 毛 3127 | 毡 3128 | 毫 3129 | 毯 3130 | 毽 3131 | 氏 3132 | 氐 3133 | 民 3134 | 氓 3135 | 气 3136 | 氖 3137 | 気 3138 | 氙 3139 | 氛 3140 | 氟 3141 | 氡 3142 | 氢 3143 | 氤 3144 | 氦 3145 | 氧 3146 | 氨 3147 | 氪 3148 | 氮 3149 | 氯 3150 | 氰 3151 | 氲 3152 | 水 3153 | 氷 3154 | 永 3155 | 氹 3156 | 氾 3157 | 汀 3158 | 汁 3159 | 求 3160 | 汆 3161 | 汇 3162 | 汉 3163 | 汎 3164 | 汐 3165 | 汕 3166 | 汗 3167 | 汛 3168 | 汝 3169 | 汞 3170 | 江 3171 | 池 3172 | 污 3173 | 汤 3174 | 汨 3175 | 汩 3176 | 汪 3177 | 汰 3178 | 汲 3179 | 汴 3180 | 汶 3181 | 汹 3182 | 汽 3183 | 汾 3184 | 沁 3185 | 沂 3186 | 沃 3187 | 沅 3188 | 沈 3189 | 沉 3190 | 沌 3191 | 沏 3192 | 沐 3193 | 沓 3194 | 沙 3195 | 沛 3196 | 沟 3197 | 没 3198 | 沢 3199 | 沣 3200 | 沥 3201 | 沦 3202 | 沧 3203 | 沪 3204 | 沫 3205 | 沭 3206 | 沮 3207 | 沱 3208 | 河 3209 | 沸 3210 | 油 3211 | 治 3212 | 沼 3213 | 沽 3214 | 沾 3215 | 沿 3216 | 泄 3217 | 泉 3218 | 泊 3219 | 泌 3220 | 泓 3221 | 法 3222 | 泗 3223 | 泛 3224 | 泞 3225 | 泠 3226 | 泡 3227 | 波 3228 | 泣 3229 | 泥 3230 | 注 3231 | 泪 3232 | 泫 3233 | 泮 3234 | 泯 3235 | 泰 3236 | 泱 3237 | 泳 3238 | 泵 3239 | 泷 3240 | 泸 3241 | 泻 3242 | 泼 3243 | 泽 3244 | 泾 3245 | 洁 3246 | 洄 3247 | 洋 3248 | 洒 3249 | 洗 3250 | 洙 3251 | 洛 3252 | 洞 3253 | 津 3254 | 洩 3255 | 洪 3256 | 洮 3257 | 洱 3258 | 洲 3259 | 洵 3260 | 洸 3261 | 洹 3262 | 活 3263 | 洼 3264 | 洽 3265 | 派 3266 | 流 3267 | 浃 3268 | 浄 3269 | 浅 3270 | 浆 3271 | 浇 3272 | 浊 3273 | 测 3274 | 济 3275 | 浏 3276 | 浑 3277 | 浒 3278 | 浓 3279 | 浔 3280 | 浙 3281 | 浚 3282 | 浜 3283 | 浣 3284 | 浦 3285 | 浩 3286 | 浪 3287 | 浬 3288 | 浮 3289 | 浯 3290 | 浴 3291 | 海 3292 | 浸 3293 | 涂 3294 | 涅 3295 | 消 3296 | 涉 3297 | 涌 3298 | 涎 3299 | 涓 3300 | 涔 3301 | 涕 3302 | 涙 3303 | 涛 3304 | 涝 3305 | 涞 3306 | 涟 3307 | 涠 3308 | 涡 3309 | 涣 3310 | 涤 3311 | 润 3312 | 涧 3313 | 涨 3314 | 涩 3315 | 涪 3316 | 涮 3317 | 涯 3318 | 液 3319 | 涵 3320 | 涸 3321 | 涿 3322 | 淀 3323 | 淄 3324 | 淅 3325 | 淆 3326 | 淇 3327 | 淋 3328 | 淌 3329 | 淑 3330 | 淖 3331 | 淘 3332 | 淙 3333 | 淞 3334 | 淡 3335 | 淤 3336 | 淦 3337 | 淫 3338 | 淬 3339 | 淮 3340 | 深 3341 | 淳 3342 | 混 3343 | 淹 3344 | 添 3345 | 淼 3346 | 清 3347 | 済 3348 | 渉 3349 | 渊 3350 | 渋 3351 | 渍 3352 | 渎 3353 | 渐 3354 | 渔 3355 | 渗 3356 | 渚 3357 | 渝 3358 | 渠 3359 | 渡 3360 | 渣 3361 | 渤 3362 | 渥 3363 | 温 3364 | 渭 3365 | 港 3366 | 渲 3367 | 渴 3368 | 游 3369 | 渺 3370 | 湃 3371 | 湄 3372 | 湍 3373 | 湖 3374 | 湘 3375 | 湛 3376 | 湟 3377 | 湧 3378 | 湫 3379 | 湮 3380 | 湳 3381 | 湾 3382 | 湿 3383 | 満 3384 | 溃 3385 | 溅 3386 | 溉 3387 | 溏 3388 | 源 3389 | 溜 3390 | 溟 3391 | 溢 3392 | 溥 3393 | 溧 3394 | 溪 3395 | 溯 3396 | 溱 3397 | 溴 3398 | 溶 3399 | 溺 3400 | 溼 3401 | 滁 3402 | 滂 3403 | 滇 3404 | 滋 3405 | 滑 3406 | 滓 3407 | 滔 3408 | 滕 3409 | 滙 3410 | 滚 3411 | 滝 3412 | 滞 3413 | 滟 3414 | 满 3415 | 滢 3416 | 滤 3417 | 滥 3418 | 滦 3419 | 滨 3420 | 滩 3421 | 滴 3422 | 漂 3423 | 漆 3424 | 漉 3425 | 漏 3426 | 漓 3427 | 演 3428 | 漕 3429 | 漠 3430 | 漩 3431 | 漪 3432 | 漫 3433 | 漯 3434 | 漱 3435 | 漳 3436 | 漾 3437 | 潆 3438 | 潇 3439 | 潋 3440 | 潍 3441 | 潘 3442 | 潜 3443 | 潞 3444 | 潟 3445 | 潢 3446 | 潦 3447 | 潧 3448 | 潭 3449 | 潮 3450 | 潴 3451 | 潸 3452 | 潺 3453 | 潼 3454 | 澄 3455 | 澈 3456 | 澍 3457 | 澎 3458 | 澜 3459 | 澡 3460 | 澧 3461 | 澳 3462 | 澹 3463 | 激 3464 | 濂 3465 | 濑 3466 | 濒 3467 | 濠 3468 | 濡 3469 | 濬 3470 | 濮 3471 | 濯 3472 | 瀑 3473 | 瀚 3474 | 瀛 3475 | 瀞 3476 | 瀬 3477 | 灌 3478 | 灏 3479 | 灞 3480 | 火 3481 | 灬 3482 | 灭 3483 | 灯 3484 | 灰 3485 | 灵 3486 | 灶 3487 | 灸 3488 | 灼 3489 | 灾 3490 | 灿 3491 | 炀 3492 | 炁 3493 | 炅 3494 | 炉 3495 | 炊 3496 | 炎 3497 | 炒 3498 | 炔 3499 | 炕 3500 | 炖 3501 | 炙 3502 | 炜 3503 | 炫 3504 | 炬 3505 | 炭 3506 | 炮 3507 | 炯 3508 | 炳 3509 | 炷 3510 | 炸 3511 | 点 3512 | 炼 3513 | 炽 3514 | 烁 3515 | 烂 3516 | 烃 3517 | 烈 3518 | 烊 3519 | 烘 3520 | 烙 3521 | 烛 3522 | 烟 3523 | 烤 3524 | 烦 3525 | 烧 3526 | 烨 3527 | 烩 3528 | 烫 3529 | 烬 3530 | 热 3531 | 烯 3532 | 烷 3533 | 烹 3534 | 烽 3535 | 焉 3536 | 焊 3537 | 焕 3538 | 焖 3539 | 焗 3540 | 焘 3541 | 焙 3542 | 焚 3543 | 焜 3544 | 焦 3545 | 焯 3546 | 焰 3547 | 焱 3548 | 然 3549 | 焼 3550 | 煅 3551 | 煊 3552 | 煌 3553 | 煎 3554 | 煖 3555 | 煜 3556 | 煞 3557 | 煤 3558 | 煦 3559 | 照 3560 | 煨 3561 | 煮 3562 | 煲 3563 | 煸 3564 | 煽 3565 | 熄 3566 | 熊 3567 | 熏 3568 | 熔 3569 | 熙 3570 | 熟 3571 | 熠 3572 | 熨 3573 | 熬 3574 | 熵 3575 | 熹 3576 | 燃 3577 | 燄 3578 | 燊 3579 | 燎 3580 | 燔 3581 | 燕 3582 | 燥 3583 | 燧 3584 | 燮 3585 | 燻 3586 | 燿 3587 | 爆 3588 | 爪 3589 | 爬 3590 | 爰 3591 | 爱 3592 | 爵 3593 | 父 3594 | 爷 3595 | 爸 3596 | 爹 3597 | 爻 3598 | 爽 3599 | 片 3600 | 版 3601 | 牌 3602 | 牍 3603 | 牒 3604 | 牙 3605 | 牛 3606 | 牝 3607 | 牟 3608 | 牠 3609 | 牡 3610 | 牢 3611 | 牦 3612 | 牧 3613 | 物 3614 | 牯 3615 | 牲 3616 | 牴 3617 | 牵 3618 | 特 3619 | 牺 3620 | 犀 3621 | 犁 3622 | 犄 3623 | 犊 3624 | 犍 3625 | 犒 3626 | 犬 3627 | 犯 3628 | 状 3629 | 犷 3630 | 犸 3631 | 犹 3632 | 狂 3633 | 狄 3634 | 狈 3635 | 狎 3636 | 狐 3637 | 狒 3638 | 狗 3639 | 狙 3640 | 狞 3641 | 狠 3642 | 狡 3643 | 狩 3644 | 独 3645 | 狭 3646 | 狮 3647 | 狰 3648 | 狱 3649 | 狸 3650 | 狼 3651 | 猎 3652 | 猕 3653 | 猖 3654 | 猗 3655 | 猛 3656 | 猜 3657 | 猝 3658 | 猥 3659 | 猩 3660 | 猪 3661 | 猫 3662 | 猬 3663 | 献 3664 | 猴 3665 | 猷 3666 | 猾 3667 | 猿 3668 | 獐 3669 | 獒 3670 | 獗 3671 | 獠 3672 | 獣 3673 | 獭 3674 | 獾 3675 | 玄 3676 | 率 3677 | 玉 3678 | 王 3679 | 玑 3680 | 玖 3681 | 玛 3682 | 玟 3683 | 玠 3684 | 玥 3685 | 玩 3686 | 玫 3687 | 玮 3688 | 环 3689 | 现 3690 | 玲 3691 | 玳 3692 | 玷 3693 | 玺 3694 | 玻 3695 | 珀 3696 | 珂 3697 | 珅 3698 | 珈 3699 | 珉 3700 | 珊 3701 | 珍 3702 | 珏 3703 | 珐 3704 | 珑 3705 | 珙 3706 | 珞 3707 | 珠 3708 | 珣 3709 | 珥 3710 | 珩 3711 | 珪 3712 | 班 3713 | 珮 3714 | 珲 3715 | 珺 3716 | 球 3717 | 琅 3718 | 理 3719 | 琇 3720 | 琉 3721 | 琊 3722 | 琍 3723 | 琏 3724 | 琐 3725 | 琛 3726 | 琢 3727 | 琥 3728 | 琦 3729 | 琨 3730 | 琪 3731 | 琬 3732 | 琮 3733 | 琰 3734 | 琲 3735 | 琳 3736 | 琴 3737 | 琵 3738 | 琶 3739 | 琼 3740 | 瑀 3741 | 瑁 3742 | 瑄 3743 | 瑕 3744 | 瑗 3745 | 瑙 3746 | 瑚 3747 | 瑛 3748 | 瑜 3749 | 瑞 3750 | 瑟 3751 | 瑠 3752 | 瑯 3753 | 瑰 3754 | 瑶 3755 | 瑾 3756 | 璀 3757 | 璁 3758 | 璃 3759 | 璇 3760 | 璋 3761 | 璎 3762 | 璐 3763 | 璜 3764 | 璞 3765 | 璟 3766 | 璧 3767 | 璨 3768 | 璿 3769 | 瓒 3770 | 瓜 3771 | 瓢 3772 | 瓣 3773 | 瓤 3774 | 瓦 3775 | 瓮 3776 | 瓯 3777 | 瓴 3778 | 瓶 3779 | 瓷 3780 | 甄 3781 | 甕 3782 | 甘 3783 | 甙 3784 | 甚 3785 | 甜 3786 | 生 3787 | 甥 3788 | 甦 3789 | 用 3790 | 甩 3791 | 甫 3792 | 甬 3793 | 甭 3794 | 甯 3795 | 田 3796 | 由 3797 | 甲 3798 | 申 3799 | 电 3800 | 男 3801 | 甸 3802 | 町 3803 | 画 3804 | 甾 3805 | 畀 3806 | 畅 3807 | 界 3808 | 畏 3809 | 畑 3810 | 畔 3811 | 留 3812 | 畜 3813 | 略 3814 | 畦 3815 | 番 3816 | 畲 3817 | 畳 3818 | 畴 3819 | 畸 3820 | 畹 3821 | 畿 3822 | 疆 3823 | 疏 3824 | 疑 3825 | 疔 3826 | 疖 3827 | 疗 3828 | 疙 3829 | 疚 3830 | 疝 3831 | 疟 3832 | 疡 3833 | 疣 3834 | 疤 3835 | 疥 3836 | 疫 3837 | 疮 3838 | 疯 3839 | 疱 3840 | 疲 3841 | 疳 3842 | 疵 3843 | 疸 3844 | 疹 3845 | 疼 3846 | 疽 3847 | 疾 3848 | 痂 3849 | 病 3850 | 症 3851 | 痈 3852 | 痉 3853 | 痊 3854 | 痍 3855 | 痒 3856 | 痔 3857 | 痕 3858 | 痘 3859 | 痛 3860 | 痞 3861 | 痠 3862 | 痢 3863 | 痣 3864 | 痤 3865 | 痧 3866 | 痨 3867 | 痪 3868 | 痫 3869 | 痰 3870 | 痱 3871 | 痴 3872 | 痹 3873 | 痺 3874 | 痼 3875 | 痿 3876 | 瘀 3877 | 瘁 3878 | 瘘 3879 | 瘙 3880 | 瘟 3881 | 瘠 3882 | 瘢 3883 | 瘤 3884 | 瘦 3885 | 瘩 3886 | 瘪 3887 | 瘫 3888 | 瘴 3889 | 瘸 3890 | 瘾 3891 | 癌 3892 | 癒 3893 | 癖 3894 | 癜 3895 | 癞 3896 | 癡 3897 | 癣 3898 | 癫 3899 | 癸 3900 | 発 3901 | 登 3902 | 白 3903 | 百 3904 | 皂 3905 | 的 3906 | 皆 3907 | 皇 3908 | 皈 3909 | 皋 3910 | 皎 3911 | 皑 3912 | 皓 3913 | 皖 3914 | 皙 3915 | 皮 3916 | 皱 3917 | 皴 3918 | 皿 3919 | 盂 3920 | 盅 3921 | 盆 3922 | 盈 3923 | 益 3924 | 盎 3925 | 盏 3926 | 盐 3927 | 监 3928 | 盒 3929 | 盔 3930 | 盖 3931 | 盗 3932 | 盘 3933 | 盛 3934 | 盟 3935 | 盥 3936 | 目 3937 | 盯 3938 | 盱 3939 | 盲 3940 | 直 3941 | 相 3942 | 盹 3943 | 盼 3944 | 盾 3945 | 省 3946 | 眈 3947 | 眉 3948 | 看 3949 | 県 3950 | 眙 3951 | 眞 3952 | 真 3953 | 眠 3954 | 眦 3955 | 眨 3956 | 眩 3957 | 眯 3958 | 眶 3959 | 眷 3960 | 眸 3961 | 眺 3962 | 眼 3963 | 着 3964 | 睁 3965 | 睇 3966 | 睐 3967 | 睑 3968 | 睛 3969 | 睡 3970 | 睢 3971 | 督 3972 | 睥 3973 | 睦 3974 | 睨 3975 | 睪 3976 | 睫 3977 | 睬 3978 | 睹 3979 | 睽 3980 | 睾 3981 | 睿 3982 | 瞄 3983 | 瞅 3984 | 瞇 3985 | 瞋 3986 | 瞌 3987 | 瞎 3988 | 瞑 3989 | 瞒 3990 | 瞓 3991 | 瞟 3992 | 瞠 3993 | 瞥 3994 | 瞧 3995 | 瞩 3996 | 瞪 3997 | 瞬 3998 | 瞰 3999 | 瞳 4000 | 瞻 4001 | 瞿 4002 | 矍 4003 | 矗 4004 | 矛 4005 | 矜 4006 | 矢 4007 | 矣 4008 | 知 4009 | 矩 4010 | 矫 4011 | 短 4012 | 矮 4013 | 石 4014 | 矶 4015 | 矽 4016 | 矾 4017 | 矿 4018 | 码 4019 | 砂 4020 | 砌 4021 | 砍 4022 | 砒 4023 | 研 4024 | 砖 4025 | 砗 4026 | 砚 4027 | 砝 4028 | 砣 4029 | 砥 4030 | 砧 4031 | 砭 4032 | 砰 4033 | 砲 4034 | 破 4035 | 砷 4036 | 砸 4037 | 砺 4038 | 砼 4039 | 砾 4040 | 础 4041 | 硅 4042 | 硐 4043 | 硒 4044 | 硕 4045 | 硝 4046 | 硫 4047 | 硬 4048 | 确 4049 | 硼 4050 | 碁 4051 | 碇 4052 | 碉 4053 | 碌 4054 | 碍 4055 | 碎 4056 | 碑 4057 | 碓 4058 | 碗 4059 | 碘 4060 | 碚 4061 | 碛 4062 | 碟 4063 | 碣 4064 | 碧 4065 | 碰 4066 | 碱 4067 | 碳 4068 | 碴 4069 | 碾 4070 | 磁 4071 | 磅 4072 | 磊 4073 | 磋 4074 | 磐 4075 | 磕 4076 | 磡 4077 | 磨 4078 | 磬 4079 | 磲 4080 | 磷 4081 | 磺 4082 | 礁 4083 | 礡 4084 | 礴 4085 | 示 4086 | 礼 4087 | 社 4088 | 祀 4089 | 祁 4090 | 祂 4091 | 祇 4092 | 祈 4093 | 祉 4094 | 祎 4095 | 祐 4096 | 祕 4097 | 祖 4098 | 祗 4099 | 祚 4100 | 祛 4101 | 祜 4102 | 祝 4103 | 神 4104 | 祟 4105 | 祠 4106 | 祢 4107 | 祥 4108 | 票 4109 | 祭 4110 | 祯 4111 | 祷 4112 | 祸 4113 | 祺 4114 | 禀 4115 | 禁 4116 | 禄 4117 | 禅 4118 | 福 4119 | 禛 4120 | 禧 4121 | 禹 4122 | 禺 4123 | 离 4124 | 禽 4125 | 禾 4126 | 秀 4127 | 私 4128 | 秃 4129 | 秆 4130 | 秉 4131 | 秋 4132 | 种 4133 | 科 4134 | 秒 4135 | 秘 4136 | 租 4137 | 秣 4138 | 秤 4139 | 秦 4140 | 秧 4141 | 秩 4142 | 秭 4143 | 积 4144 | 称 4145 | 秸 4146 | 移 4147 | 秽 4148 | 稀 4149 | 程 4150 | 稍 4151 | 税 4152 | 稔 4153 | 稗 4154 | 稚 4155 | 稜 4156 | 稞 4157 | 稠 4158 | 稣 4159 | 稲 4160 | 稳 4161 | 稷 4162 | 稹 4163 | 稻 4164 | 稼 4165 | 稽 4166 | 稿 4167 | 穂 4168 | 穆 4169 | 穗 4170 | 穴 4171 | 究 4172 | 穷 4173 | 穹 4174 | 空 4175 | 穿 4176 | 突 4177 | 窃 4178 | 窄 4179 | 窈 4180 | 窍 4181 | 窑 4182 | 窒 4183 | 窓 4184 | 窕 4185 | 窖 4186 | 窗 4187 | 窘 4188 | 窜 4189 | 窝 4190 | 窟 4191 | 窠 4192 | 窥 4193 | 窦 4194 | 窨 4195 | 窿 4196 | 立 4197 | 竖 4198 | 站 4199 | 竜 4200 | 竞 4201 | 竟 4202 | 章 4203 | 竣 4204 | 童 4205 | 竭 4206 | 端 4207 | 竹 4208 | 竺 4209 | 竽 4210 | 竿 4211 | 笃 4212 | 笆 4213 | 笈 4214 | 笋 4215 | 笏 4216 | 笑 4217 | 笔 4218 | 笙 4219 | 笛 4220 | 笞 4221 | 笠 4222 | 符 4223 | 笨 4224 | 第 4225 | 笹 4226 | 笺 4227 | 笼 4228 | 等 4229 | 筊 4230 | 筋 4231 | 筏 4232 | 筐 4233 | 筑 4234 | 筒 4235 | 答 4236 | 策 4237 | 筛 4238 | 筝 4239 | 筠 4240 | 筱 4241 | 筲 4242 | 筵 4243 | 筷 4244 | 筹 4245 | 签 4246 | 简 4247 | 箇 4248 | 箍 4249 | 箐 4250 | 箔 4251 | 箕 4252 | 算 4253 | 箝 4254 | 管 4255 | 箩 4256 | 箫 4257 | 箭 4258 | 箱 4259 | 箴 4260 | 箸 4261 | 篁 4262 | 篆 4263 | 篇 4264 | 篑 4265 | 篓 4266 | 篙 4267 | 篝 4268 | 篠 4269 | 篡 4270 | 篪 4271 | 篮 4272 | 篱 4273 | 篷 4274 | 簇 4275 | 簌 4276 | 簦 4277 | 簧 4278 | 簪 4279 | 簷 4280 | 簸 4281 | 簿 4282 | 籁 4283 | 籍 4284 | 籐 4285 | 籤 4286 | 米 4287 | 类 4288 | 籼 4289 | 籽 4290 | 粄 4291 | 粉 4292 | 粑 4293 | 粒 4294 | 粕 4295 | 粗 4296 | 粘 4297 | 粟 4298 | 粤 4299 | 粥 4300 | 粧 4301 | 粪 4302 | 粮 4303 | 粱 4304 | 粲 4305 | 粳 4306 | 粹 4307 | 粼 4308 | 粽 4309 | 精 4310 | 粿 4311 | 糅 4312 | 糊 4313 | 糍 4314 | 糕 4315 | 糖 4316 | 糗 4317 | 糙 4318 | 糜 4319 | 糟 4320 | 糠 4321 | 糬 4322 | 糯 4323 | 糸 4324 | 系 4325 | 紊 4326 | 素 4327 | 索 4328 | 紧 4329 | 紫 4330 | 紮 4331 | 累 4332 | 絃 4333 | 経 4334 | 絮 4335 | 絵 4336 | 綑 4337 | 継 4338 | 続 4339 | 綦 4340 | 総 4341 | 縁 4342 | 縄 4343 | 繁 4344 | 繇 4345 | 繋 4346 | 纂 4347 | 纠 4348 | 红 4349 | 纣 4350 | 纤 4351 | 约 4352 | 级 4353 | 纨 4354 | 纪 4355 | 纫 4356 | 纬 4357 | 纭 4358 | 纯 4359 | 纰 4360 | 纱 4361 | 纲 4362 | 纳 4363 | 纵 4364 | 纶 4365 | 纷 4366 | 纸 4367 | 纹 4368 | 纺 4369 | 纽 4370 | 纾 4371 | 线 4372 | 绀 4373 | 练 4374 | 组 4375 | 绅 4376 | 细 4377 | 织 4378 | 终 4379 | 绊 4380 | 绍 4381 | 绎 4382 | 经 4383 | 绑 4384 | 绒 4385 | 结 4386 | 绔 4387 | 绕 4388 | 绘 4389 | 给 4390 | 绚 4391 | 绛 4392 | 络 4393 | 绝 4394 | 绞 4395 | 统 4396 | 绡 4397 | 绢 4398 | 绣 4399 | 绥 4400 | 绦 4401 | 继 4402 | 绩 4403 | 绪 4404 | 绫 4405 | 续 4406 | 绮 4407 | 绯 4408 | 绰 4409 | 绳 4410 | 维 4411 | 绵 4412 | 绶 4413 | 绷 4414 | 绸 4415 | 绻 4416 | 综 4417 | 绽 4418 | 绾 4419 | 绿 4420 | 缀 4421 | 缄 4422 | 缅 4423 | 缆 4424 | 缇 4425 | 缈 4426 | 缉 4427 | 缎 4428 | 缓 4429 | 缔 4430 | 缕 4431 | 编 4432 | 缘 4433 | 缙 4434 | 缚 4435 | 缜 4436 | 缝 4437 | 缠 4438 | 缢 4439 | 缤 4440 | 缥 4441 | 缨 4442 | 缩 4443 | 缪 4444 | 缭 4445 | 缮 4446 | 缰 4447 | 缱 4448 | 缴 4449 | 缸 4450 | 缺 4451 | 罂 4452 | 罄 4453 | 罐 4454 | 网 4455 | 罔 4456 | 罕 4457 | 罗 4458 | 罚 4459 | 罡 4460 | 罢 4461 | 罩 4462 | 罪 4463 | 置 4464 | 署 4465 | 罹 4466 | 羁 4467 | 羊 4468 | 羌 4469 | 美 4470 | 羔 4471 | 羚 4472 | 羞 4473 | 羟 4474 | 羡 4475 | 羣 4476 | 群 4477 | 羧 4478 | 羨 4479 | 羯 4480 | 羲 4481 | 羸 4482 | 羹 4483 | 羽 4484 | 羿 4485 | 翁 4486 | 翅 4487 | 翊 4488 | 翌 4489 | 翎 4490 | 翔 4491 | 翘 4492 | 翟 4493 | 翠 4494 | 翡 4495 | 翦 4496 | 翩 4497 | 翰 4498 | 翱 4499 | 翳 4500 | 翻 4501 | 翼 4502 | 耀 4503 | 老 4504 | 考 4505 | 耄 4506 | 者 4507 | 耆 4508 | 耋 4509 | 而 4510 | 耍 4511 | 耐 4512 | 耒 4513 | 耕 4514 | 耗 4515 | 耘 4516 | 耙 4517 | 耦 4518 | 耨 4519 | 耳 4520 | 耶 4521 | 耷 4522 | 耸 4523 | 耻 4524 | 耽 4525 | 耿 4526 | 聂 4527 | 聆 4528 | 聊 4529 | 聋 4530 | 职 4531 | 聒 4532 | 联 4533 | 聘 4534 | 聚 4535 | 聪 4536 | 聴 4537 | 聿 4538 | 肃 4539 | 肄 4540 | 肆 4541 | 肇 4542 | 肉 4543 | 肋 4544 | 肌 4545 | 肏 4546 | 肓 4547 | 肖 4548 | 肘 4549 | 肚 4550 | 肛 4551 | 肝 4552 | 肠 4553 | 股 4554 | 肢 4555 | 肤 4556 | 肥 4557 | 肩 4558 | 肪 4559 | 肮 4560 | 肯 4561 | 肱 4562 | 育 4563 | 肴 4564 | 肺 4565 | 肽 4566 | 肾 4567 | 肿 4568 | 胀 4569 | 胁 4570 | 胃 4571 | 胄 4572 | 胆 4573 | 背 4574 | 胍 4575 | 胎 4576 | 胖 4577 | 胚 4578 | 胛 4579 | 胜 4580 | 胝 4581 | 胞 4582 | 胡 4583 | 胤 4584 | 胥 4585 | 胧 4586 | 胫 4587 | 胭 4588 | 胯 4589 | 胰 4590 | 胱 4591 | 胳 4592 | 胴 4593 | 胶 4594 | 胸 4595 | 胺 4596 | 能 4597 | 脂 4598 | 脆 4599 | 脇 4600 | 脉 4601 | 脊 4602 | 脍 4603 | 脏 4604 | 脐 4605 | 脑 4606 | 脓 4607 | 脖 4608 | 脘 4609 | 脚 4610 | 脣 4611 | 脩 4612 | 脯 4613 | 脱 4614 | 脲 4615 | 脳 4616 | 脸 4617 | 脾 4618 | 腆 4619 | 腈 4620 | 腊 4621 | 腋 4622 | 腌 4623 | 腐 4624 | 腑 4625 | 腓 4626 | 腔 4627 | 腕 4628 | 腥 4629 | 腩 4630 | 腭 4631 | 腮 4632 | 腰 4633 | 腱 4634 | 腴 4635 | 腹 4636 | 腺 4637 | 腻 4638 | 腼 4639 | 腾 4640 | 腿 4641 | 膀 4642 | 膈 4643 | 膊 4644 | 膏 4645 | 膑 4646 | 膘 4647 | 膛 4648 | 膜 4649 | 膝 4650 | 膦 4651 | 膨 4652 | 膳 4653 | 膺 4654 | 膻 4655 | 臀 4656 | 臂 4657 | 臃 4658 | 臆 4659 | 臊 4660 | 臓 4661 | 臣 4662 | 臧 4663 | 自 4664 | 臬 4665 | 臭 4666 | 至 4667 | 致 4668 | 臻 4669 | 臼 4670 | 臾 4671 | 舀 4672 | 舂 4673 | 舅 4674 | 舆 4675 | 舌 4676 | 舍 4677 | 舎 4678 | 舐 4679 | 舒 4680 | 舔 4681 | 舖 4682 | 舗 4683 | 舛 4684 | 舜 4685 | 舞 4686 | 舟 4687 | 航 4688 | 舫 4689 | 般 4690 | 舰 4691 | 舱 4692 | 舵 4693 | 舶 4694 | 舷 4695 | 舸 4696 | 船 4697 | 舺 4698 | 舾 4699 | 艇 4700 | 艋 4701 | 艘 4702 | 艮 4703 | 良 4704 | 艰 4705 | 色 4706 | 艳 4707 | 艹 4708 | 艺 4709 | 艾 4710 | 节 4711 | 芃 4712 | 芈 4713 | 芊 4714 | 芋 4715 | 芍 4716 | 芎 4717 | 芒 4718 | 芙 4719 | 芜 4720 | 芝 4721 | 芡 4722 | 芥 4723 | 芦 4724 | 芩 4725 | 芪 4726 | 芫 4727 | 芬 4728 | 芭 4729 | 芮 4730 | 芯 4731 | 花 4732 | 芳 4733 | 芷 4734 | 芸 4735 | 芹 4736 | 芽 4737 | 芾 4738 | 苁 4739 | 苄 4740 | 苇 4741 | 苋 4742 | 苍 4743 | 苏 4744 | 苑 4745 | 苒 4746 | 苓 4747 | 苔 4748 | 苕 4749 | 苗 4750 | 苛 4751 | 苜 4752 | 苞 4753 | 苟 4754 | 苡 4755 | 苣 4756 | 若 4757 | 苦 4758 | 苫 4759 | 苯 4760 | 英 4761 | 苷 4762 | 苹 4763 | 苻 4764 | 茁 4765 | 茂 4766 | 范 4767 | 茄 4768 | 茅 4769 | 茉 4770 | 茎 4771 | 茏 4772 | 茗 4773 | 茜 4774 | 茧 4775 | 茨 4776 | 茫 4777 | 茬 4778 | 茭 4779 | 茯 4780 | 茱 4781 | 茴 4782 | 茵 4783 | 茶 4784 | 茸 4785 | 茹 4786 | 茼 4787 | 荀 4788 | 荃 4789 | 荆 4790 | 草 4791 | 荏 4792 | 荐 4793 | 荒 4794 | 荔 4795 | 荖 4796 | 荘 4797 | 荚 4798 | 荞 4799 | 荟 4800 | 荠 4801 | 荡 4802 | 荣 4803 | 荤 4804 | 荥 4805 | 荧 4806 | 荨 4807 | 荪 4808 | 荫 4809 | 药 4810 | 荳 4811 | 荷 4812 | 荸 4813 | 荻 4814 | 荼 4815 | 荽 4816 | 莅 4817 | 莆 4818 | 莉 4819 | 莎 4820 | 莒 4821 | 莓 4822 | 莘 4823 | 莞 4824 | 莠 4825 | 莪 4826 | 莫 4827 | 莱 4828 | 莲 4829 | 莴 4830 | 获 4831 | 莹 4832 | 莺 4833 | 莽 4834 | 莿 4835 | 菀 4836 | 菁 4837 | 菅 4838 | 菇 4839 | 菈 4840 | 菊 4841 | 菌 4842 | 菏 4843 | 菓 4844 | 菖 4845 | 菘 4846 | 菜 4847 | 菟 4848 | 菠 4849 | 菡 4850 | 菩 4851 | 菱 4852 | 菲 4853 | 菸 4854 | 菽 4855 | 萁 4856 | 萃 4857 | 萄 4858 | 萋 4859 | 萌 4860 | 萍 4861 | 萎 4862 | 萘 4863 | 萝 4864 | 萤 4865 | 营 4866 | 萦 4867 | 萧 4868 | 萨 4869 | 萩 4870 | 萱 4871 | 萸 4872 | 萼 4873 | 落 4874 | 葆 4875 | 葚 4876 | 葛 4877 | 葡 4878 | 董 4879 | 葩 4880 | 葫 4881 | 葬 4882 | 葭 4883 | 葱 4884 | 葳 4885 | 葵 4886 | 葺 4887 | 蒂 4888 | 蒋 4889 | 蒐 4890 | 蒙 4891 | 蒜 4892 | 蒟 4893 | 蒡 4894 | 蒨 4895 | 蒲 4896 | 蒸 4897 | 蒹 4898 | 蒻 4899 | 蒿 4900 | 蓁 4901 | 蓄 4902 | 蓆 4903 | 蓉 4904 | 蓑 4905 | 蓓 4906 | 蓖 4907 | 蓝 4908 | 蓟 4909 | 蓦 4910 | 蓬 4911 | 蓼 4912 | 蓿 4913 | 蔑 4914 | 蔓 4915 | 蔗 4916 | 蔘 4917 | 蔚 4918 | 蔡 4919 | 蔫 4920 | 蔬 4921 | 蔵 4922 | 蔷 4923 | 蔺 4924 | 蔻 4925 | 蔼 4926 | 蔽 4927 | 蕃 4928 | 蕈 4929 | 蕉 4930 | 蕊 4931 | 蕙 4932 | 蕤 4933 | 蕨 4934 | 蕲 4935 | 蕴 4936 | 蕻 4937 | 蕾 4938 | 薄 4939 | 薅 4940 | 薇 4941 | 薏 4942 | 薑 4943 | 薙 4944 | 薛 4945 | 薨 4946 | 薪 4947 | 薬 4948 | 薯 4949 | 薰 4950 | 薹 4951 | 藏 4952 | 藐 4953 | 藓 4954 | 藕 4955 | 藜 4956 | 藤 4957 | 藩 4958 | 藻 4959 | 藿 4960 | 蘑 4961 | 蘸 4962 | 蘼 4963 | 虎 4964 | 虏 4965 | 虐 4966 | 虑 4967 | 虔 4968 | 虚 4969 | 虞 4970 | 虢 4971 | 虫 4972 | 虬 4973 | 虱 4974 | 虹 4975 | 虻 4976 | 虽 4977 | 虾 4978 | 蚀 4979 | 蚁 4980 | 蚂 4981 | 蚊 4982 | 蚌 4983 | 蚓 4984 | 蚕 4985 | 蚜 4986 | 蚝 4987 | 蚣 4988 | 蚤 4989 | 蚩 4990 | 蚪 4991 | 蚯 4992 | 蚱 4993 | 蚵 4994 | 蛀 4995 | 蛆 4996 | 蛇 4997 | 蛊 4998 | 蛋 4999 | 蛎 5000 | 蛐 5001 | 蛔 5002 | 蛙 5003 | 蛛 5004 | 蛟 5005 | 蛤 5006 | 蛭 5007 | 蛮 5008 | 蛰 5009 | 蛳 5010 | 蛹 5011 | 蛾 5012 | 蜀 5013 | 蜂 5014 | 蜃 5015 | 蜇 5016 | 蜈 5017 | 蜊 5018 | 蜍 5019 | 蜒 5020 | 蜓 5021 | 蜕 5022 | 蜗 5023 | 蜘 5024 | 蜚 5025 | 蜜 5026 | 蜡 5027 | 蜢 5028 | 蜥 5029 | 蜱 5030 | 蜴 5031 | 蜷 5032 | 蜻 5033 | 蜿 5034 | 蝇 5035 | 蝈 5036 | 蝉 5037 | 蝌 5038 | 蝎 5039 | 蝗 5040 | 蝙 5041 | 蝠 5042 | 蝨 5043 | 蝴 5044 | 蝶 5045 | 蝼 5046 | 螂 5047 | 螃 5048 | 融 5049 | 螨 5050 | 螯 5051 | 螳 5052 | 螺 5053 | 蟀 5054 | 蟆 5055 | 蟋 5056 | 蟑 5057 | 蟒 5058 | 蟠 5059 | 蟹 5060 | 蟾 5061 | 蠍 5062 | 蠔 5063 | 蠕 5064 | 蠛 5065 | 蠡 5066 | 蠢 5067 | 蠹 5068 | 血 5069 | 衄 5070 | 衅 5071 | 行 5072 | 衍 5073 | 衔 5074 | 街 5075 | 衙 5076 | 衞 5077 | 衡 5078 | 衢 5079 | 衣 5080 | 补 5081 | 表 5082 | 衩 5083 | 衫 5084 | 衬 5085 | 衮 5086 | 衰 5087 | 衲 5088 | 衷 5089 | 衾 5090 | 衿 5091 | 袁 5092 | 袂 5093 | 袄 5094 | 袅 5095 | 袈 5096 | 袋 5097 | 袍 5098 | 袒 5099 | 袖 5100 | 袜 5101 | 袤 5102 | 袪 5103 | 被 5104 | 袭 5105 | 袱 5106 | 裁 5107 | 裂 5108 | 装 5109 | 裆 5110 | 裔 5111 | 裕 5112 | 裘 5113 | 裙 5114 | 裟 5115 | 裤 5116 | 裨 5117 | 裱 5118 | 裳 5119 | 裴 5120 | 裸 5121 | 裹 5122 | 裾 5123 | 褂 5124 | 褐 5125 | 褒 5126 | 褓 5127 | 褔 5128 | 褚 5129 | 褥 5130 | 褪 5131 | 褫 5132 | 褶 5133 | 襁 5134 | 襄 5135 | 襟 5136 | 西 5137 | 要 5138 | 覃 5139 | 覇 5140 | 覚 5141 | 覧 5142 | 観 5143 | 见 5144 | 观 5145 | 规 5146 | 觅 5147 | 视 5148 | 览 5149 | 觉 5150 | 觊 5151 | 觎 5152 | 觐 5153 | 觑 5154 | 角 5155 | 觞 5156 | 解 5157 | 觥 5158 | 触 5159 | 言 5160 | 訳 5161 | 証 5162 | 詹 5163 | 誉 5164 | 誓 5165 | 読 5166 | 諡 5167 | 譁 5168 | 警 5169 | 譬 5170 | 譲 5171 | 讚 5172 | 计 5173 | 订 5174 | 认 5175 | 讥 5176 | 讧 5177 | 讨 5178 | 让 5179 | 讪 5180 | 讫 5181 | 训 5182 | 议 5183 | 讯 5184 | 记 5185 | 讲 5186 | 讳 5187 | 讴 5188 | 讶 5189 | 讷 5190 | 许 5191 | 讹 5192 | 论 5193 | 讼 5194 | 讽 5195 | 设 5196 | 访 5197 | 诀 5198 | 证 5199 | 诃 5200 | 评 5201 | 诅 5202 | 识 5203 | 诈 5204 | 诉 5205 | 诊 5206 | 诋 5207 | 词 5208 | 诏 5209 | 译 5210 | 试 5211 | 诗 5212 | 诘 5213 | 诙 5214 | 诚 5215 | 诛 5216 | 话 5217 | 诞 5218 | 诟 5219 | 诠 5220 | 诡 5221 | 询 5222 | 诣 5223 | 诤 5224 | 该 5225 | 详 5226 | 诧 5227 | 诩 5228 | 诫 5229 | 诬 5230 | 语 5231 | 误 5232 | 诰 5233 | 诱 5234 | 诲 5235 | 说 5236 | 诵 5237 | 诶 5238 | 请 5239 | 诸 5240 | 诺 5241 | 读 5242 | 诽 5243 | 课 5244 | 诿 5245 | 谀 5246 | 谁 5247 | 调 5248 | 谄 5249 | 谅 5250 | 谆 5251 | 谈 5252 | 谊 5253 | 谋 5254 | 谌 5255 | 谍 5256 | 谎 5257 | 谏 5258 | 谐 5259 | 谑 5260 | 谒 5261 | 谓 5262 | 谔 5263 | 谕 5264 | 谗 5265 | 谘 5266 | 谙 5267 | 谚 5268 | 谛 5269 | 谜 5270 | 谟 5271 | 谢 5272 | 谣 5273 | 谤 5274 | 谥 5275 | 谦 5276 | 谧 5277 | 谨 5278 | 谩 5279 | 谪 5280 | 谬 5281 | 谭 5282 | 谯 5283 | 谱 5284 | 谲 5285 | 谴 5286 | 谶 5287 | 谷 5288 | 豁 5289 | 豆 5290 | 豇 5291 | 豉 5292 | 豊 5293 | 豌 5294 | 豔 5295 | 豚 5296 | 象 5297 | 豢 5298 | 豪 5299 | 豫 5300 | 豹 5301 | 豺 5302 | 貂 5303 | 貅 5304 | 貌 5305 | 貔 5306 | 貘 5307 | 贝 5308 | 贞 5309 | 负 5310 | 贡 5311 | 财 5312 | 责 5313 | 贤 5314 | 败 5315 | 账 5316 | 货 5317 | 质 5318 | 贩 5319 | 贪 5320 | 贫 5321 | 贬 5322 | 购 5323 | 贮 5324 | 贯 5325 | 贰 5326 | 贱 5327 | 贲 5328 | 贴 5329 | 贵 5330 | 贷 5331 | 贸 5332 | 费 5333 | 贺 5334 | 贻 5335 | 贼 5336 | 贾 5337 | 贿 5338 | 赁 5339 | 赂 5340 | 赃 5341 | 资 5342 | 赅 5343 | 赈 5344 | 赊 5345 | 赋 5346 | 赌 5347 | 赎 5348 | 赏 5349 | 赐 5350 | 赓 5351 | 赔 5352 | 赖 5353 | 赘 5354 | 赚 5355 | 赛 5356 | 赝 5357 | 赞 5358 | 赠 5359 | 赡 5360 | 赢 5361 | 赣 5362 | 赤 5363 | 赦 5364 | 赧 5365 | 赫 5366 | 赭 5367 | 走 5368 | 赳 5369 | 赴 5370 | 赵 5371 | 赶 5372 | 起 5373 | 趁 5374 | 超 5375 | 越 5376 | 趋 5377 | 趟 5378 | 趣 5379 | 足 5380 | 趴 5381 | 趵 5382 | 趸 5383 | 趺 5384 | 趾 5385 | 跃 5386 | 跄 5387 | 跆 5388 | 跋 5389 | 跌 5390 | 跎 5391 | 跑 5392 | 跖 5393 | 跚 5394 | 跛 5395 | 距 5396 | 跟 5397 | 跤 5398 | 跨 5399 | 跩 5400 | 跪 5401 | 路 5402 | 跳 5403 | 践 5404 | 跷 5405 | 跹 5406 | 跺 5407 | 跻 5408 | 踉 5409 | 踊 5410 | 踌 5411 | 踏 5412 | 踝 5413 | 踞 5414 | 踟 5415 | 踢 5416 | 踩 5417 | 踪 5418 | 踮 5419 | 踱 5420 | 踵 5421 | 踹 5422 | 蹂 5423 | 蹄 5424 | 蹇 5425 | 蹈 5426 | 蹉 5427 | 蹊 5428 | 蹋 5429 | 蹑 5430 | 蹒 5431 | 蹙 5432 | 蹟 5433 | 蹦 5434 | 蹩 5435 | 蹬 5436 | 蹭 5437 | 蹲 5438 | 蹴 5439 | 蹶 5440 | 蹼 5441 | 蹿 5442 | 躁 5443 | 躇 5444 | 躏 5445 | 身 5446 | 躬 5447 | 躯 5448 | 躲 5449 | 躺 5450 | 転 5451 | 軽 5452 | 车 5453 | 轧 5454 | 轨 5455 | 轩 5456 | 转 5457 | 轭 5458 | 轮 5459 | 软 5460 | 轰 5461 | 轲 5462 | 轴 5463 | 轶 5464 | 轻 5465 | 轼 5466 | 载 5467 | 轿 5468 | 较 5469 | 辄 5470 | 辅 5471 | 辆 5472 | 辇 5473 | 辈 5474 | 辉 5475 | 辊 5476 | 辍 5477 | 辐 5478 | 辑 5479 | 输 5480 | 辕 5481 | 辖 5482 | 辗 5483 | 辘 5484 | 辙 5485 | 辛 5486 | 辜 5487 | 辞 5488 | 辟 5489 | 辣 5490 | 辨 5491 | 辩 5492 | 辫 5493 | 辰 5494 | 辱 5495 | 边 5496 | 辺 5497 | 辻 5498 | 込 5499 | 辽 5500 | 达 5501 | 迁 5502 | 迂 5503 | 迄 5504 | 迅 5505 | 过 5506 | 迈 5507 | 迎 5508 | 运 5509 | 近 5510 | 返 5511 | 还 5512 | 这 5513 | 进 5514 | 远 5515 | 违 5516 | 连 5517 | 迟 5518 | 迢 5519 | 迤 5520 | 迥 5521 | 迦 5522 | 迩 5523 | 迪 5524 | 迫 5525 | 迭 5526 | 述 5527 | 迷 5528 | 迸 5529 | 迹 5530 | 迺 5531 | 追 5532 | 退 5533 | 送 5534 | 适 5535 | 逃 5536 | 逅 5537 | 逆 5538 | 选 5539 | 逊 5540 | 逍 5541 | 透 5542 | 逐 5543 | 递 5544 | 途 5545 | 逗 5546 | 通 5547 | 逛 5548 | 逝 5549 | 逞 5550 | 速 5551 | 造 5552 | 逢 5553 | 逮 5554 | 逵 5555 | 逶 5556 | 逸 5557 | 逻 5558 | 逼 5559 | 逾 5560 | 遁 5561 | 遂 5562 | 遅 5563 | 遇 5564 | 遍 5565 | 遏 5566 | 遐 5567 | 遑 5568 | 遒 5569 | 道 5570 | 遗 5571 | 遛 5572 | 遢 5573 | 遣 5574 | 遥 5575 | 遨 5576 | 遭 5577 | 遮 5578 | 遴 5579 | 遵 5580 | 遶 5581 | 遽 5582 | 避 5583 | 邀 5584 | 邂 5585 | 邃 5586 | 邈 5587 | 邋 5588 | 邑 5589 | 邓 5590 | 邕 5591 | 邛 5592 | 邝 5593 | 邢 5594 | 那 5595 | 邦 5596 | 邨 5597 | 邪 5598 | 邬 5599 | 邮 5600 | 邯 5601 | 邰 5602 | 邱 5603 | 邳 5604 | 邵 5605 | 邸 5606 | 邹 5607 | 邺 5608 | 邻 5609 | 郁 5610 | 郅 5611 | 郊 5612 | 郎 5613 | 郑 5614 | 郜 5615 | 郝 5616 | 郡 5617 | 郢 5618 | 郤 5619 | 郦 5620 | 郧 5621 | 部 5622 | 郫 5623 | 郭 5624 | 郴 5625 | 郷 5626 | 郸 5627 | 都 5628 | 鄂 5629 | 鄙 5630 | 鄞 5631 | 鄢 5632 | 鄱 5633 | 酉 5634 | 酊 5635 | 酋 5636 | 酌 5637 | 配 5638 | 酐 5639 | 酒 5640 | 酗 5641 | 酚 5642 | 酝 5643 | 酢 5644 | 酣 5645 | 酥 5646 | 酩 5647 | 酪 5648 | 酬 5649 | 酮 5650 | 酯 5651 | 酰 5652 | 酱 5653 | 酵 5654 | 酶 5655 | 酷 5656 | 酸 5657 | 酿 5658 | 醃 5659 | 醇 5660 | 醉 5661 | 醋 5662 | 醍 5663 | 醐 5664 | 醒 5665 | 醚 5666 | 醛 5667 | 醣 5668 | 醪 5669 | 醮 5670 | 醯 5671 | 醴 5672 | 醺 5673 | 采 5674 | 釉 5675 | 释 5676 | 里 5677 | 重 5678 | 野 5679 | 量 5680 | 金 5681 | 釜 5682 | 釦 5683 | 鈪 5684 | 鉄 5685 | 鉴 5686 | 銭 5687 | 銮 5688 | 鍊 5689 | 鎌 5690 | 鎏 5691 | 鎗 5692 | 鏖 5693 | 鑑 5694 | 鑫 5695 | 针 5696 | 钉 5697 | 钊 5698 | 钎 5699 | 钏 5700 | 钒 5701 | 钓 5702 | 钗 5703 | 钙 5704 | 钛 5705 | 钜 5706 | 钝 5707 | 钞 5708 | 钟 5709 | 钠 5710 | 钡 5711 | 钢 5712 | 钣 5713 | 钤 5714 | 钥 5715 | 钦 5716 | 钧 5717 | 钨 5718 | 钩 5719 | 钮 5720 | 钯 5721 | 钰 5722 | 钱 5723 | 钳 5724 | 钴 5725 | 钵 5726 | 钺 5727 | 钻 5728 | 钼 5729 | 钾 5730 | 钿 5731 | 铀 5732 | 铁 5733 | 铂 5734 | 铃 5735 | 铄 5736 | 铅 5737 | 铆 5738 | 铉 5739 | 铎 5740 | 铐 5741 | 铛 5742 | 铜 5743 | 铝 5744 | 铠 5745 | 铡 5746 | 铢 5747 | 铣 5748 | 铤 5749 | 铨 5750 | 铩 5751 | 铬 5752 | 铭 5753 | 铮 5754 | 铰 5755 | 铲 5756 | 铵 5757 | 银 5758 | 铸 5759 | 铺 5760 | 链 5761 | 铿 5762 | 销 5763 | 锁 5764 | 锂 5765 | 锄 5766 | 锅 5767 | 锆 5768 | 锈 5769 | 锉 5770 | 锋 5771 | 锌 5772 | 锏 5773 | 锐 5774 | 锑 5775 | 错 5776 | 锚 5777 | 锟 5778 | 锡 5779 | 锢 5780 | 锣 5781 | 锤 5782 | 锥 5783 | 锦 5784 | 锭 5785 | 键 5786 | 锯 5787 | 锰 5788 | 锲 5789 | 锵 5790 | 锹 5791 | 锺 5792 | 锻 5793 | 镀 5794 | 镁 5795 | 镂 5796 | 镇 5797 | 镉 5798 | 镌 5799 | 镍 5800 | 镐 5801 | 镑 5802 | 镕 5803 | 镖 5804 | 镗 5805 | 镛 5806 | 镜 5807 | 镣 5808 | 镭 5809 | 镯 5810 | 镰 5811 | 镳 5812 | 镶 5813 | 长 5814 | 閒 5815 | 関 5816 | 闇 5817 | 闘 5818 | 闢 5819 | 门 5820 | 闪 5821 | 闫 5822 | 闭 5823 | 问 5824 | 闯 5825 | 闰 5826 | 闲 5827 | 间 5828 | 闵 5829 | 闷 5830 | 闸 5831 | 闹 5832 | 闺 5833 | 闻 5834 | 闽 5835 | 闾 5836 | 阀 5837 | 阁 5838 | 阂 5839 | 阅 5840 | 阆 5841 | 阇 5842 | 阈 5843 | 阉 5844 | 阎 5845 | 阐 5846 | 阑 5847 | 阔 5848 | 阕 5849 | 阖 5850 | 阙 5851 | 阚 5852 | 阜 5853 | 队 5854 | 阡 5855 | 阮 5856 | 阱 5857 | 防 5858 | 阳 5859 | 阴 5860 | 阵 5861 | 阶 5862 | 阻 5863 | 阿 5864 | 陀 5865 | 陂 5866 | 附 5867 | 际 5868 | 陆 5869 | 陇 5870 | 陈 5871 | 陋 5872 | 陌 5873 | 降 5874 | 限 5875 | 陕 5876 | 陛 5877 | 陞 5878 | 陟 5879 | 陡 5880 | 院 5881 | 除 5882 | 陨 5883 | 险 5884 | 陪 5885 | 陲 5886 | 陵 5887 | 陶 5888 | 陷 5889 | 険 5890 | 隅 5891 | 隆 5892 | 隈 5893 | 隋 5894 | 隍 5895 | 随 5896 | 隐 5897 | 隔 5898 | 隘 5899 | 隙 5900 | 障 5901 | 隠 5902 | 隣 5903 | 隧 5904 | 隶 5905 | 隼 5906 | 隽 5907 | 难 5908 | 雀 5909 | 雁 5910 | 雄 5911 | 雅 5912 | 集 5913 | 雇 5914 | 雉 5915 | 雌 5916 | 雍 5917 | 雎 5918 | 雏 5919 | 雑 5920 | 雒 5921 | 雕 5922 | 雨 5923 | 雪 5924 | 雯 5925 | 雰 5926 | 雳 5927 | 零 5928 | 雷 5929 | 雹 5930 | 雾 5931 | 需 5932 | 霁 5933 | 霄 5934 | 霆 5935 | 震 5936 | 霈 5937 | 霉 5938 | 霊 5939 | 霍 5940 | 霎 5941 | 霏 5942 | 霑 5943 | 霓 5944 | 霖 5945 | 霜 5946 | 霞 5947 | 霭 5948 | 霰 5949 | 露 5950 | 霸 5951 | 霹 5952 | 霾 5953 | 青 5954 | 靓 5955 | 靖 5956 | 静 5957 | 靛 5958 | 非 5959 | 靠 5960 | 靡 5961 | 面 5962 | 靥 5963 | 革 5964 | 靳 5965 | 靴 5966 | 靶 5967 | 靼 5968 | 鞅 5969 | 鞋 5970 | 鞍 5971 | 鞑 5972 | 鞘 5973 | 鞠 5974 | 鞣 5975 | 鞭 5976 | 韦 5977 | 韧 5978 | 韩 5979 | 韬 5980 | 韭 5981 | 音 5982 | 韵 5983 | 韶 5984 | 頫 5985 | 頼 5986 | 页 5987 | 顶 5988 | 顷 5989 | 项 5990 | 顺 5991 | 须 5992 | 顼 5993 | 顽 5994 | 顾 5995 | 顿 5996 | 颁 5997 | 颂 5998 | 预 5999 | 颅 6000 | 领 6001 | 颇 6002 | 颈 6003 | 颉 6004 | 颊 6005 | 颌 6006 | 颍 6007 | 颐 6008 | 频 6009 | 颓 6010 | 颔 6011 | 颖 6012 | 颗 6013 | 题 6014 | 颚 6015 | 颛 6016 | 颜 6017 | 额 6018 | 颞 6019 | 颠 6020 | 颡 6021 | 颢 6022 | 颤 6023 | 颦 6024 | 颧 6025 | 风 6026 | 飒 6027 | 飓 6028 | 飕 6029 | 飘 6030 | 飙 6031 | 飚 6032 | 飞 6033 | 食 6034 | 飨 6035 | 餐 6036 | 餮 6037 | 餵 6038 | 饍 6039 | 饕 6040 | 饥 6041 | 饨 6042 | 饪 6043 | 饬 6044 | 饭 6045 | 饮 6046 | 饯 6047 | 饰 6048 | 饱 6049 | 饲 6050 | 饴 6051 | 饵 6052 | 饶 6053 | 饷 6054 | 饺 6055 | 饼 6056 | 饽 6057 | 饿 6058 | 馀 6059 | 馁 6060 | 馄 6061 | 馅 6062 | 馆 6063 | 馈 6064 | 馋 6065 | 馍 6066 | 馏 6067 | 馒 6068 | 馔 6069 | 首 6070 | 馗 6071 | 香 6072 | 馥 6073 | 馨 6074 | 駄 6075 | 駅 6076 | 駆 6077 | 験 6078 | 騨 6079 | 驒 6080 | 马 6081 | 驭 6082 | 驮 6083 | 驯 6084 | 驰 6085 | 驱 6086 | 驳 6087 | 驴 6088 | 驶 6089 | 驷 6090 | 驸 6091 | 驹 6092 | 驻 6093 | 驼 6094 | 驾 6095 | 驿 6096 | 骁 6097 | 骂 6098 | 骄 6099 | 骅 6100 | 骆 6101 | 骇 6102 | 骈 6103 | 骊 6104 | 骋 6105 | 验 6106 | 骏 6107 | 骐 6108 | 骑 6109 | 骗 6110 | 骚 6111 | 骛 6112 | 骜 6113 | 骞 6114 | 骠 6115 | 骡 6116 | 骤 6117 | 骥 6118 | 骧 6119 | 骨 6120 | 骰 6121 | 骶 6122 | 骷 6123 | 骸 6124 | 骼 6125 | 髂 6126 | 髅 6127 | 髋 6128 | 髓 6129 | 高 6130 | 髦 6131 | 髪 6132 | 髯 6133 | 髻 6134 | 鬃 6135 | 鬓 6136 | 鬟 6137 | 鬣 6138 | 鬼 6139 | 魁 6140 | 魂 6141 | 魄 6142 | 魅 6143 | 魇 6144 | 魍 6145 | 魏 6146 | 魔 6147 | 鮨 6148 | 鱼 6149 | 鱿 6150 | 鲁 6151 | 鲈 6152 | 鲍 6153 | 鲑 6154 | 鲛 6155 | 鲜 6156 | 鲟 6157 | 鲢 6158 | 鲤 6159 | 鲨 6160 | 鲫 6161 | 鲱 6162 | 鲲 6163 | 鲶 6164 | 鲷 6165 | 鲸 6166 | 鳃 6167 | 鳄 6168 | 鳅 6169 | 鳌 6170 | 鳍 6171 | 鳕 6172 | 鳖 6173 | 鳗 6174 | 鳝 6175 | 鳞 6176 | 鵰 6177 | 鸟 6178 | 鸠 6179 | 鸡 6180 | 鸢 6181 | 鸣 6182 | 鸥 6183 | 鸦 6184 | 鸨 6185 | 鸪 6186 | 鸭 6187 | 鸯 6188 | 鸳 6189 | 鸵 6190 | 鸽 6191 | 鸾 6192 | 鸿 6193 | 鹂 6194 | 鹃 6195 | 鹄 6196 | 鹅 6197 | 鹈 6198 | 鹉 6199 | 鹊 6200 | 鹌 6201 | 鹏 6202 | 鹑 6203 | 鹕 6204 | 鹘 6205 | 鹜 6206 | 鹞 6207 | 鹤 6208 | 鹦 6209 | 鹧 6210 | 鹫 6211 | 鹭 6212 | 鹰 6213 | 鹳 6214 | 鹿 6215 | 麂 6216 | 麋 6217 | 麒 6218 | 麓 6219 | 麝 6220 | 麟 6221 | 麦 6222 | 麴 6223 | 麸 6224 | 麺 6225 | 麻 6226 | 麾 6227 | 黄 6228 | 黍 6229 | 黎 6230 | 黏 6231 | 黑 6232 | 黒 6233 | 黔 6234 | 默 6235 | 黛 6236 | 黜 6237 | 黝 6238 | 黠 6239 | 黯 6240 | 鼋 6241 | 鼎 6242 | 鼐 6243 | 鼓 6244 | 鼠 6245 | 鼬 6246 | 鼹 6247 | 鼻 6248 | 鼾 6249 | 齁 6250 | 齐 6251 | 齢 6252 | 齿 6253 | 龄 6254 | 龅 6255 | 龈 6256 | 龊 6257 | 龋 6258 | 龌 6259 | 龙 6260 | 龚 6261 | 龛 6262 | 龟 6263 | ︰ 6264 | ︱ 6265 | ︶ 6266 | ︿ 6267 | ﹁ 6268 | ﹂ 6269 | ﹍ 6270 | ﹏ 6271 | ﹐ 6272 | ﹑ 6273 | ﹒ 6274 | ﹔ 6275 | ﹕ 6276 | ﹖ 6277 | ﹗ 6278 | ﹙ 6279 | ﹚ 6280 | ﹝ 6281 | ﹞ 6282 | ﹡ 6283 | ﹣ 6284 | ! 6285 | " 6286 | # 6287 | $ 6288 | % 6289 | & 6290 | ' 6291 | ( 6292 | ) 6293 | * 6294 | + 6295 | , 6296 | - 6297 | . 6298 | / 6299 | 0 6300 | 1 6301 | 2 6302 | 3 6303 | 4 6304 | 5 6305 | 6 6306 | 7 6307 | 8 6308 | 9 6309 | : 6310 | ; 6311 | < 6312 | = 6313 | > 6314 | ? 6315 | @ 6316 | [ 6317 | \ 6318 | ] 6319 | ^ 6320 | _ 6321 | ` 6322 | { 6323 | | 6324 | } 6325 | ~ 6326 | 。 6327 | 「 6328 | 」 6329 | 、 6330 | ・ 6331 | ッ 6332 | ー 6333 | ゙ 6334 | ゚ 6335 |  ̄ 6336 | ¥ 6337 | ... 6338 | ##s 6339 | ##0 6340 | ##a 6341 | ##2 6342 | ##1 6343 | ##3 6344 | ##e 6345 | ##8 6346 | ##5 6347 | ##6 6348 | ##4 6349 | ##9 6350 | ##7 6351 | ##t 6352 | ##o 6353 | ##d 6354 | ##i 6355 | ##n 6356 | ##m 6357 | ##c 6358 | ##l 6359 | ##y 6360 | ##r 6361 | ##g 6362 | ##p 6363 | ##f 6364 | ##er 6365 | ##k 6366 | ##h 6367 | ##b 6368 | ##x 6369 | ##u 6370 | ##w 6371 | ##ing 6372 | ##on 6373 | ##v 6374 | ##an 6375 | ##z 6376 | ##le 6377 | ##in 6378 | ##mm 6379 | ##ng 6380 | ##us 6381 | ── 6382 | ##te 6383 | ##ed 6384 | ##al 6385 | ##ic 6386 | ##ia 6387 | ##q 6388 | ##ce 6389 | ##en 6390 | ##is 6391 | ##ra 6392 | ##es 6393 | ##j 6394 | ##cm 6395 | ##ne 6396 | ##re 6397 | ##tion 6398 | ##ch 6399 | ##or 6400 | ##na 6401 | ##ta 6402 | ##ll 6403 | ##ie 6404 | ##ma 6405 | ##ion 6406 | ##th 6407 | ##st 6408 | ##se 6409 | ##et 6410 | ##ck 6411 | ##ly 6412 | ##ge 6413 | ##ry 6414 | ##ter 6415 | ##ar 6416 | ##la 6417 | ##os 6418 | ##el 6419 | ##ml 6420 | ##at 6421 | ##man 6422 | ##it 6423 | ##me 6424 | ##de 6425 | ##nt 6426 | ##mb 6427 | ##ve 6428 | ##da 6429 | ##ps 6430 | ##to 6431 | ##son 6432 | ##ke 6433 | ##um 6434 | ##ss 6435 | ##0 6436 | ##as 6437 | ##co 6438 | ##go 6439 | ##id 6440 | ##ey 6441 | ##sa 6442 | ##ro 6443 | ##am 6444 | ##no 6445 | ##sh 6446 | ##ki 6447 | ##pe 6448 | ##ine 6449 | ##mi 6450 | ##ton 6451 | ##ment 6452 | ##ld 6453 | ##li 6454 | ##rs 6455 | ##ri 6456 | ##rd 6457 | ##io 6458 | ##ty 6459 | ##ba 6460 | ##ga 6461 | ##ny 6462 | ##by 6463 | ##ur 6464 | ##hz 6465 | ##ang 6466 | ##ka 6467 | ##ad 6468 | ##tor 6469 | ##kg 6470 | ##rt 6471 | ##ct 6472 | ##ts 6473 | ##ns 6474 | ##ao 6475 | ##nd 6476 | ##ya 6477 | ##il 6478 | ##ian 6479 | ##ers 6480 | ##day 6481 | ##ay 6482 | ##line 6483 | ##be 6484 | ##dy 6485 | ##ies 6486 | ##ha 6487 | ##ot 6488 | ##va 6489 | ##mo 6490 | ##land 6491 | ##ation 6492 | ##pa 6493 | ##ol 6494 | ##ue 6495 | ##ca 6496 | ##om 6497 | ##ure 6498 | ##bo 6499 | ##way 6500 | ##ko 6501 | ##do 6502 | ##un 6503 | ##ni 6504 | ##up 6505 | ##ds 6506 | ##ee 6507 | ##ive 6508 | ##cc 6509 | ##ble 6510 | ##ity 6511 | ##ex 6512 | ##ler 6513 | ##ap 6514 | ##book 6515 | ##ice 6516 | ##km 6517 | ##mg 6518 | ##ms 6519 | ##cy 6520 | ##view 6521 | ##lo 6522 | ##5 6523 | ##oo 6524 | ##net 6525 | ##ls 6526 | ##ii 6527 | ##box 6528 | ##2 6529 | ##ley 6530 | ##ent 6531 | ##les 6532 | ##1 6533 | ##said 6534 | ##ber 6535 | ##ner 6536 | ##der 6537 | ##hi 6538 | ##ir 6539 | ##ai 6540 | ##ver 6541 | ##ron 6542 | ##ster 6543 | ##sk 6544 | ##ft 6545 | ##ti 6546 | ##my 6547 | ##ker 6548 | ##one 6549 | ##ow 6550 | ##lin 6551 | ##ip 6552 | ##ics 6553 | ##ff 6554 | ##nce 6555 | ##per 6556 | ##ock 6557 | ##bs 6558 | ##ah 6559 | ##lv 6560 | ##mp 6561 | ##3 6562 | ##au 6563 | ##age 6564 | ##ting 6565 | ##ung 6566 | ##ction 6567 | ##db 6568 | ##ore 6569 | ##op 6570 | ##ong 6571 | ##ica 6572 | ##wa 6573 | ##7 6574 | ##tv 6575 | ##di 6576 | ##9 6577 | ##ier 6578 | ##si 6579 | ##ok 6580 | ##ut 6581 | ##vi 6582 | ##ac 6583 | ##fs 6584 | ##sion 6585 | ##6 6586 | ##tt 6587 | ##lt 6588 | ##bc 6589 | ##rry 6590 | ##ted 6591 | ##rn 6592 | ##t00 6593 | ##ui 6594 | ##ary 6595 | ##pm 6596 | ##za 6597 | ##ger 6598 | ##m 6599 | ##hd 6600 | ##od 6601 | ##ina 6602 | ##ix 6603 | ##ana 6604 | ##ji 6605 | ##ard 6606 | ##ain 6607 | ##ze 6608 | ##bi 6609 | ##port 6610 | ##nm 6611 | ##dia 6612 | ##and 6613 | ##im 6614 | ##ei 6615 | ##po 6616 | ##bit 6617 | ##out 6618 | ##zz 6619 | ##8 6620 | ##tes 6621 | ##ast 6622 | ○○ 6623 | ##ling 6624 | ##ory 6625 | ##ical 6626 | ##cn 6627 | ##ys 6628 | ##oc 6629 | ##lly 6630 | ##ks 6631 | ##board 6632 | ##lan 6633 | ##que 6634 | ##ua 6635 | ##com 6636 | ##4 6637 | ##ren 6638 | ##via 6639 | ##tch 6640 | ##ial 6641 | ##nn 6642 | ##xx 6643 | ##ser 6644 | ##ist 6645 | ##art 6646 | ##lm 6647 | ##ek 6648 | ##ning 6649 | ##if 6650 | ##ite 6651 | ##ku 6652 | ##ux 6653 | ##hs 6654 | ##ide 6655 | ##ins 6656 | ##ight 6657 | ##fe 6658 | ##ho 6659 | ##lla 6660 | ##pp 6661 | ##ec 6662 | ##rm 6663 | ##ham 6664 | ##ell 6665 | ##able 6666 | ##ctor 6667 | ##light 6668 | ##han 6669 | ##ise 6670 | ##tions 6671 | ##shi 6672 | ##ram 6673 | ##pi 6674 | ##well 6675 | ##hu 6676 | ##gb 6677 | ##ef 6678 | ##uan 6679 | ##plus 6680 | ##res 6681 | ##ess 6682 | ##ate 6683 | ##ese 6684 | ##ci 6685 | ##bet 6686 | ##nk 6687 | ##ute 6688 | ##fc 6689 | ##www 6690 | ##ght 6691 | ##gs 6692 | ##ile 6693 | ##wood 6694 | ##uo 6695 | ##em 6696 | ##king 6697 | ##tive 6698 | ##ox 6699 | ##zy 6700 | ##red 6701 | ##ium 6702 | ##lf 6703 | ##ding 6704 | ##tic 6705 | ##cs 6706 | ##che 6707 | ##ire 6708 | ##gy 6709 | ##ult 6710 | ##fa 6711 | ##mer 6712 | ##time 6713 | ##tte 6714 | ##tra 6715 | ##ern 6716 | ##ous 6717 | ##int 6718 | ##car 6719 | ##our 6720 | ##ant 6721 | ##jo 6722 | ##min 6723 | ##ino 6724 | ##ris 6725 | ##ud 6726 | ##set 6727 | ##her 6728 | ##ou 6729 | ##fi 6730 | ##ill 6731 | ##ick 6732 | ##av 6733 | ##dd 6734 | ##mark 6735 | ##ash 6736 | ##ome 6737 | ##ak 6738 | ##lle 6739 | ##watch 6740 | ##und 6741 | ##tal 6742 | ##less 6743 | ##rl 6744 | ##mhz 6745 | ##house 6746 | ##key 6747 | ##hy 6748 | ##web 6749 | ##gg 6750 | ##wan 6751 | ##°c 6752 | ##val 6753 | ##ons 6754 | ##ance 6755 | ##so 6756 | ##he 6757 | ##rp 6758 | ##ake 6759 | ##link 6760 | ##hp 6761 | ##eng 6762 | ##style 6763 | ##gi 6764 | ##ray 6765 | ##max 6766 | ##pc 6767 | ##ace 6768 | ##berg 6769 | ##news 6770 | ##all 6771 | ##rus 6772 | ##works 6773 | ##ja 6774 | ##ea 6775 | ##top 6776 | ##ness 6777 | ##lu 6778 | ##ul 6779 | ##a 6780 | ##ean 6781 | ##gle 6782 | ##back 6783 | ##tan 6784 | ##nes 6785 | ##zi 6786 | ##las 6787 | ##oe 6788 | ##sd 6789 | ##bot 6790 | ##world 6791 | ##zo 6792 | ##vr 6793 | ##list 6794 | ##ort 6795 | ##lon 6796 | ##tc 6797 | ##of 6798 | ##bus 6799 | ##gen 6800 | ##lie 6801 | ##ria 6802 | ##coin 6803 | ##bt 6804 | ##ook 6805 | ##sy 6806 | ##word 6807 | ##ther 6808 | ##xi 6809 | ##sc 6810 | ##bb 6811 | ##tar 6812 | ##ky 6813 | ##yo 6814 | ##ara 6815 | ##aa 6816 | ##rc 6817 | ##tz 6818 | ##ston 6819 | ##eo 6820 | ##ade 6821 | ##win 6822 | ##ura 6823 | ##den 6824 | ##ita 6825 | ##sm 6826 | ##use 6827 | ##ode 6828 | ##fo 6829 | ##hone 6830 | ##ology 6831 | ##con 6832 | ##ford 6833 | ##joy 6834 | ##kb 6835 | ##rade 6836 | ##ach 6837 | ##ful 6838 | ##ase 6839 | ##star 6840 | ##are 6841 | ##mc 6842 | ##ella 6843 | ##read 6844 | ##ison 6845 | ##vm 6846 | ##play 6847 | ##cer 6848 | ##yu 6849 | ##ings 6850 | ##lia 6851 | ##cd 6852 | ##tus 6853 | ##rial 6854 | ##life 6855 | ##ae 6856 | ##rk 6857 | ##wang 6858 | ##sp 6859 | ##ving 6860 | ##lton 6861 | ##ple 6862 | ##cal 6863 | ##sen 6864 | ##ville 6865 | ##ius 6866 | ##mah 6867 | ##tin 6868 | ##ws 6869 | ##ru 6870 | ##est 6871 | ##dm 6872 | ##mon 6873 | ##eam 6874 | ##ments 6875 | ##ik 6876 | ##kw 6877 | ##bin 6878 | ##ata 6879 | ##vin 6880 | ##tu 6881 | ##ula 6882 | ##ature 6883 | ##ran 6884 | ##home 6885 | ##ral 6886 | ##force 6887 | ##ini 6888 | ##bert 6889 | ##nder 6890 | ##mber 6891 | ##sis 6892 | ##ence 6893 | ##nc 6894 | ##name 6895 | ##ncy 6896 | ##nie 6897 | ##ye 6898 | ##oid 6899 | ##chi 6900 | ##orm 6901 | ##rf 6902 | ##ware 6903 | ##pro 6904 | ##era 6905 | ##ub 6906 | ##zen 6907 | ##hr 6908 | ##row 6909 | ##ish 6910 | ##lot 6911 | ##ane 6912 | ##tina 6913 | ##vel 6914 | ##ene 6915 | ##cker 6916 | ##px 6917 | ##fork 6918 | ##gan 6919 | ##zon 6920 | ##qq 6921 | ##google 6922 | ##ism 6923 | ##zer 6924 | ##labels 6925 | ##md 6926 | ##ico 6927 | ##new 6928 | ##here 6929 | ##ual 6930 | ##vice 6931 | ##wer 6932 | ##mll 6933 | ##uk 6934 | ##ming 6935 | ##vo 6936 | ##led 6937 | ##ax 6938 | ##ert 6939 | ##lr 6940 | ##hing 6941 | ##chat 6942 | ##ule 6943 | ##pad 6944 | ##ring 6945 | ##city 6946 | ##mann 6947 | ##cl 6948 | ##vd 6949 | ##ping 6950 | ##rge 6951 | ##lk 6952 | ##ney 6953 | ##ular 6954 | ##tter 6955 | ##tm 6956 | ##yan 6957 | ##let 6958 | ##pt 6959 | ##berry 6960 | ##ew 6961 | ##wn 6962 | ##og 6963 | ##code 6964 | ##rid 6965 | ##cket 6966 | ##anonymoussaid 6967 | ##ag 6968 | ##ame 6969 | ##gc 6970 | ##lis 6971 | ##gin 6972 | ##cher 6973 | ##tis 6974 | ##rant 6975 | ##ats 6976 | ##ven 6977 | ##dn 6978 | ##ano 6979 | ##urt 6980 | ##rent 6981 | ##wen 6982 | ##ect 6983 | ##chel 6984 | ##cat 6985 | ##blog 6986 | ##yn 6987 | ##tp 6988 | ##rmb 6989 | ##field 6990 | ##reen 6991 | ##ors 6992 | ##ju 6993 | ##air 6994 | ##map 6995 | ##wo 6996 | ##get 6997 | ##base 6998 | ##ood 6999 | ##aw 7000 | ##ail 7001 | ##een 7002 | ##gp 7003 | ##eg 7004 | ##ose 7005 | ##ories 7006 | ##shop 7007 | ##sta 7008 | ##yer 7009 | ##ube 7010 | ##wi 7011 | ##low 7012 | ##fer 7013 | ##media 7014 | ##san 7015 | ##bank 7016 | ##nge 7017 | ##mail 7018 | ##lp 7019 | ##nse 7020 | ##stry 7021 | ##zone 7022 | ##ab 7023 | ##rner 7024 | ##care 7025 | ##pu 7026 | ##yi 7027 | ##bar 7028 | ##xy 7029 | ##ery 7030 | ##share 7031 | ##ob 7032 | ##ball 7033 | ##hk 7034 | ##cp 7035 | ##rie 7036 | ##ona 7037 | ##sl 7038 | ##lex 7039 | ##rum 7040 | ##ale 7041 | ##atic 7042 | ##erson 7043 | ##ql 7044 | ##ves 7045 | ##type 7046 | ##mix 7047 | ##bian 7048 | ##lc 7049 | ##hc 7050 | ##ration 7051 | ##mit 7052 | ##nch 7053 | ##o2 7054 | ##point 7055 | ##http 7056 | ##ury 7057 | ##ink 7058 | ##logy 7059 | ##js 7060 | ##shot 7061 | ##tb 7062 | ##tics 7063 | ##lus 7064 | ##ama 7065 | ##ions 7066 | ##lls 7067 | ##down 7068 | ##ress 7069 | ##kv 7070 | ##ark 7071 | ##ans 7072 | ##tty 7073 | ##bee 7074 | ##ari 7075 | ##verse 7076 | ##tton 7077 | ##ties 7078 | ##llow 7079 | ##du 7080 | ##rth 7081 | ##lar 7082 | ##des 7083 | ##cky 7084 | ##kit 7085 | ##ime 7086 | ##fun 7087 | ##vis 7088 | ##cture 7089 | ##tta 7090 | ##tel 7091 | ##lock 7092 | ##app 7093 | ##right 7094 | ##cent 7095 | ##its 7096 | ##asia 7097 | ##tti 7098 | ##tle 7099 | ##ller 7100 | ##ken 7101 | ##more 7102 | ##boy 7103 | ##dom 7104 | ##ider 7105 | ##unch 7106 | ##put 7107 | ##gh 7108 | ##tr 7109 | ##n1 7110 | ##tags 7111 | ##nus 7112 | ##town 7113 | ##ique 7114 | ##body 7115 | ##erry 7116 | ##the 7117 | ##mic 7118 | ##tro 7119 | ##alk 7120 | ##nity 7121 | ##oa 7122 | ##tf 7123 | ##ack 7124 | ##ded 7125 | ##sco 7126 | ##rite 7127 | ##ada 7128 | ##now 7129 | ##ndy 7130 | ##ika 7131 | ##xp 7132 | ##bu 7133 | ##rman 7134 | ##gm 7135 | ##fig 7136 | ##tto 7137 | ##gl 7138 | ##len 7139 | ##pper 7140 | ##a1 7141 | ##ition 7142 | ##ference 7143 | ##ig 7144 | ##mond 7145 | ##cation 7146 | ##pr 7147 | ##over 7148 | ##ith 7149 | ##su 7150 | ##llo 7151 | ##qi 7152 | ##admin 7153 | ##ora 7154 | ##log 7155 | ##ces 7156 | ##ume 7157 | ##oper 7158 | ##af 7159 | ##ound 7160 | ##cg 7161 | ##site 7162 | ##iko 7163 | ##ath 7164 | ##hip 7165 | ##rey 7166 | ##cks 7167 | ##dp 7168 | ##mw 7169 | ##ations 7170 | ##vs 7171 | ##flow 7172 | ##late 7173 | ##nter 7174 | ##ever 7175 | ##lab 7176 | ##nger 7177 | ##cing 7178 | ##nap 7179 | ##ens 7180 | ##bra 7181 | ##ign 7182 | ##oto 7183 | ##test 7184 | ##urs 7185 | ##ich 7186 | ##dr 7187 | ##lina 7188 | ##die 7189 | ##try 7190 | ##ader 7191 | ##chen 7192 | ##ten 7193 | ##ough 7194 | ##hen 7195 | ##cus 7196 | ##py 7197 | ##ward 7198 | ##ep 7199 | ##cle 7200 | ##ree 7201 | ##ctive 7202 | ##ool 7203 | ##ira 7204 | ##ez 7205 | ##card 7206 | ##cha 7207 | ##end 7208 | ##ala 7209 | ##tail 7210 | ##ries 7211 | ##ved 7212 | ##ways 7213 | ##wd 7214 | ##ond 7215 | ##tom 7216 | ##ov 7217 | ##fl 7218 | ##pd 7219 | ##gate 7220 | ##sky 7221 | ##ture 7222 | ##ape 7223 | ##load 7224 | ##ream 7225 | ##post 7226 | ##we 7227 | ##ike 7228 | ##ould 7229 | ##ious 7230 | ##gar 7231 | ##ggle 7232 | ##ric 7233 | ##own 7234 | ##side 7235 | ##other 7236 | ##tech 7237 | ##ator 7238 | ##ged 7239 | ##fit 7240 | ##ily 7241 | ##hn 7242 | ##cil 7243 | ##cel 7244 | ##ize 7245 | ##aid 7246 | ##data 7247 | ##head 7248 | ##sun 7249 | ##mar 7250 | ##lic 7251 | ##text 7252 | ##page 7253 | ##rris 7254 | ##ket 7255 | ##hai 7256 | ##hl 7257 | ##sent 7258 | ##ug 7259 | ##men 7260 | ##lution 7261 | ##tional 7262 | ##vy 7263 | ##dget 7264 | ##ein 7265 | ##uel 7266 | ##ument 7267 | ##hang 7268 | ##sue 7269 | ##ndi 7270 | ##cept 7271 | ##ste 7272 | ##tag 7273 | ##west 7274 | ##live 7275 | ##rap 7276 | ##tone 7277 | ##ass 7278 | ##tical 7279 | ##m2 7280 | ##mn 7281 | ##core 7282 | ##may 7283 | ##ope 7284 | ##rain 7285 | ##ement 7286 | ##tier 7287 | ##vic 7288 | ##lay 7289 | ##uck 7290 | ##hop 7291 | ##ear 7292 | ##fly 7293 | ##ship 7294 | ##iel 7295 | ##ude 7296 | ##ena 7297 | ##thing 7298 | ##ters 7299 | ##kin 7300 | ##vertisement 7301 | ##ien 7302 | ##dge 7303 | ##tant 7304 | ##twitter 7305 | ##fu 7306 | ##iner 7307 | ##uce 7308 | ##ev 7309 | ##file 7310 | ##rio 7311 | ##hat 7312 | ##lio 7313 | ##abc 7314 | ##vc 7315 | ##rity 7316 | ##ost 7317 | ##fet 7318 | ##come 7319 | ##beth 7320 | ##aft 7321 | ##don 7322 | ##khz 7323 | ##face 7324 | ##mate 7325 | ##jing 7326 | ##mand 7327 | ##gn 7328 | ##mmy 7329 | ##pmlast 7330 | ##wu 7331 | ##bel 7332 | ##dio 7333 | ##ht 7334 | ##ivity 7335 | ##isa 7336 | ##lter 7337 | ##cts 7338 | ##act 7339 | ##room 7340 | ##ets 7341 | ##hg 7342 | ##raph 7343 | ##ils 7344 | ##host 7345 | ##bon 7346 | ##tsu 7347 | ##ject 7348 | ##vas 7349 | ##firm 7350 | ##wf 7351 | ##nor 7352 | ##space 7353 | ##dc 7354 | ##sol 7355 | ##nsis 7356 | ##sio 7357 | ##ym 7358 | ##bor 7359 | ##wt 7360 | ##tw 7361 | ##oka 7362 | ##rss 7363 | ##work 7364 | ##atus 7365 | ##times 7366 | ##ather 7367 | ##cord 7368 | ##eep 7369 | ##pan 7370 | ##press 7371 | ##tl 7372 | ##ull 7373 | ##wei 7374 | ##rip 7375 | ##nis 7376 | ##oon 7377 | ##bug 7378 | ##cms 7379 | ##dar 7380 | ##oh 7381 | ##nba 7382 | ##count 7383 | ##url 7384 | ##ging 7385 | ##cia 7386 | ##tation 7387 | ##mini 7388 | ##mporary 7389 | ##ering 7390 | ##next 7391 | ##mbps 7392 | ##gas 7393 | ##ift 7394 | ##dot 7395 | ##ros 7396 | ##eet 7397 | ##ible 7398 | ##aka 7399 | ##lor 7400 | ##iu 7401 | ##gt 7402 | ##burg 7403 | ##iki 7404 | ##rex 7405 | ##cam 7406 | ##you 7407 | ##lee 7408 | ##itor 7409 | ##xt 7410 | ##nan 7411 | ##ann 7412 | ##ph 7413 | ##rcle 7414 | ##nic 7415 | ##nar 7416 | ##rian 7417 | ##table 7418 | ##sn 7419 | ##jia 7420 | ##ques 7421 | ##onsored 7422 | ##x2 7423 | ##v4 7424 | ##tein 7425 | ##stack 7426 | ##ads 7427 | ##baby 7428 | ##lone 7429 | ##uid 7430 | ##ave 7431 | ##oy 7432 | ##talk 7433 | ##eme 7434 | ##a5 7435 | ##lace 7436 | ##covery 7437 | ##r3 7438 | ##ners 7439 | ##rea 7440 | ##aine 7441 | ##ision 7442 | ##sia 7443 | ##bow 7444 | ##love 7445 | ##pl 7446 | ##uy 7447 | ##oi 7448 | ##rr 7449 | ##mple 7450 | ##sson 7451 | ##nts 7452 | ##uard 7453 | ##bia 7454 | ##tory 7455 | ##hia 7456 | ##sit 7457 | ##walk 7458 | ##xure 7459 | ##pact 7460 | ##walker 7461 | ##can 7462 | ##hoo 7463 | ##b 7464 | ##yy 7465 | ##iti 7466 | ##bbs 7467 | ##ola 7468 | ##bre 7469 | ##pus 7470 | ##rder 7471 | ##nia 7472 | ##ugh 7473 | ##orage 7474 | ##ush 7475 | ##bat 7476 | ##dt 7477 | ##gio 7478 | ##lax 7479 | ##moon 7480 | ##kk 7481 | ##lux 7482 | ##rdon 7483 | ##x5 7484 | ##als 7485 | ##ida 7486 | ##nda 7487 | ##posted 7488 | ##mine 7489 | ##skip 7490 | ##form 7491 | ##ssion 7492 | ##tee 7493 | ##jie 7494 | ##night 7495 | ##ppy 7496 | ##█ 7497 | ##eh 7498 | ##rence 7499 | ##lvin 7500 | ##trix 7501 | ##n2 7502 | ##uch 7503 | ##dra 7504 | ##sse 7505 | ##urn 7506 | ##lmer 7507 | ##sha 7508 | ##vg 7509 | ##nos 7510 | ##rail 7511 | ##dium 7512 | ##stone 7513 | ##np 7514 | ##ias 7515 | ##dk 7516 | ##xxx 7517 | ##cake 7518 | ##ification 7519 | ##nel 7520 | ##icon 7521 | ##bby 7522 | ##ust 7523 | ##fw 7524 | ##xon 7525 | ##ses 7526 | ##dragon 7527 | ##ere 7528 | ##nne 7529 | ##oud 7530 | ##a2 7531 | ##graphy 7532 | ##rtex 7533 | ##gma 7534 | ##ito 7535 | ##lling 7536 | ##jun 7537 | ##facebook 7538 | ##tos 7539 | ##stin 7540 | ##shine 7541 | ##mu 7542 | ##gence 7543 | ##zzi 7544 | ##tore 7545 | ##ead 7546 | ##osa 7547 | ##jiang 7548 | ##lam 7549 | ##nix 7550 | ##sday 7551 | ##master 7552 | ##zl 7553 | ##itz 7554 | ##food 7555 | ##lent 7556 | ##stro 7557 | ##lts 7558 | ##bscribe 7559 | ##tment 7560 | ##pn 7561 | ##ague 7562 | ##deo 7563 | ##nnis 7564 | ##ette 7565 | ##pop 7566 | ##cast 7567 | ##ews 7568 | ##stle 7569 | ##ima 7570 | ##r4 7571 | ##desk 7572 | ##ald 7573 | ##van 7574 | ##jy 7575 | ##lines 7576 | ##mes 7577 | ##self 7578 | ##note 7579 | ##ova 7580 | ##wing 7581 | ##hua 7582 | ##rect 7583 | ##unge 7584 | ##uma 7585 | ##kins 7586 | ##zu 7587 | ##price 7588 | ##med 7589 | ##mus 7590 | ##group 7591 | ##hin 7592 | ##iginal 7593 | ##oz 7594 | ##public 7595 | ##sch 7596 | ##dden 7597 | ##bell 7598 | ##drive 7599 | ##rmin 7600 | ##fx 7601 | ##nome 7602 | ##ctionary 7603 | ##oman 7604 | ##lish 7605 | ##cr 7606 | ##hm 7607 | ##how 7608 | ##uc 7609 | ##urg 7610 | ##cca 7611 | ##uality 7612 | ##ett 7613 | ##ani 7614 | ##tax 7615 | ##rma 7616 | ##jin 7617 | ##dical 7618 | ##lli 7619 | ##last 7620 | ##dan 7621 | ##rame 7622 | ##oot 7623 | ##hur 7624 | ##ground 7625 | ##getrelax 7626 | ##ince 7627 | ##bay 7628 | ##pass 7629 | ##rix 7630 | ##ello 7631 | ##eting 7632 | ##mobile 7633 | ##ience 7634 | ##ified 7635 | ##nny 7636 | ##fr 7637 | ##lean 7638 | ##pin 7639 | ##rin 7640 | ##bility 7641 | ##baru 7642 | ##gion 7643 | ##tors 7644 | ##ernel 7645 | ##mv 7646 | ##bike 7647 | ##ager 7648 | ##del 7649 | ##pods 7650 | ##free 7651 | ##tings 7652 | ##rley 7653 | ##copyright 7654 | ##mma 7655 | ##anda 7656 | ##mall 7657 | ##gnet 7658 | ##┅ 7659 | ##dog 7660 | ##! 7661 | ##" 7662 | ### 7663 | ##$ 7664 | ##% 7665 | ##& 7666 | ##' 7667 | ##( 7668 | ##) 7669 | ##* 7670 | ##+ 7671 | ##, 7672 | ##- 7673 | ##. 7674 | ##/ 7675 | ##: 7676 | ##; 7677 | ##< 7678 | ##= 7679 | ##> 7680 | ##? 7681 | ##@ 7682 | ##[ 7683 | ##\ 7684 | ##] 7685 | ##^ 7686 | ##_ 7687 | ##{ 7688 | ##| 7689 | ##} 7690 | ##~ 7691 | ##£ 7692 | ##¤ 7693 | ##¥ 7694 | ##§ 7695 | ##« 7696 | ##± 7697 | ##³ 7698 | ##µ 7699 | ##· 7700 | ##¹ 7701 | ##º 7702 | ##» 7703 | ##¼ 7704 | ##ß 7705 | ##æ 7706 | ##÷ 7707 | ##ø 7708 | ##đ 7709 | ##ŋ 7710 | ##ɔ 7711 | ##ə 7712 | ##ɡ 7713 | ##ʰ 7714 | ##ˇ 7715 | ##ˈ 7716 | ##ˊ 7717 | ##ˋ 7718 | ##ˍ 7719 | ##ː 7720 | ##˙ 7721 | ##˚ 7722 | ##ˢ 7723 | ##α 7724 | ##β 7725 | ##γ 7726 | ##δ 7727 | ##ε 7728 | ##η 7729 | ##θ 7730 | ##ι 7731 | ##κ 7732 | ##λ 7733 | ##μ 7734 | ##ν 7735 | ##ο 7736 | ##π 7737 | ##ρ 7738 | ##ς 7739 | ##σ 7740 | ##τ 7741 | ##υ 7742 | ##φ 7743 | ##χ 7744 | ##ψ 7745 | ##б 7746 | ##в 7747 | ##г 7748 | ##д 7749 | ##е 7750 | ##ж 7751 | ##з 7752 | ##к 7753 | ##л 7754 | ##м 7755 | ##н 7756 | ##о 7757 | ##п 7758 | ##р 7759 | ##с 7760 | ##т 7761 | ##у 7762 | ##ф 7763 | ##х 7764 | ##ц 7765 | ##ч 7766 | ##ш 7767 | ##ы 7768 | ##ь 7769 | ##і 7770 | ##ا 7771 | ##ب 7772 | ##ة 7773 | ##ت 7774 | ##د 7775 | ##ر 7776 | ##س 7777 | ##ع 7778 | ##ل 7779 | ##م 7780 | ##ن 7781 | ##ه 7782 | ##و 7783 | ##ي 7784 | ##۩ 7785 | ##ก 7786 | ##ง 7787 | ##น 7788 | ##ม 7789 | ##ย 7790 | ##ร 7791 | ##อ 7792 | ##า 7793 | ##เ 7794 | ##๑ 7795 | ##་ 7796 | ##ღ 7797 | ##ᵃ 7798 | ##ᵉ 7799 | ##ᵍ 7800 | ##ᵏ 7801 | ##ᵐ 7802 | ##ᵒ 7803 | ##ᵘ 7804 | ##‖ 7805 | ##„ 7806 | ##† 7807 | ##• 7808 | ##‥ 7809 | ##‧ 7810 | ## 7811 | ##‰ 7812 | ##′ 7813 | ##″ 7814 | ##‹ 7815 | ##› 7816 | ##※ 7817 | ##‿ 7818 | ##⁄ 7819 | ##ⁱ 7820 | ##⁺ 7821 | ##ⁿ 7822 | ##₁ 7823 | ##₃ 7824 | ##₄ 7825 | ##€ 7826 | ##№ 7827 | ##ⅰ 7828 | ##ⅱ 7829 | ##ⅲ 7830 | ##ⅳ 7831 | ##ⅴ 7832 | ##⇒ 7833 | ##∀ 7834 | ##− 7835 | ##∕ 7836 | ##∙ 7837 | ##√ 7838 | ##∞ 7839 | ##∟ 7840 | ##∠ 7841 | ##∣ 7842 | ##∩ 7843 | ##∮ 7844 | ##∶ 7845 | ##∼ 7846 | ##∽ 7847 | ##≈ 7848 | ##≒ 7849 | ##≡ 7850 | ##≤ 7851 | ##≥ 7852 | ##≦ 7853 | ##≧ 7854 | ##≪ 7855 | ##≫ 7856 | ##⊙ 7857 | ##⋅ 7858 | ##⋈ 7859 | ##⋯ 7860 | ##⌒ 7861 | ##① 7862 | ##② 7863 | ##③ 7864 | ##④ 7865 | ##⑤ 7866 | ##⑥ 7867 | ##⑦ 7868 | ##⑧ 7869 | ##⑨ 7870 | ##⑩ 7871 | ##⑴ 7872 | ##⑵ 7873 | ##⑶ 7874 | ##⑷ 7875 | ##⑸ 7876 | ##⒈ 7877 | ##⒉ 7878 | ##⒊ 7879 | ##⒋ 7880 | ##ⓒ 7881 | ##ⓔ 7882 | ##ⓘ 7883 | ##━ 7884 | ##┃ 7885 | ##┆ 7886 | ##┊ 7887 | ##┌ 7888 | ##└ 7889 | ##├ 7890 | ##┣ 7891 | ##═ 7892 | ##║ 7893 | ##╚ 7894 | ##╞ 7895 | ##╠ 7896 | ##╭ 7897 | ##╮ 7898 | ##╯ 7899 | ##╰ 7900 | ##╱ 7901 | ##╳ 7902 | ##▂ 7903 | ##▃ 7904 | ##▅ 7905 | ##▇ 7906 | ##▉ 7907 | ##▋ 7908 | ##▌ 7909 | ##▍ 7910 | ##▎ 7911 | ##□ 7912 | ##▬ 7913 | ##△ 7914 | ##► 7915 | ##▽ 7916 | ##◇ 7917 | ##◕ 7918 | ##◠ 7919 | ##◢ 7920 | ##◤ 7921 | ##☞ 7922 | ##☼ 7923 | ##♡ 7924 | ##♫ 7925 | ##♬ 7926 | ##✕ 7927 | ##✦ 7928 | ##✪ 7929 | ##✰ 7930 | ##✿ 7931 | ##❀ 7932 | ##➜ 7933 | ##➤ 7934 | ##⦿ 7935 | ##、 7936 | ##。 7937 | ##〃 7938 | ##々 7939 | ##〇 7940 | ##〈 7941 | ##〉 7942 | ##《 7943 | ##》 7944 | ##「 7945 | ##」 7946 | ##『 7947 | ##』 7948 | ##【 7949 | ##】 7950 | ##〓 7951 | ##〔 7952 | ##〕 7953 | ##〖 7954 | ##〗 7955 | ##〜 7956 | ##〝 7957 | ##〞 7958 | ##ㄧ 7959 | ##ㆍ 7960 | ##㈦ 7961 | ##㊣ 7962 | ##㗎 7963 | ##︰ 7964 | ##︱ 7965 | ##︶ 7966 | ##︿ 7967 | ##﹁ 7968 | ##﹂ 7969 | ##﹍ 7970 | ##﹏ 7971 | ##﹐ 7972 | ##﹑ 7973 | ##﹒ 7974 | ##﹔ 7975 | ##﹕ 7976 | ##﹖ 7977 | ##﹗ 7978 | ##﹙ 7979 | ##﹚ 7980 | ##﹝ 7981 | ##﹞ 7982 | ##﹡ 7983 | ##﹣ 7984 | ##! 7985 | ##" 7986 | ### 7987 | ##$ 7988 | ##% 7989 | ##& 7990 | ##' 7991 | ##( 7992 | ##) 7993 | ##* 7994 | ##, 7995 | ##- 7996 | ##. 7997 | ##/ 7998 | ##: 7999 | ##; 8000 | ##< 8001 | ##? 8002 | ##@ 8003 | ##[ 8004 | ##\ 8005 | ##] 8006 | ##^ 8007 | ##_ 8008 | ##` 8009 | ##{ 8010 | ##} 8011 | ##。 8012 | ##「 8013 | ##」 8014 | ##、 8015 | ##・ 8016 | ##ッ 8017 | ##ー 8018 | ##゙ 8019 | ##゚ 8020 | ## ̄ 8021 | ##¥ --------------------------------------------------------------------------------