├── .gitignore
├── README.md
├── common_file
│   ├── file_utils.py
│   ├── modeling.py
│   ├── modeling_xd.py
│   ├── optimization.py
│   ├── parallel.py
│   └── tokenization.py
├── datas
│   ├── relation_data
│   │   ├── small.dev.txt
│   │   └── small_train.txt
│   ├── scene_cut_data
│   │   ├── small.dev.txt
│   │   └── small.train.txt
│   └── two_classifier_data
│       ├── small.dev.txt
│       └── small.train.txt
├── examples
│   ├── embeddings_example.py
│   ├── myself_example.py
│   └── relation_classifier_predict.py
├── relation_classifier
│   ├── all_cls_mean_relations.py
│   ├── order_relation_classifier.py
│   ├── order_relation_classifier2.py
│   ├── relation_classify.py
│   └── sim_classify.py
├── requirements.txt
├── scene_classifier
│   ├── __init__.py
│   ├── data_untils.py
│   ├── fix_2_train.py
│   ├── fix_3_train.py
│   └── scene_classifier_train.py
├── script
│   ├── add_sentences_relations.sh
│   ├── all_cls_mean_relations.sh
│   ├── order_relation.sh
│   ├── relation_classifier.sh
│   ├── scene_classifier.sh
│   ├── scene_fix_2+huameng_classifier.sh
│   ├── scene_fix_2_classifier.sh
│   ├── scene_fix_3_classifier.sh
│   ├── sim_classifier.sh
│   └── two_sentences_classifier.sh
├── server.py
├── test
│   └── eval.py
└── two_sentences_classifier
    ├── __init__.py
    ├── add_sentences_relations.py
    ├── add_type_train.py
    ├── cls_classifier_train.py
    ├── train.py
    └── word_embeddings.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .DS_Store
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | .hypothesis/
49 | .pytest_cache/
50 |
51 | # Translations
52 | *.mo
53 | *.pot
54 |
55 | # Django stuff:
56 | *.log
57 | .static_storage/
58 | .media/
59 | local_settings.py
60 |
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 |
65 | # Scrapy stuff:
66 | .scrapy
67 |
68 | # Sphinx documentation
69 | docs/_build/
70 |
71 | # PyBuilder
72 | target/
73 |
74 | # Jupyter Notebook
75 | .ipynb_checkpoints
76 |
77 | # pyenv
78 | .python-version
79 |
80 | # celery beat schedule file
81 | celerybeat-schedule
82 |
83 | # SageMath parsed files
84 | *.sage.py
85 |
86 | # Environments
87 | .env
88 | .venv
89 | env/
90 | venv/
91 | ENV/
92 | env.bak/
93 | venv.bak/
94 |
95 | # Spyder project settings
96 | .spyderproject
97 | .spyproject
98 |
99 | # Rope project settings
100 | .ropeproject
101 |
102 | # mkdocs documentation
103 | /site
104 |
105 | # mypy
106 | .mypy_cache/
107 |
108 | #pycharm
109 | .idea/
110 |
111 | #data
112 | #/data
113 | .vscode/*
114 |
115 | # XD
116 | models/
117 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Semantic similarity and sentence-vector generation
2 |
3 | This project trains classification tasks that take two groups of sentences as input; the trained models can also be used to extract sentence vectors for downstream tasks such as semantic similarity.
4 |
5 | Related paper: "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks"
6 |
7 | #### Framework diagram
8 | 
9 |
10 | # Project structure
11 |
12 | .
13 | ├── README.md
14 | ├── common_file # shared code: models, optimizer, tokenizer, etc.
15 | ├── datas # data samples
16 | ├── examples # application entry points
17 | ├── relation_classifier # novel character-relation classification
18 | ├── requirements.txt
19 | ├── scene_classifier # novel scene-cut classification
20 | ├── script
21 | ├── test
22 | └── two_sentences_classifier # novel character-dialogue classification
23 |
24 |
25 | ## two_sentences_classifier
26 |
27 | Character-dialogue classification: the model classifies the spoken lines of novel characters. Given the utterances of two speakers (A, B), the goal is to decide whether the two groups of utterances come from the same person. The number of sentences per speaker is the hyperparameter top_n,
28 | e.g. 7, 10, 15 or 20, and the dataset contains 20 utterances for each of A and B. See the sample data under ../datas/two_sentences_classifier/ for the exact
29 | data format.
30 |
31 | - **Basic idea: with 7 sentences per speaker, the input feature input_ids has shape batch_size x 14 x len**
32 |
33 | [CLS] 你今天干嘛去了?[SEP]
34 | ...
35 | [CLS] 我想吃雪糕?[SEP] # the 7 sentences above are spoken by A
36 | [CLS] 你今天干嘛去了?[SEP]
37 | ...
38 | [CLS] 我觉得还是不去的好。[SEP] # these 7 sentences are spoken by B
39 |
40 |
41 | There are 14 sentences in total. Note that A's 7 sentences are 7 random lines from some novel and are not necessarily addressed to B; A and B may even come from different novels. Why arrange the input this way? Because during the forward pass these 14 sentences
42 | are chunked into two halves, each of shape batch_size x 7 x len x 768 after encoding. Following "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", mean pooling
43 | reduces each half to batch_size x 7 x 768, and a further mean over the 7 sentences gives batch_size x 768. Finally the two batch_size x 768 vectors are combined as in the framework diagram: take the absolute difference and concatenate. See the class TwoSentenceClassifier in ../common_file/modeling.py and the sketch below.
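A minimal sketch of the pooling-and-concatenation idea described above. It is illustrative only and is not the repository's `TwoSentenceClassifier` (see `../common_file/modeling.py` for the real code); the encoder call, the shapes, and the Sentence-BERT style `(u, v, |u - v|)` feature layout are assumptions.

```python
import torch
import torch.nn as nn


class TwoSpeakerSketch(nn.Module):
    """Illustrative sketch: encode 2*top_n sentences, pool per speaker, classify."""

    def __init__(self, bert, hidden=768, num_labels=2):
        super().__init__()
        self.bert = bert                                     # any encoder returning (B*N, L, H) token embeddings
        self.classifier = nn.Linear(hidden * 3, num_labels)

    def forward(self, input_ids, attention_mask):
        # input_ids: (B, 2*top_n, L) -> encode every sentence independently
        b, n, l = input_ids.size()
        flat_ids = input_ids.view(b * n, l)
        flat_mask = attention_mask.view(b * n, l)
        token_emb = self.bert(flat_ids, attention_mask=flat_mask)      # (B*n, L, H), assumed

        # mean pooling over tokens (ignoring padding), then back to (B, 2*top_n, H)
        mask = flat_mask.unsqueeze(-1).float()
        sent_emb = (token_emb * mask).sum(1) / mask.sum(1).clamp(min=1e-9)
        sent_emb = sent_emb.view(b, n, -1)

        # chunk into speaker A / speaker B and average each speaker's sentences
        emb_a, emb_b = sent_emb.chunk(2, dim=1)                         # (B, top_n, H) each
        u, v = emb_a.mean(dim=1), emb_b.mean(dim=1)                     # (B, H)

        # Sentence-BERT style feature: concat(u, v, |u - v|), then classify
        features = torch.cat([u, v, torch.abs(u - v)], dim=-1)
        return self.classifier(features)                                # (B, num_labels)
```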
44 |
45 | - **Training**
46 |
47 | cd script
48 |
49 | sh two_sentences_classifier.sh
50 |
51 |
52 | **Parameter explanation**
53 |
54 |
55 | CUDA_VISIBLE_DEVICES=4,5,6,7 python ../two_sentences_classifier/train.py \
56 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \ # vocabulary file; a RoBERTa vocab is used here
57 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
58 | --do_lower_case \
59 | --train_file /nas/xd/data/novels/speech_labeled20/ \ # training data directory
60 | --eval_file /nas/xd/data/novels/speech_labeled20/data.dev \ # dev-set file
61 | --train_batch_size 32 \
62 | --eval_batch_size 8 \
63 | --learning_rate 5e-5 \
64 | --num_train_epochs 6 \
65 | --top_n 15 \ # each example has 20 sentences; only top_n of them are used
66 | --num_labels 2 \ # number of classes
67 | --reduce_dim 0 \ # dimension after reduction; 0 means no dimension reduction
68 | --output_dir ./two_sentences_classifier_large_model0506_15 \ # where the model is saved
69 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
70 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
71 | --do_train \
72 | --gradient_accumulation_steps 4 3>&2 2>&1 1>&3 | tee logs/two_sentences_classifier_large_model0430_15.log
73 |
74 |
75 |
76 | - **Results:**
77 |
78 | | top_n model | f1 | prediction with 20 input sentences |
79 | | ---- | ---- | ---- |
80 | | 7 | 84% | 86% |
81 | | 10 | 86% | 88% |
82 | | 15 | 90% | 92% |
83 |
84 | ## relation_classifier
85 |
86 | Character-relation classification: the model classifies the **dialogue** between novel characters. Given two dialogue segments, the goal is to decide whether they come from the same pair of speakers (A, B). The number of dialogue turns is the hyperparameter top_n,
87 | e.g. 7, 10, 15 or 20, and each example contains 20 dialogue **pairs**. The exact data format can be found under ../datas/relation_classifier/.
88 |
89 | Sample data is provided there.
90 |
91 | - **Basic idea: with 7 pairs per segment, the input feature input_ids has shape batch_size x 14 x len**
92 |
93 | [CLS] 你今天干嘛去了?[SEP]不打算干吗呀![SEP]
94 | ...
95 | [CLS] 我想吃雪糕?[SEP]雪糕对身体不好,要少吃。[SEP] # the pairs above are the A:B dialogue
96 | [CLS] 你今天干嘛去了?[SEP]我今天去外婆家吃饭了。[SEP]
97 | ...
98 | [CLS] 我觉得还是不去的好。[SEP]为什么啊?[SEP] # this is a dialogue of AB, BA, AC, BD, ...; if it is AB or BA the label is 1, otherwise 0
99 |
100 | Compared with TwoSentencesClassifier, a linear layer is added after the absolute-difference/concat step to reduce the feature dimension. See the class RelationClassifier in ../common_file/modeling.py and the sketch below.
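A minimal sketch of that extra projection head (hypothetical names; the real code is the `RelationClassifier` class in `../common_file/modeling.py`). It only shows how a `reduce_dim` linear layer would sit between the concatenated feature and the classifier, mirroring the `--reduce_dim` training flag:

```python
import torch
import torch.nn as nn


class RelationHeadSketch(nn.Module):
    """Illustrative head: concat(u, v, |u - v|) -> optional reduction -> classifier."""

    def __init__(self, hidden=768, reduce_dim=768, num_labels=2):
        super().__init__()
        # reduce_dim = 0 means "no reduction": classify directly on the 3*hidden feature
        self.reduce = nn.Linear(hidden * 3, reduce_dim) if reduce_dim > 0 else None
        in_dim = reduce_dim if reduce_dim > 0 else hidden * 3
        self.classifier = nn.Linear(in_dim, num_labels)

    def forward(self, u, v):
        features = torch.cat([u, v, torch.abs(u - v)], dim=-1)
        if self.reduce is not None:
            features = self.reduce(features)
        return self.classifier(features)
```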
101 |
102 | - **Training**
103 |
104 | cd script
105 |
106 | sh relation_classifier.sh
107 |
108 | - **Results:**
109 |
110 | a) With dimension reduction to 768
111 | | top_n model | f1 | prediction with 20 input sentences |
112 | | ---- | ---- | ---- |
113 | | 7 | 85% | 74% |
114 | | 10 | 90% | 85% |
115 | | 15 | 94% | 93% |
116 |
117 | b) Without dimension reduction
118 | | top_n model | f1 | prediction with 20 input sentences |
119 | | ---- | ---- | ---- |
120 | | 7 | 81% | 82% |
121 | | 10 | - | - |
122 | | 15 | - | - |
123 |
124 | Analysis: compared with the dialogue-classification task, training the relation classifier adds a dimension-reduction step. The results show that dimension reduction yields better classification scores, but when the number of sentences used at prediction time differs from the number used during training, accuracy degrades; that is the downside of reducing the dimension.
125 | Therefore, if you predict with a different number of input sentences than you trained with, training without dimension reduction is recommended; the resulting model holds up better.
126 |
127 |
128 | ## scene_classifier
129 |
130 | Novel scene-cut classification: the model detects **scene changes** in novels. Given a window of top_n sentences, the goal is to decide whether the **middle** sentence marks a scene cut; scene-cut markers in the data include "...", "学校" (school), "张山家里" (Zhang Shan's home), and so on. We tried top_n = 3, 5, 7, 9, etc.
131 | The exact data format can be found under ../datas/scene_classifier/.
132 |
133 | Sample data is provided there.
134 |
135 | - **Basic idea: with 3 or 5 sentences, the input feature input_ids has shape batch_size x len**.
136 |
137 | [CLS] 你今天干嘛去了?[SEP]学校里...[SEP]不打算干吗呀![SEP] # top_n = 3
138 | [CLS] 你今天干嘛去了?不打算干吗呀![SEP]学校里...[SEP]我想吃雪糕!雪糕对身体不好,要少吃。[SEP] # top_n = 5
139 |
140 |
141 | This is ordinary single-sequence BERT classification. See the class BertForSequenceClassification in ../common_file/modeling.py; a sketch of the input construction is given below.
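A minimal sketch of packing a top_n = 3 window into a single BERT input, with the middle sentence as the scene-cut candidate. It assumes `common_file/` is on `PYTHONPATH` so that `tokenization.BertTokenizer` can be imported; the helper name, `max_len`, the vocab path and the padding id are illustrative assumptions, not the project's `data_untils.py` code.

```python
from tokenization import BertTokenizer  # common_file/tokenization.py, assuming it is importable


def build_scene_example(sentences, tokenizer, max_len=128):
    """sentences: the top_n raw sentences; the middle one is the scene-cut candidate."""
    tokens = ["[CLS]"]
    for sent in sentences:
        tokens += tokenizer.tokenize(sent) + ["[SEP]"]   # one [SEP] after each sentence
    tokens = tokens[:max_len]
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_ids += [0] * (max_len - len(input_ids))        # pad with the [PAD] id (assumed to be 0)
    return input_ids


# tokenizer = BertTokenizer("/path/to/vocab.txt")        # vocab path is an assumption
# ids = build_scene_example(["你今天干嘛去了?", "学校里...", "不打算干吗呀!"], tokenizer)
```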
142 |
143 | - **Training**
144 |
145 | cd script
146 |
147 | sh scene_classifier.sh
148 |
149 | - **Results:**
150 |
151 | | top_n model | f1 |
152 | | ---- | ---- |
153 | | 3 | 84% |
154 | | 5 | 85% |
155 | | 7 | 86% |
156 |
157 | #### Note: different dropout values change the classification results by around 2 percentage points; 0.3 worked best for this project.
158 |
159 |
160 | #### Getting sentence vectors
161 |
162 | cd examples
163 | python embeddings_example.py
164 |
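A minimal sketch of using the extracted sentence vectors for a downstream similarity check. The `encode_sentences` helper is hypothetical; the project's own entry point for extracting vectors is examples/embeddings_example.py.

```python
import torch
import torch.nn.functional as F


def cosine_similarity(u: torch.Tensor, v: torch.Tensor) -> float:
    """u, v: 1-D sentence vectors (e.g. 768-dim) produced by the trained encoder."""
    return F.cosine_similarity(u.unsqueeze(0), v.unsqueeze(0)).item()


# u = encode_sentences(model, "你今天干嘛去了?")   # hypothetical helper around the trained model
# v = encode_sentences(model, "我想吃雪糕?")
# print(cosine_similarity(u, v))
```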
165 |
166 |
167 |
--------------------------------------------------------------------------------
/common_file/file_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utilities for working with the local dataset cache.
3 | This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
4 | Copyright by the AllenNLP authors.
5 | """
6 |
7 | import os
8 | import shutil
9 | import tempfile
10 | import json
11 | from urllib.parse import urlparse
12 | from pathlib import Path
13 | from typing import Optional, Tuple, Union, IO, Callable, Set
14 | from hashlib import sha256
15 | from functools import wraps
16 |
17 | from tqdm import tqdm
18 |
19 | import boto3
20 | from botocore.exceptions import ClientError
21 | import requests
22 | PYTORCH_PRETRAINED_BERT_CACHE = Path(
23 | os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
24 | Path.home() / '.pytorch_pretrained_bert'))
25 |
26 |
27 | def url_to_filename(url: str, etag: str = None) -> str:
28 | """
29 | Convert `url` into a hashed filename in a repeatable way.
30 | If `etag` is specified, append its hash to the url's, delimited
31 | by a period.
32 | """
33 | url_bytes = url.encode('utf-8')
34 | url_hash = sha256(url_bytes)
35 | filename = url_hash.hexdigest()
36 |
37 | if etag:
38 | etag_bytes = etag.encode('utf-8')
39 | etag_hash = sha256(etag_bytes)
40 | filename += '.' + etag_hash.hexdigest()
41 |
42 | return filename
43 |
44 |
45 | def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
46 | """
47 | Return the url and etag (which may be ``None``) stored for `filename`.
48 | Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
49 | """
50 | if cache_dir is None:
51 | cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
52 |
53 | cache_path = os.path.join(cache_dir, filename)
54 | if not os.path.exists(cache_path):
55 | raise FileNotFoundError("file {} not found".format(cache_path))
56 |
57 | meta_path = cache_path + '.json'
58 | if not os.path.exists(meta_path):
59 | raise FileNotFoundError("file {} not found".format(meta_path))
60 |
61 | with open(meta_path) as meta_file:
62 | metadata = json.load(meta_file)
63 | url = metadata['url']
64 | etag = metadata['etag']
65 |
66 | return url, etag
67 |
68 |
69 | def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
70 | """
71 | Given something that might be a URL (or might be a local path),
72 | determine which. If it's a URL, download the file and cache it, and
73 | return the path to the cached file. If it's already a local path,
74 | make sure the file exists and then return the path.
75 | """
76 | if cache_dir is None:
77 | cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
78 | if isinstance(url_or_filename, Path):
79 | url_or_filename = str(url_or_filename)
80 |
81 | parsed = urlparse(url_or_filename)
82 |
83 | if parsed.scheme in ('http', 'https', 's3'):
84 | # URL, so get it from the cache (downloading if necessary)
85 |         # download the model from the server and return the local path to it
86 | return get_from_cache(url_or_filename, cache_dir)
87 | elif os.path.exists(url_or_filename):
88 | # File, and it exists.
89 | return url_or_filename
90 | elif parsed.scheme == '':
91 | # File, but it doesn't exist.
92 | raise FileNotFoundError("file {} not found".format(url_or_filename))
93 | else:
94 | # Something unknown
95 | raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
96 |
97 |
98 | def split_s3_path(url: str) -> Tuple[str, str]:
99 | """Split a full s3 path into the bucket name and path."""
100 | parsed = urlparse(url)
101 | if not parsed.netloc or not parsed.path:
102 | raise ValueError("bad s3 path {}".format(url))
103 | bucket_name = parsed.netloc
104 | s3_path = parsed.path
105 | # Remove '/' at beginning of path.
106 | if s3_path.startswith("/"):
107 | s3_path = s3_path[1:]
108 | return bucket_name, s3_path
109 |
110 |
111 | def s3_request(func: Callable):
112 | """
113 | Wrapper function for s3 requests in order to create more helpful error
114 | messages.
115 | """
116 |
117 | @wraps(func)
118 | def wrapper(url: str, *args, **kwargs):
119 | try:
120 | return func(url, *args, **kwargs)
121 | except ClientError as exc:
122 | if int(exc.response["Error"]["Code"]) == 404:
123 | raise FileNotFoundError("file {} not found".format(url))
124 | else:
125 | raise
126 |
127 | return wrapper
128 |
129 |
130 | @s3_request
131 | def s3_etag(url: str) -> Optional[str]:
132 | """Check ETag on S3 object."""
133 | s3_resource = boto3.resource("s3")
134 | bucket_name, s3_path = split_s3_path(url)
135 | s3_object = s3_resource.Object(bucket_name, s3_path)
136 | return s3_object.e_tag
137 |
138 |
139 | @s3_request
140 | def s3_get(url: str, temp_file: IO) -> None:
141 | """Pull a file directly from S3."""
142 | s3_resource = boto3.resource("s3")
143 | bucket_name, s3_path = split_s3_path(url)
144 | s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
145 |
146 |
147 | def http_get(url: str, temp_file: IO) -> None:
148 | req = requests.get(url, stream=True)
149 | content_length = req.headers.get('Content-Length')
150 | total = int(content_length) if content_length is not None else None
151 | progress = tqdm(unit="B", total=total)
152 | for chunk in req.iter_content(chunk_size=1024):
153 | if chunk: # filter out keep-alive new chunks
154 | progress.update(len(chunk))
155 | temp_file.write(chunk)
156 | progress.close()
157 |
158 |
159 | def get_from_cache(url: str, cache_dir: str = None) -> str:
160 | """
161 | Given a URL, look for the corresponding dataset in the local cache.
162 | If it's not there, download it. Then return the path to the cached file.
163 | """
164 | if cache_dir is None:
165 | cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
166 |
167 | os.makedirs(cache_dir, exist_ok=True)
168 |
169 | # Get eTag to add to filename, if it exists.
170 | if url.startswith("s3://"):
171 | etag = s3_etag(url)
172 | else:
173 | response = requests.head(url, allow_redirects=True)
174 | if response.status_code != 200:
175 | raise IOError("HEAD request failed for url {} with status code {}".format(
176 | url, response.status_code))
177 | etag = response.headers.get("ETag")
178 | print("| etag is {}".format(etag))
179 |
180 | filename = url_to_filename(url, etag)
181 |
182 | # get cache path to put the file
183 | cache_path = os.path.join(cache_dir, filename)
184 |
185 | if not os.path.exists(cache_path):
186 | # Download to temporary file, then copy to cache dir once finished.
187 | # Otherwise you get corrupt cache entries if the download gets interrupted.
188 | with tempfile.NamedTemporaryFile() as temp_file:
189 | print("%s not found in cache, downloading to %s", url, temp_file.name)
190 |
191 | # GET file object
192 | if url.startswith("s3://"):
193 | s3_get(url, temp_file)
194 | else:
195 | http_get(url, temp_file)
196 |
197 | # we are copying the file before closing it, so flush to avoid truncation
198 | temp_file.flush()
199 | # shutil.copyfileobj() starts at the current position, so go to the start
200 | temp_file.seek(0)
201 |
202 | print("copying %s to cache at %s", temp_file.name, cache_path)
203 | with open(cache_path, 'wb') as cache_file:
204 | shutil.copyfileobj(temp_file, cache_file)
205 |
206 | print("creating metadata file for %s", cache_path)
207 | meta = {'url': url, 'etag': etag}
208 | meta_path = cache_path + '.json'
209 | with open(meta_path, 'w') as meta_file:
210 | json.dump(meta, meta_file)
211 |
212 | print("removing temp file %s", temp_file.name)
213 |
214 | return cache_path
215 |
216 |
217 | def read_set_from_file(filename: str) -> Set[str]:
218 | '''
219 | Extract a de-duped collection (set) of text from a file.
220 | Expected file format is one item per line.
221 | '''
222 | collection = set()
223 | with open(filename, 'r') as file_:
224 | for line in file_:
225 | collection.add(line.rstrip())
226 | return collection
227 |
228 |
229 | def get_file_extension(path: str, dot=True, lower: bool = True):
230 | ext = os.path.splitext(path)[1]
231 | ext = ext if dot else ext[1:]
232 | return ext.lower() if lower else ext
233 |
--------------------------------------------------------------------------------
/common_file/optimization.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """PyTorch optimization for BERT model."""
16 |
17 | import math
18 | import torch
19 | from torch.optim import Optimizer
20 | from torch.optim.optimizer import required
21 | from torch.nn.utils import clip_grad_norm_
22 | import logging
23 | import abc
24 | import sys
25 |
26 | logger = logging.getLogger(__name__)
27 |
28 | if sys.version_info >= (3, 4):
29 | ABC = abc.ABC
30 | else:
31 | ABC = abc.ABCMeta('ABC', (), {})
32 |
33 |
34 | class _LRSchedule(ABC):
35 | """ Parent of all LRSchedules here. """
36 | warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
37 |
38 | def __init__(self, warmup=0.002, t_total=-1, **kw):
39 | """
40 | :param warmup: what fraction of t_total steps will be used for linear warmup
41 | :param t_total: how many training steps (updates) are planned
42 | :param kw:
43 | """
44 | super(_LRSchedule, self).__init__(**kw)
45 | if t_total < 0:
46 | logger.warning(
47 | "t_total value of {} results in schedule not being applied".format(t_total))
48 | if not 0.0 <= warmup < 1.0 and not warmup == -1:
49 | raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
50 | warmup = max(warmup, 0.)
51 | self.warmup, self.t_total = float(warmup), float(t_total)
52 | self.warned_for_t_total_at_progress = -1
53 |
54 | def get_lr(self, step, nowarn=False):
55 | """
56 | :param step: which of t_total steps we're on
57 | :param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
58 | :return: learning rate multiplier for current update
59 | """
60 | if self.t_total < 0:
61 | return 1.
62 | # print(f'step={step}, t_total={self.t_total}')
63 | progress = float(step) / self.t_total
64 | ret = self.get_lr_(progress)
65 | # warning for exceeding t_total (only active with warmup_linear
66 | if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
67 | logger.warning(
68 | "Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
69 | .format(ret, self.__class__.__name__))
70 | self.warned_for_t_total_at_progress = progress
71 | # end warning
72 | return ret
73 |
74 | @abc.abstractmethod
75 | def get_lr_(self, progress):
76 | """
77 | :param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
78 | :return: learning rate multiplier for current update
79 | """
80 | return 1.
81 |
82 |
83 | class ConstantLR(_LRSchedule):
84 |
85 | def get_lr_(self, progress):
86 | return 1.
87 |
88 |
89 | class WarmupCosineSchedule(_LRSchedule):
90 | """
91 | Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
92 | Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
93 | If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
94 | """
95 | warn_t_total = True
96 |
97 | def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
98 | """
99 | :param warmup: see LRSchedule
100 | :param t_total: see LRSchedule
101 | :param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
102 | :param kw:
103 | """
104 | super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
105 | self.cycles = cycles
106 |
107 | def get_lr_(self, progress):
108 | if progress < self.warmup:
109 | return progress / self.warmup
110 | else:
111 | progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
112 | return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
113 |
114 |
115 | class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
116 | """
117 | Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
118 | If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
119 | learning rate (with hard restarts).
120 | """
121 |
122 | def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
123 | super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup,
124 | t_total=t_total,
125 | cycles=cycles,
126 | **kw)
127 | assert (cycles >= 1.)
128 |
129 | def get_lr_(self, progress):
130 | if progress < self.warmup:
131 | return progress / self.warmup
132 | else:
133 | progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
134 | ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
135 | return ret
136 |
137 |
138 | class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
139 | """
140 | All training progress is divided in `cycles` (default=1.) parts of equal length.
141 | Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
142 | followed by a learning rate decreasing from 1. to 0. following a cosine curve.
143 | """
144 |
145 | def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
146 | assert (warmup * cycles < 1.)
147 | warmup = warmup * cycles if warmup >= 0 else warmup
148 | super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup,
149 | t_total=t_total,
150 | cycles=cycles,
151 | **kw)
152 |
153 | def get_lr_(self, progress):
154 | progress = progress * self.cycles % 1.
155 | if progress < self.warmup:
156 | return progress / self.warmup
157 | else:
158 | progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
159 | ret = 0.5 * (1. + math.cos(math.pi * progress))
160 | return ret
161 |
162 |
163 | class WarmupConstantSchedule(_LRSchedule):
164 | """
165 | Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
166 | Keeps learning rate equal to 1. after warmup.
167 | """
168 |
169 | def get_lr_(self, progress):
170 | if progress < self.warmup:
171 | return progress / self.warmup
172 | return 1.
173 |
174 |
175 | class WarmupLinearSchedule(_LRSchedule):
176 | """
177 | Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
178 | Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
179 | """
180 | warn_t_total = True
181 |
182 | def get_lr_(self, progress):
183 | if progress < self.warmup:
184 | return progress / self.warmup
185 | return max((progress - 1.) / (self.warmup - 1.), 0.)
186 |
187 |
188 | SCHEDULES = {
189 | None: ConstantLR,
190 | "none": ConstantLR,
191 | "warmup_cosine": WarmupCosineSchedule,
192 | "warmup_constant": WarmupConstantSchedule,
193 | "warmup_linear": WarmupLinearSchedule
194 | }
195 |
196 |
197 | class BertAdam(Optimizer):
198 | """Implements BERT version of Adam algorithm with weight decay fix.
199 | Params:
200 | lr: learning rate
201 | warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
202 | t_total: total number of training steps for the learning
203 | rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
204 | schedule: schedule to use for the warmup (see above).
205 | Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
206 | If `None` or `'none'`, learning rate is always kept constant.
207 | Default : `'warmup_linear'`
208 | b1: Adams b1. Default: 0.9
209 | b2: Adams b2. Default: 0.999
210 | e: Adams epsilon. Default: 1e-6
211 | weight_decay: Weight decay. Default: 0.01
212 | max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
213 | """
214 |
215 | def __init__(self,
216 | params,
217 | lr=required,
218 | warmup=-1,
219 | t_total=-1,
220 | schedule='warmup_linear',
221 | b1=0.9,
222 | b2=0.999,
223 | e=1e-6,
224 | weight_decay=0.01,
225 | max_grad_norm=1.0,
226 | **kwargs):
227 | # print(f'lr = {lr}')
228 | if lr is not required and lr < 0.0:
229 | raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
230 | if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
231 | raise ValueError("Invalid schedule parameter: {}".format(schedule))
232 | if not 0.0 <= b1 < 1.0:
233 | raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
234 | if not 0.0 <= b2 < 1.0:
235 | raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
236 | if not e >= 0.0:
237 | raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
238 | # initialize schedule object
239 | if not isinstance(schedule, _LRSchedule):
240 | schedule_type = SCHEDULES[schedule]
241 | schedule = schedule_type(warmup=warmup, t_total=t_total)
242 | else:
243 | if warmup != -1 or t_total != -1:
244 | logger.warning(
245 | "warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
246 | "Please specify custom warmup and t_total in _LRSchedule object.")
247 | defaults = dict(lr=lr,
248 | schedule=schedule,
249 | b1=b1,
250 | b2=b2,
251 | e=e,
252 | weight_decay=weight_decay,
253 | max_grad_norm=max_grad_norm)
254 | super(BertAdam, self).__init__(params, defaults)
255 |
256 | def get_lr(self):
257 | lr = []
258 | for group in self.param_groups:
259 | for p in group['params']:
260 | state = self.state[p]
261 | if len(state) == 0:
262 | return [0]
263 | lr_scheduled = group['lr']
264 | lr_scheduled *= group['schedule'].get_lr(state['step'])
265 | lr.append(lr_scheduled)
266 | return lr
267 |
268 | def step(self, closure=None):
269 | """Performs a single optimization step.
270 |
271 | Arguments:
272 | closure (callable, optional): A closure that reevaluates the model
273 | and returns the loss.
274 | """
275 | loss = None
276 | if closure is not None:
277 | loss = closure()
278 |
279 | for group in self.param_groups:
280 | for p in group['params']:
281 | if p.grad is None:
282 | continue
283 | grad = p.grad.data
284 | if grad.is_sparse:
285 | raise RuntimeError(
286 | 'Adam does not support sparse gradients, please consider SparseAdam instead'
287 | )
288 |
289 | state = self.state[p]
290 |
291 | # State initialization
292 | if len(state) == 0:
293 | state['step'] = 0
294 | # Exponential moving average of gradient values
295 | state['next_m'] = torch.zeros_like(p.data)
296 | # Exponential moving average of squared gradient values
297 | state['next_v'] = torch.zeros_like(p.data)
298 |
299 | next_m, next_v = state['next_m'], state['next_v']
300 | beta1, beta2 = group['b1'], group['b2']
301 |
302 | # Add grad clipping
303 | if group['max_grad_norm'] > 0:
304 | clip_grad_norm_(p, group['max_grad_norm'])
305 |
306 | # Decay the first and second moment running average coefficient
307 | # In-place operations to update the averages at the same time
308 | next_m.mul_(beta1).add_(1 - beta1, grad)
309 | next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
310 | update = next_m / (next_v.sqrt() + group['e'])
311 |
312 | # Just adding the square of the weights to the loss function is *not*
313 | # the correct way of using L2 regularization/weight decay with Adam,
314 | # since that will interact with the m and v parameters in strange ways.
315 | #
316 | # Instead we want to decay the weights in a manner that doesn't interact
317 | # with the m/v parameters. This is equivalent to adding the square
318 | # of the weights to the loss with plain (non-momentum) SGD.
319 | if group['weight_decay'] > 0.0:
320 | update += group['weight_decay'] * p.data
321 |
322 | lr_scheduled = group['lr']
323 | # print(f'lr_scheduled0={lr_scheduled}')
324 | lr_scheduled *= group['schedule'].get_lr(state['step'])
325 | # print(f'lr_scheduled1={lr_scheduled}') # 0
326 |
327 | update_with_lr = lr_scheduled * update
328 | # print(f'lr_scheduled2={lr_scheduled}')
329 |
330 | p.data.add_(-update_with_lr)
331 |
332 | state['step'] += 1
333 |
334 | # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
335 | # No bias correction
336 | # bias_correction1 = 1 - beta1 ** state['step']
337 | # bias_correction2 = 1 - beta2 ** state['step']
338 |
339 | return loss
340 |
341 |
342 | class AdamW(Optimizer):
343 | """ Implements Adam algorithm with weight decay fix.
344 |
345 | Parameters:
346 | lr (float): learning rate. Default 1e-3.
347 | betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
348 | eps (float): Adams epsilon. Default: 1e-6
349 | weight_decay (float): Weight decay. Default: 0.0
350 | correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
351 | """
352 |
353 | def __init__(self,
354 | params,
355 | lr=1e-3,
356 | betas=(0.9, 0.999),
357 | eps=1e-6,
358 | weight_decay=0.0,
359 | correct_bias=True):
360 | if lr < 0.0:
361 | raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
362 | if not 0.0 <= betas[0] < 1.0:
363 | raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(
364 | betas[0]))
365 | if not 0.0 <= betas[1] < 1.0:
366 | raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(
367 | betas[1]))
368 | if not 0.0 <= eps:
369 | raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
370 | defaults = dict(lr=lr,
371 | betas=betas,
372 | eps=eps,
373 | weight_decay=weight_decay,
374 | correct_bias=correct_bias)
375 | super().__init__(params, defaults)
376 |
377 | def step(self, closure=None):
378 | """Performs a single optimization step.
379 |
380 | Arguments:
381 | closure (callable, optional): A closure that reevaluates the model
382 | and returns the loss.
383 | """
384 | loss = None
385 | if closure is not None:
386 | loss = closure()
387 |
388 | for group in self.param_groups:
389 | for p in group["params"]:
390 | if p.grad is None:
391 | continue
392 | grad = p.grad.data
393 | if grad.is_sparse:
394 | raise RuntimeError(
395 | "Adam does not support sparse gradients, please consider SparseAdam instead"
396 | )
397 |
398 | state = self.state[p]
399 |
400 | # State initialization
401 | if len(state) == 0:
402 | state["step"] = 0
403 | # Exponential moving average of gradient values
404 | state["exp_avg"] = torch.zeros_like(p.data)
405 | # Exponential moving average of squared gradient values
406 | state["exp_avg_sq"] = torch.zeros_like(p.data)
407 |
408 | exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
409 | beta1, beta2 = group["betas"]
410 |
411 | state["step"] += 1
412 |
413 | # Decay the first and second moment running average coefficient
414 | # In-place operations to update the averages at the same time
415 | exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
416 | exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
417 | denom = exp_avg_sq.sqrt().add_(group["eps"])
418 |
419 | step_size = group["lr"]
420 | if group["correct_bias"]: # No bias correction for Bert
421 | bias_correction1 = 1.0 - beta1**state["step"]
422 | bias_correction2 = 1.0 - beta2**state["step"]
423 | step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
424 |
425 | p.data.addcdiv_(exp_avg, denom, value=-step_size)
426 |
427 | # Just adding the square of the weights to the loss function is *not*
428 | # the correct way of using L2 regularization/weight decay with Adam,
429 | # since that will interact with the m and v parameters in strange ways.
430 | #
431 | # Instead we want to decay the weights in a manner that doesn't interact
432 | # with the m/v parameters. This is equivalent to adding the square
433 | # of the weights to the loss with plain (non-momentum) SGD.
434 | # Add weight decay at the end (fixed version)
435 | if group["weight_decay"] > 0.0:
436 | p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
437 |
438 | return loss
439 |
--------------------------------------------------------------------------------
/common_file/parallel.py:
--------------------------------------------------------------------------------
1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 | ## Created by: Hang Zhang, Rutgers University, Email: zhang.hang@rutgers.edu
3 | ## Modified by Thomas Wolf, HuggingFace Inc., Email: thomas@huggingface.co
4 | ## Copyright (c) 2017-2018
5 | ##
6 | ## This source code is licensed under the MIT-style license found in the
7 | ## LICENSE file in the root directory of this source tree
8 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
9 | """Encoding Data Parallel"""
10 | import torch
11 | from torch.nn.parallel.data_parallel import DataParallel
12 | from torch.nn.parallel.parallel_apply import parallel_apply
13 | from torch.nn.parallel._functions import Scatter
14 |
15 |
16 | def scatter(inputs, target_gpus, chunk_sizes, dim=0):
17 | r"""
18 | Slices tensors into approximately equal chunks and
19 | distributes them across given GPUs. Duplicates
20 | references to objects that are not tensors.
21 | """
22 |
23 | def scatter_map(obj):
24 | if isinstance(obj, torch.Tensor):
25 | try:
26 | return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
27 | except Exception:
28 | print('obj', obj.size())
29 | print('dim', dim)
30 | print('chunk_sizes', chunk_sizes)
31 | quit()
32 | if isinstance(obj, tuple) and len(obj) > 0:
33 | return list(zip(*map(scatter_map, obj)))
34 | if isinstance(obj, list) and len(obj) > 0:
35 | return list(map(list, zip(*map(scatter_map, obj))))
36 | if isinstance(obj, dict) and len(obj) > 0:
37 | return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
38 | return [obj for targets in target_gpus]
39 |
40 | # After scatter_map is called, a scatter_map cell will exist. This cell
41 | # has a reference to the actual function scatter_map, which has references
42 | # to a closure that has a reference to the scatter_map cell (because the
43 | # fn is recursive). To avoid this reference cycle, we set the function to
44 | # None, clearing the cell
45 | try:
46 | return scatter_map(inputs)
47 | finally:
48 | scatter_map = None
49 |
50 |
51 | def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
52 | """Scatter with support for kwargs dictionary"""
53 | inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
54 | kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
55 | if len(inputs) < len(kwargs):
56 | inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
57 | elif len(kwargs) < len(inputs):
58 | kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
59 | inputs = tuple(inputs)
60 | kwargs = tuple(kwargs)
61 | return inputs, kwargs
62 |
63 |
64 | class BalancedDataParallel(DataParallel):
65 |
66 | def __init__(self, gpu0_bsz, *args, **kwargs):
67 | self.gpu0_bsz = gpu0_bsz
68 | super().__init__(*args, **kwargs)
69 |
70 | def forward(self, *inputs, **kwargs):
71 | if not self.device_ids:
72 | return self.module(*inputs, **kwargs)
73 | if self.gpu0_bsz == 0:
74 | device_ids = self.device_ids[1:]
75 | else:
76 | device_ids = self.device_ids
77 | inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
78 | if len(self.device_ids) == 1:
79 | return self.module(*inputs[0], **kwargs[0])
80 | replicas = self.replicate(self.module, self.device_ids)
81 | if self.gpu0_bsz == 0:
82 | replicas = replicas[1:]
83 | outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
84 | return self.gather(outputs, self.output_device)
85 |
86 | def parallel_apply(self, replicas, device_ids, inputs, kwargs):
87 | return parallel_apply(replicas, inputs, kwargs, device_ids)
88 |
89 | def scatter(self, inputs, kwargs, device_ids):
90 | bsz = inputs[0].size(self.dim)
91 | num_dev = len(self.device_ids)
92 | gpu0_bsz = self.gpu0_bsz
93 | bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
94 | if gpu0_bsz < bsz_unit:
95 | chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
96 | delta = bsz - sum(chunk_sizes)
97 | for i in range(delta):
98 | chunk_sizes[i + 1] += 1
99 | if gpu0_bsz == 0:
100 | chunk_sizes = chunk_sizes[1:]
101 | else:
102 | return super().scatter(inputs, kwargs, device_ids)
103 | return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
104 |
--------------------------------------------------------------------------------
/common_file/tokenization.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Tokenization classes."""
16 |
17 | from __future__ import absolute_import, division, print_function, unicode_literals
18 |
19 | import collections
20 | import logging
21 | import os
22 | import unicodedata
23 | from io import open
24 |
25 | from file_utils import cached_path
26 |
27 | logger = logging.getLogger(__name__)
28 |
29 | PRETRAINED_VOCAB_ARCHIVE_MAP = {
30 | 'bert-base-uncased':
31 | "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
32 | 'bert-large-uncased':
33 | "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
34 | 'bert-base-cased':
35 | "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
36 | 'bert-large-cased':
37 | "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
38 | 'bert-base-multilingual-uncased':
39 | "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
40 | 'bert-base-multilingual-cased':
41 | "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
42 | 'bert-base-chinese':
43 | "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
44 | }
45 | PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
46 | 'bert-base-uncased': 512,
47 | 'bert-large-uncased': 512,
48 | 'bert-base-cased': 512,
49 | 'bert-large-cased': 512,
50 | 'bert-base-multilingual-uncased': 512,
51 | 'bert-base-multilingual-cased': 512,
52 | 'bert-base-chinese': 512,
53 | }
54 | VOCAB_NAME = 'vocab.txt'
55 |
56 |
57 | def load_vocab(vocab_file):
58 | """Loads a vocabulary file into a dictionary."""
59 | vocab = collections.OrderedDict()
60 | index = 0
61 | with open(vocab_file, "r", encoding="utf-8") as reader:
62 | while True:
63 | token = reader.readline()
64 | if not token:
65 | break
66 | token = token.strip()
67 | vocab[token] = index
68 | index += 1
69 | return vocab
70 |
71 |
72 | def whitespace_tokenize(text):
73 | """Runs basic whitespace cleaning and splitting on a piece of text."""
74 | text = text.strip()
75 | if not text:
76 | return []
77 | tokens = text.split()
78 | return tokens
79 |
80 |
81 | class FullTokenizer(object):
82 | """Runs end-to-end tokenziation."""
83 |
84 | def __init__(self, vocab_file, do_lower_case=True):
85 | self.vocab = load_vocab(vocab_file)
86 | self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
87 | self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
88 |
89 | def tokenize(self, text):
90 | split_tokens = []
91 | for token in self.basic_tokenizer.tokenize(text):
92 | for sub_token in self.wordpiece_tokenizer.tokenize(token):
93 | split_tokens.append(sub_token)
94 |
95 | return split_tokens
96 |
97 | def convert_tokens_to_ids(self, tokens):
98 | """Converts a sequence of tokens into ids using the vocab."""
99 | ids = []
100 | for token in tokens:
101 | ids.append(self.vocab[token])
102 | return ids
103 |
104 | def save_vocabulary(self, vocab_path):
105 | """Save the tokenizer vocabulary to a directory or file."""
106 | index = 0
107 | if os.path.isdir(vocab_path):
108 | vocab_file = os.path.join(vocab_path, VOCAB_NAME)
109 | with open(vocab_file, "w", encoding="utf-8") as writer:
110 | for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
111 | if index != token_index:
112 | logger.warning(
113 | "Saving vocabulary to {}: vocabulary indices are not consecutive."
114 | " Please check that the vocabulary is not corrupted!".format(vocab_file))
115 | index = token_index
116 | writer.write(token + u'\n')
117 | index += 1
118 | return vocab_file
119 |
120 |
121 | class BertTokenizer(object):
122 | """Runs end-to-end tokenization: punctuation splitting + wordpiece"""
123 |
124 | def __init__(self,
125 | vocab_file,
126 | do_lower_case=True,
127 | max_len=None,
128 | do_basic_tokenize=True,
129 | never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
130 | """Constructs a BertTokenizer.
131 |
132 | Args:
133 | vocab_file: Path to a one-wordpiece-per-line vocabulary file
134 | do_lower_case: Whether to lower case the input
135 | Only has an effect when do_wordpiece_only=False
136 | do_basic_tokenize: Whether to do basic tokenization before wordpiece.
137 | max_len: An artificial maximum length to truncate tokenized sequences to;
138 | Effective maximum length is always the minimum of this
139 | value (if specified) and the underlying BERT model's
140 | sequence length.
141 | never_split: List of tokens which will never be split during tokenization.
142 | Only has an effect when do_wordpiece_only=False
143 | """
144 | if not os.path.isfile(vocab_file):
145 | raise ValueError(
146 | "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
147 | "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".
148 | format(vocab_file))
149 | self.vocab = load_vocab(vocab_file)
150 | self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
151 | self.do_basic_tokenize = do_basic_tokenize
152 | if do_basic_tokenize:
153 | self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
154 | never_split=never_split)
155 | self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
156 | self.max_len = max_len if max_len is not None else int(1e12)
157 |
158 | def tokenize(self, text):
159 | split_tokens = []
160 | if self.do_basic_tokenize:
161 | for token in self.basic_tokenizer.tokenize(text):
162 | for sub_token in self.wordpiece_tokenizer.tokenize(token):
163 | split_tokens.append(sub_token)
164 | else:
165 | split_tokens = self.wordpiece_tokenizer.tokenize(text)
166 | return split_tokens
167 |
168 | def convert_tokens_to_ids(self, tokens):
169 | """Converts a sequence of tokens into ids using the vocab."""
170 | ids = []
171 | for token in tokens:
172 | ids.append(self.vocab[token])
173 | if len(ids) > self.max_len:
174 | logger.warning("Token indices sequence length is longer than the specified maximum "
175 | " sequence length for this BERT model ({} > {}). Running this"
176 | " sequence through BERT will result in indexing errors".format(
177 | len(ids), self.max_len))
178 | return ids
179 |
180 | def convert_ids_to_tokens(self, ids):
181 | """Converts a sequence of ids in wordpiece tokens using the vocab."""
182 | tokens = []
183 | for i in ids:
184 | tokens.append(self.ids_to_tokens[i])
185 | return tokens
186 |
187 | def save_vocabulary(self, vocab_path):
188 | """Save the tokenizer vocabulary to a directory or file."""
189 | index = 0
190 | if os.path.isdir(vocab_path):
191 | vocab_file = os.path.join(vocab_path, VOCAB_NAME)
192 | with open(vocab_file, "w", encoding="utf-8") as writer:
193 | for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
194 | if index != token_index:
195 | logger.warning(
196 | "Saving vocabulary to {}: vocabulary indices are not consecutive."
197 | " Please check that the vocabulary is not corrupted!".format(vocab_file))
198 | index = token_index
199 | writer.write(token + u'\n')
200 | index += 1
201 | return vocab_file
202 |
203 | @classmethod
204 | def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
205 | """
206 | Instantiate a PreTrainedBertModel from a pre-trained model file.
207 | Download and cache the pre-trained model file if needed.
208 | """
209 | if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
210 | vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
211 | if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
212 | logger.warning(
213 | "The pre-trained model you are loading is a cased model but you have not set "
214 | "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
215 | "you may want to check this behavior.")
216 | kwargs['do_lower_case'] = False
217 | elif '-cased' not in pretrained_model_name_or_path and not kwargs.get(
218 | 'do_lower_case', True):
219 | logger.warning(
220 | "The pre-trained model you are loading is an uncased model but you have set "
221 | "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
222 | "but you may want to check this behavior.")
223 | kwargs['do_lower_case'] = True
224 | else:
225 | vocab_file = pretrained_model_name_or_path
226 | if os.path.isdir(vocab_file):
227 | vocab_file = os.path.join(vocab_file, VOCAB_NAME)
228 | # redirect to the cache, if necessary
229 | try:
230 | resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
231 | except EnvironmentError:
232 | logger.error("Model name '{}' was not found in model name list ({}). "
233 | "We assumed '{}' was a path or url but couldn't find any file "
234 | "associated to this path or url.".format(
235 | pretrained_model_name_or_path,
236 | ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file))
237 | return None
238 | if resolved_vocab_file == vocab_file:
239 | logger.info("loading vocabulary file {}".format(vocab_file))
240 | else:
241 | logger.info("loading vocabulary file {} from cache at {}".format(
242 | vocab_file, resolved_vocab_file))
243 | if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
244 | # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
245 | # than the number of positional embeddings
246 | max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
247 | kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
248 | # Instantiate tokenizer.
249 | tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
250 | return tokenizer
251 |
252 |
253 | class BasicTokenizer(object):
254 | """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
255 |
256 | def __init__(self,
257 | do_lower_case=True,
258 | never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
259 | """Constructs a BasicTokenizer.
260 |
261 | Args:
262 | do_lower_case: Whether to lower case the input.
263 | """
264 | self.do_lower_case = do_lower_case
265 | self.never_split = never_split
266 |
267 | def tokenize(self, text):
268 | """Tokenizes a piece of text."""
269 | text = self._clean_text(text)
270 | # This was added on November 1st, 2018 for the multilingual and Chinese
271 | # models. This is also applied to the English models now, but it doesn't
272 | # matter since the English models were not trained on any Chinese data
273 | # and generally don't have any Chinese data in them (there are Chinese
274 | # characters in the vocabulary because Wikipedia does have some Chinese
275 | # words in the English Wikipedia.).
276 | text = self._tokenize_chinese_chars(text)
277 | orig_tokens = whitespace_tokenize(text)
278 | split_tokens = []
279 | for token in orig_tokens:
280 | if self.do_lower_case and token not in self.never_split:
281 | token = token.lower()
282 | token = self._run_strip_accents(token)
283 | split_tokens.extend(self._run_split_on_punc(token))
284 |
285 | output_tokens = whitespace_tokenize(" ".join(split_tokens))
286 | return output_tokens
287 |
288 | def _run_strip_accents(self, text):
289 | """Strips accents from a piece of text."""
290 | text = unicodedata.normalize("NFD", text)
291 | output = []
292 | for char in text:
293 | cat = unicodedata.category(char)
294 | if cat == "Mn":
295 | continue
296 | output.append(char)
297 | return "".join(output)
298 |
299 | def _run_split_on_punc(self, text):
300 | """Splits punctuation on a piece of text."""
301 | if text in self.never_split:
302 | return [text]
303 | chars = list(text)
304 | i = 0
305 | start_new_word = True
306 | output = []
307 | while i < len(chars):
308 | char = chars[i]
309 | if _is_punctuation(char):
310 | output.append([char])
311 | start_new_word = True
312 | else:
313 | if start_new_word:
314 | output.append([])
315 | start_new_word = False
316 | output[-1].append(char)
317 | i += 1
318 |
319 | return ["".join(x) for x in output]
320 |
321 | def _tokenize_chinese_chars(self, text):
322 | """Adds whitespace around any CJK character."""
323 | output = []
324 | for char in text:
325 | cp = ord(char)
326 | if self._is_chinese_char(cp):
327 | output.append(" ")
328 | output.append(char)
329 | output.append(" ")
330 | else:
331 | output.append(char)
332 | return "".join(output)
333 |
334 | def _is_chinese_char(self, cp):
335 | """Checks whether CP is the codepoint of a CJK character."""
336 | # This defines a "chinese character" as anything in the CJK Unicode block:
337 | # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
338 | #
339 | # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
340 | # despite its name. The modern Korean Hangul alphabet is a different block,
341 | # as is Japanese Hiragana and Katakana. Those alphabets are used to write
342 | # space-separated words, so they are not treated specially and handled
343 |         # like all of the other languages.
344 | if ((cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF)
345 | or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F)
346 | or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF)
347 | or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F)):
348 | return True
349 |
350 | return False
351 |
352 | def _clean_text(self, text):
353 | """Performs invalid character removal and whitespace cleanup on text."""
354 | output = []
355 | for char in text:
356 | cp = ord(char)
357 | if cp == 0 or cp == 0xfffd or _is_control(char):
358 | continue
359 | if _is_whitespace(char):
360 | output.append(" ")
361 | else:
362 | output.append(char)
363 | return "".join(output)
364 |
365 |
366 | class WordpieceTokenizer(object):
367 | """Runs WordPiece tokenization."""
368 |
369 | def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
370 | self.vocab = vocab
371 | self.unk_token = unk_token
372 | self.max_input_chars_per_word = max_input_chars_per_word
373 |
374 | def tokenize(self, text):
375 | """Tokenizes a piece of text into its word pieces.
376 |
377 | This uses a greedy longest-match-first algorithm to perform tokenization
378 | using the given vocabulary.
379 |
380 | For example:
381 | input = "unaffable"
382 | output = ["un", "##aff", "##able"]
383 |
384 | Args:
385 | text: A single token or whitespace separated tokens. This should have
386 | already been passed through `BasicTokenizer`.
387 |
388 | Returns:
389 | A list of wordpiece tokens.
390 | """
391 |
392 | output_tokens = []
393 | for token in whitespace_tokenize(text):
394 | chars = list(token)
395 | if len(chars) > self.max_input_chars_per_word:
396 | output_tokens.append(self.unk_token)
397 | continue
398 |
399 | is_bad = False
400 | start = 0
401 | sub_tokens = []
402 | while start < len(chars):
403 | end = len(chars)
404 | cur_substr = None
405 | while start < end:
406 | substr = "".join(chars[start:end])
407 | if start > 0:
408 | substr = "##" + substr
409 | if substr in self.vocab:
410 | cur_substr = substr
411 | break
412 | end -= 1
413 | if cur_substr is None:
414 | is_bad = True
415 | break
416 | sub_tokens.append(cur_substr)
417 | start = end
418 |
419 | if is_bad:
420 | output_tokens.append(self.unk_token)
421 | else:
422 | output_tokens.extend(sub_tokens)
423 | return output_tokens
424 |
425 |
426 | def _is_whitespace(char):
427 | """Checks whether `chars` is a whitespace character."""
428 |     # \t, \n, and \r are technically control characters but we treat them
429 | # as whitespace since they are generally considered as such.
430 | if char == " " or char == "\t" or char == "\n" or char == "\r":
431 | return True
432 | cat = unicodedata.category(char)
433 | if cat == "Zs":
434 | return True
435 | return False
436 |
437 |
438 | def _is_control(char):
439 | """Checks whether `chars` is a control character."""
440 | # These are technically control characters but we count them as whitespace
441 | # characters.
442 | if char == "\t" or char == "\n" or char == "\r":
443 | return False
444 | cat = unicodedata.category(char)
445 | if cat.startswith("C"):
446 | return True
447 | return False
448 |
449 |
450 | def _is_punctuation(char):
451 | """Checks whether `chars` is a punctuation character."""
452 | cp = ord(char)
453 | # We treat all non-letter/number ASCII as punctuation.
454 | # Characters such as "^", "$", and "`" are not in the Unicode
455 | # Punctuation class but we treat them as punctuation anyways, for
456 | # consistency.
457 | if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96)
458 | or (cp >= 123 and cp <= 126)):
459 | return True
460 | cat = unicodedata.category(char)
461 | if cat.startswith("P"):
462 | return True
463 | return False
464 |
--------------------------------------------------------------------------------
/datas/relation_data/small.dev.txt:
--------------------------------------------------------------------------------
1 | “哟,原来是南野兄弟回来了真是有失远迎啊!”“嘿嘿……好久不见啊。你的小日子倒是过得滋润啊。对了,你家的女秘书长得真不错!”||“也不是很了解,就是喜欢看一些书。很多名言警句都很有道理。”“我这次来找你是想了解一下金枪鱼罐头加工厂的事情。”||“嘿~这么说你小子准备卖黄金?”“想。”||“有一些研究,来到这里之后我还特意收藏了很多古董。”“怎么了!你认识?”||“没有没有,我速度很快的……我刚刚差……。咳咳……那什么,秘书你先出去我有事和南野老弟聊。。南野老弟啊恭喜你凯旋归来,来来来你喜欢喝咖啡还是饮料我帮你拿。”“算了,你的手脏我自己来!”||“这次的订单大会其实就是招标差不多。这次很多国外的海鲜市场需要大量的海鲜。由于TA们那边的地方满足不了。或者是有些大型渔业公司或者地区对海鲜需求量增大。但是TA们那边又产出不了那么多就选择进口。而TA们则是和岛国z府商量好,达成共识打算在北海道大量的收购海鲜!”“XX,你说了那么多一句话说完不就行了?就是有人大量收购海鲜!因为我们北海道渔场产出海鲜多,TA们来这里收购。”||“滚~走,去其TA的大学看看~。那那边的东京女子大学吧。”“行~”||“根据以往的经验和对比还有现在本地的物价。建造一个中型加工厂大约400万日元左右。到时候你要请员工,租地,资金运转,还有采购金枪鱼等等都要一笔钱。这些钱不好细算。反正你要生产出第一批罐头,我们公司收购了你才会有第一笔收入。。对了,像你拥有自己的捕鱼队,到时候捕捞到的金枪鱼可以送去自己的加工厂加工。加工出来的罐头再卖给我们。比这样生卖要赚的多!批发价卖给别人那是最初级的赚取方式。”“这个我知道!”||“好,没问题,到时候我亲自带你去瞬间带你认识我的那个哥们儿。”“我这一次出售的黄金比较多,行不信的过?”||“先是一起到达太平洋中部,到时候船队都会分散在方圆几百海里内捕捞。如果要前往下一个地方,下一个海域的时候总负责人会电报通知大家前往下一个地方。到了下一个地方又会分散在方圆几百海里捕捞。如此反复。”“恩。”||“我不信,用华夏一句话,你肯定是无事不登三宝殿。”“XX你对华夏文化很精通?”||“嗯嗯?嗨~南野老弟怎么是你。”“我有事情找你谈。那个黄金黑市交易的幕后老板是你的哥们儿是不是?”||“老弟,这一次你赚到这么多钱准备干嘛啊?”“这么大一笔钱,我打算先开一个鲑鱼罐头加工厂,打造属于自己的品牌。我还打算开一个渔业公司。”||“啊啊啊~XX~我去找美女了~”“啧啧啧……XX你老了啊。”||“谁啊,我之前不是说这半个小时我没有空吗!到底是那个法克不想干了?”“是我!”||“放心,我打一个电话问问我朋友在不在这边。如果在立马过去!。我兄弟就在这里,刚刚出差回来。走,我们现在就过去!对了,你那边都准备好了没有?”“都准备差不多了,我立马叫卡车和人马过去搬运。”||“我们明天一大早就赶过去应该能赶上,希望赶上吧。”“但愿吧,没有想到这也太赶了。”||“很好,你这个想法很不错,既然有钱了就要大手大脚的干一番,不能一辈子帮别人赚钱!”“要是以后每次出海都能遇到沉船宝藏那就好了。”||“南野老弟,你太让我嫉妒了!你这是浪费资源,浪费免费的资源!”“没办法,谁叫我长得帅,有气质,站在那里都是焦点的存在。”||“机器设备生产线不是很贵,大约500万日元一条生产线。小型加工厂一般3条生产线。中型加工厂一般5条生产线。大型生产线10条生产线!”“那就是说,我从你这里购买了生产技术和生产线之后,工厂的建设还有产品的生产都要靠我自己?。原来如此。。那我建造一个中型加工厂大约需要多少资金?” “嗯,去唐人街吧。”“唐人街?那边都是华夏人的食物,好吃吗?”||“你那什么眼神?我像是那种坏男人吗?”“难道不是吗?”||“嘿嘿,不知道?作为你朋友的我给你出一个主意!”“什么主意?”||“怎么了亲爱的?”“那个《千与千寻》的故事是不是你讲给玉子听的?。快点,给我讲讲这个故事,从头到尾。”||“笑什么笑,有什么好笑,喝酒!”“来,干杯!其实啊我发现你这个性格还是不错的,嗯,长相嘛,虽然身材矮小了点但也还过得去。”||“出去一趟~赚点零花钱!”“你确定不是去泡妞?”||“这么说你们XX家族也有那些不出世的强者?”“是的!”||“很简单的……。这下子我好像抱上了大腿了呀!”“喂喂喂,你不要用那么火辣辣的目光盯着我看,辣眼睛!”||“你舍得我吗?”“为什么舍不得,要钱没有钱,虽然有点小帅气可是那方面还是有些差劲儿。。是啊难道不是吗?”||“当然不舍得了,以后我一定会光明正大的带你回去娶你回来。”“你可说好了,不要到时候孩子都上学了你还不敢带我回我家。”||“我……我觉得我不努力点,我娶不到你,所以……”“娶我?我说了要嫁给你了吗!你只不过是我的男朋友,我不一定要嫁给你。再说了,今天是男朋友明天也许就不是了。”||“额,平时怎么不见你们吃夜宵,不是怕肥吗?”“我们都怀孕了,每顿饭都吃的很少,每天需要吃四五顿!这不,到了晚上十一点又饿了,需要吃点东西,不然睡不着。”||“呃~这个,我,我做不到,那边有我的事业。”“或者,我跟你走!”||“没有什么,只是解决了两个废物。叫保镖过来处理就行了,我们先回屋吧。”“看来是三井家族那些真正的族人出世了。”||“喂,虽然我们是朋友,但你不要这样打压我,羞辱我好不好。”“不服?不服那你每天拿十万美金给我,我陪你帮个月。”||“明天我就拿给你,你先去睡觉吧。”“你去哪里?”||“怎么,大小姐你没有来过这样的小酒馆?”“没有来过。”||“咦,你们两个大晚上怎么还没有睡觉啊?”“在煮夜宵呢!你不知道我们晚上会肚子饿啊!”||“咳咳……就是你在结婚之前怀上别人的孩子,到时候挺着大肚子和TA结婚不就让你的未婚夫喜当爹了?怎么样,这个主意够不够厉害,够不够毒,能不能让你解气?”“XX你混蛋,你太坏了!”||“你怎么知道?”“我作为XX家族的人当然知道一些辛秘。” 0 在日本渔村的日子 凯罗尔::南野秀一||南野秀一::伊洛蒂
2 | “虽然说出去窝囊了点,但也不算胡说八道,如果那白莲教的头目性格和你一样没出息,确实有这种可能。”“侯爷,别让属下乱说话坏了您的琢磨心思,您就当属下放了个屁……”||“再把你一刀砍了当祭品?”“侯爷,XX那家伙不怀好意,六卫帅帐里已说了很多次,说回了京就要邀朝中诸大臣联名上奏,参侯爷逼反三卫之罪,侯爷,XX这人不能让TA活着回京师呀!”||“XX,锦衣卫探来探去,就给我探了这些鸡毛蒜皮的东西?我叫你们打听三卫指挥使有没有暗中勾结白莲教,你们却盯着人家老婆的肚子恭贺人家喜当爹,主题呢?啊?。这位唐神医倒真奇人也,开几副方子再扎几针,居然就生儿子了……”“侯爷,您与尊夫人成亲也几年了,一直……咳,属下万死,待白莲教之事了了,您看是不是请这位唐神医去一趟京师,给尊夫人瞧瞧?属下是您的心腹亲信,XX发毒誓一个字都不说……”||“不然怎样?本侯难道一辈子躲着不见TA?”“不如请XX多等一天,今晚属下代XX宴请XX,把XX的小妾弄到TA床上,明日便请XX在XX灵位前上柱香,顺便呜呼两声XX英灵不远,魂兮归来……”||“说正事!话头儿都偏到哪去了?”“是,……天津左卫指挥使XX,河南汝宁人,家中妻子姿色原本非常平凡,后来XX的妻子求了唐神医,神医给TA开了个驻颜养肤的方子,后来王妻的皮肤水灵灵白嫩嫩的,那手感……”||“别的且先不说,三万大军重重包围,而三千反贼竟知道大军包围圈最薄弱的一点,重势出击突破包围,若说TA们是凭运气找到这个薄弱点的,你信吗?”“难道有人向白莲教通风报信?白莲教连三府六卫都渗透进去了?”||“左右是闲聊,胡说八道有何不可,我又不会治你的罪……说说吧,除了拍屁股跑路,这头目如果还想稍微干点有出息的事,你若是TA,你会怎么做?”“侯爷,属下会耍刀弄枪,会冲锋陷阵,可……您别叫属下动脑子呀,属下若有侯爷您一丁点儿的智谋,早就埋头读书考状元去了……”||“属蜡烛的不是?不点不亮!”“侯爷,属下错了……侯爷,既然官仓是白莲教之所图,咱们应该早做布置才是。”||“其实XX没说错,天津三卫确实是我逼反的,不过我也是不得已而为之。自古军队就是个非常敏感的群体,不能纵容也不能得罪,既要倚重又不得不时时打压,其中分寸很难拿捏,查缉三卫里的白莲教徒若抽丝剥茧徐徐图之,非一年半载不能竞功,我若因此事而在天津耽搁一年半载,且先不说与陛下近亲远疏的私心,就说京师XX弄权,朝堂天下被TA搞得乌烟瘴气,大明王朝眼看因宦官之祸而日渐糜烂,仅凭这一点,我怎能被白莲教绊住脚?。对,明知仓促,但我不得不给天津三卫下一剂猛药,眼下确实伤了身,且待日后办了XX,我再让天津恢复元气,甚至更甚往日……XX的做法没错,TA是监察御史,我把事情办急了,TA参我正是职责所在,我一点也不怪TA……”“侯爷,如此说来,XX是好官儿?咱们这次放过TA?。侯爷仁义,胸襟如海……。侯爷……你不是说‘XX”||“除了XX,看看我身边都是些什么粗鄙汉子啊,人才太少了……三卫暂时稳住了,若欲报复朝廷,白莲教必煽动百姓作乱,煽动百姓有个前提,那就是在城中制造恐慌,百姓不恐慌,天津城乱不起来。XX,给你一个提示:子曰,食色,性也……”“侯爷,我明白了!白莲教若欲煽动城中百姓,一定会将城里的青楼妓院一把火烧了,让满城男人无妓可嫖,如此,百姓岂不恐慌大乱?。好歹毒好卑鄙的白莲教!这是要我……要男人们的命啊!”||“无所谓明见,与白莲教无论斗勇还是斗智,说来都是我占了大便宜,因为我背后站着朝廷,站着皇帝,我可兴举国之物力人力独战于一隅,在这方面,白莲教便吃亏多了,胜之不武,不胜才叫耻辱。”“若XX听了侯爷您这句话,怕是羞愤得要撞墙才好。”||“金无足赤,人无完人,人这一辈子偶尔干几件缺德的事儿无伤大雅,杀几个好人只不过是白璧微瑕,总体来看还是可圈可点的嘛……若让XX这家伙活着回京师参我一本,不知会给我添多大的麻烦,XX这人啊,能弄死还是弄死吧。”“……是。。XX,如果属下没弄死TA呢?”||“未雨绸缪罢了,我对白莲教不了解,但我了解民心。若欲民心安定,粮食是绝不能少的,民心安定下来,白莲教如何能煽动?治国如烹小鲜,查反贼亦如烹小鲜,总要一步一步慢慢的布置,能想得到的每一颗棋子,不论有用没用,先将TA布置下来再说,只等将来火候一到,这颗看似无用的棋子兴许却发挥了大作用呢。”“侯爷明见万里。”||“有空多读书,看看圣人是怎样为人处世的!”“圣人也干过这事儿?”||“XX这人……倒也并非一无是处,只不过新政名目虽好,却太不现实,清理田亩,触动了XX的地主乡绅利益,清理军屯官仓,又触动了官员和军队的利益,简单的说,TA这是作死啊。”“不仅如此,XX还向陛下奏议,请将各地镇守太监擢升至巡抚同级,后来被包括内阁三位大学士和满朝文武以死相胁反对,这条奏议才暂时作罢,不过后来XX恼羞成怒,寻了由头杖毙了几个带头反对的大臣,满朝文武敢怒不敢言。”||“新政?XX雄心壮志,可敬可佩呀,新政什么内容?”“XX所谓的新政包括方方面面,比如清理天下XX,清理军屯官仓,裁撤京官和地方官府以及各地卫所冗官冗兵等等……”||“XX,你名字里虽然带了一个‘二”“官仓!侯爷,白莲教如欲作乱,必烧官仓,官仓没了粮食,城中百姓必乱!”||“不必多问,办好这件事,本侯保你有功无过。”“侯爷存粮两千石到底为了什么?”||“这个死太监,为了向陛下邀宠献媚,为了给我拖后腿,TA倒是不遗余力,鞠躬尽瘁。”“如今三卫指挥使也为难了,朝中内外皆知侯爷和XX不对付,侯爷查天津白莲教还没查完,三卫却在这个节骨眼上被XX调离天津,您和XX都是京里的大人物,三位指挥使谁也不敢得罪,手里握着兵马走也不是,留也不是,商议了一上午也没议出个结果来,三人急在心里,还不敢对侯爷说……”||“XX若真把XX的小妾睡了,按理说现在不应该活在人世,咱们此时也应该正在给XX的牌位上香才对,后来发生什么意外了?”“侯爷,后来其实并无意外,属下等人掐好了时间,打算半个时辰后找个由头让XX回后院捉歼的,结果……咳,结果XX回去后,TA的小妾好端端脱光了躺在床上,XX却不见人影儿了……。留在后院暗中放风的弟兄说,XX和XX的小妾确实办了事,不过却办得飞快,XX那家伙不中用,居然三两下便打了个哆嗦,交货了,拎上裤子说了句‘好舒服迅雷不及掩耳。侯爷,属下失算了!” 
“罢了,人算不如天算……XX?不但没把TA弄死,反而给TA拉了个皮条,让TA爽了一把,爽完还不给钱,咱们图个什么?……XX啊,你说咱们贱不贱呐?”“侯爷,按属下的想法,直截了当一刀把TA剁了拉倒,侯爷的法子是不是太……呃,太委婉了?”||“这个死太监,为了向陛下邀宠献媚,为了给我拖后腿,TA倒是不遗余力,鞠躬尽瘁。”“如今三卫指挥使也为难了,朝中内外皆知侯爷和XX不对付,侯爷查天津白莲教还没查完,三卫却在这个节骨眼上被XX调离天津,您和XX都是京里的大人物,三位指挥使谁也不敢得罪,手里握着兵马走也不是,留也不是,商议了一上午也没议出个结果来,三人急在心里,还不敢对侯爷说……”||“有空多读书,看看圣人是怎样为人处世的!”“圣人也干过这事儿?”||“不一定是白莲教渗透,我只是有一种莫名的感觉,觉得天津之变这件事里,有第三股势力在背后兴风作浪,TA们的影子在其中若隐若现……。这只是我的直觉,哪能知道得这么清楚?按我的感觉,这股势力对咱们是敌非友,来者不善,也许是针对白莲教造反这件事,也许是为了针对我个人,如果是针对我个人的话,排查起来就困难了,这两年我得罪过王爷,得罪过文官,得罪过太监,杀过鞑子,杀过倭寇,坑大臣,坑XX,血洗过东厂,西厂番子也在我手里栽过跟头……”“侯爷的人缘真是,真是……呵呵,卓尔不群啊。”||“其实XX没说错,天津三卫确实是我逼反的,不过我也是不得已而为之。自古军队就是个非常敏感的群体,不能纵容也不能得罪,既要倚重又不得不时时打压,其中分寸很难拿捏,查缉三卫里的白莲教徒若抽丝剥茧徐徐图之,非一年半载不能竞功,我若因此事而在天津耽搁一年半载,且先不说与陛下近亲远疏的私心,就说京师XX弄权,朝堂天下被TA搞得乌烟瘴气,大明王朝眼看因宦官之祸而日渐糜烂,仅凭这一点,我怎能被白莲教绊住脚?。对,明知仓促,但我不得不给天津三卫下一剂猛药,眼下确实伤了身,且待日后办了XX,我再让天津恢复元气,甚至更甚往日……XX的做法没错,TA是监察御史,我把事情办急了,TA参我正是职责所在,我一点也不怪TA……”“侯爷,如此说来,XX是好官儿?咱们这次放过TA?。侯爷仁义,胸襟如海……。侯爷……你不是说‘XX”||“属蜡烛的不是?不点不亮!”“侯爷,属下错了……侯爷,既然官仓是白莲教之所图,咱们应该早做布置才是。”||“罢了,这事是我思虑不周,我没想到XX会通过这件事来招惹我……不,恐怕TA打的算盘已不止是招惹我,而是想置我于死地!”“有这么严重吗?”||“说正事!话头儿都偏到哪去了?”“是,……天津左卫指挥使XX,河南汝宁人,家中妻子姿色原本非常平凡,后来XX的妻子求了唐神医,神医给TA开了个驻颜养肤的方子,后来王妻的皮肤水灵灵白嫩嫩的,那手感……”||“XX,你名字里虽然带了一个‘二”“官仓!侯爷,白莲教如欲作乱,必烧官仓,官仓没了粮食,城中百姓必乱!”||“不必多问,办好这件事,本侯保你有功无过。”“侯爷存粮两千石到底为了什么?”||“是啊,成亲都几年了,XX怀上之后嫣儿的脸一天比一天臭,也不知TA是不是报复社会,家里每一只能下蛋的母鸡都让TA掐死了,该让TA下个蛋了……不过这姓唐的神医姑娘收费有点贵,瞧一次病就得三千两,若把TA请到京师,恐怕三万两都不止,这笔买卖……”“不亏!侯爷,一点都不亏,正房正室嫡子,将来继承您的爵位啊,十万两都值得的。”||“再把你一刀砍了当祭品?”“侯爷,XX那家伙不怀好意,六卫帅帐里已说了很多次,说回了京就要邀朝中诸大臣联名上奏,参侯爷逼反三卫之罪,侯爷,XX这人不能让TA活着回京师呀!”||“XX,我离京这些日子,京里有何动静?”“侯爷,京里动静挺大,最大的动静是……XXXX正在大刀阔斧推行新政。”||“陛下已下旨让我重审科考弊案,如今三个当事人死了两个,已是死无对证,重审权刚到我手里,当初参劾XXXX舞弊的言官华昶便被灭了满门,你觉得满朝文武会怎么想?。XX这分明是灭口加嫁祸之计双料齐上啊,这一次TA倒将我以前坑人的本事学了个十足,这死太监,我小看TA了。。现在已不是咱们如何应对,而是要看XX如何应对了,若我所料不差,过不了几曰,天下皆知我OO为了好友功名而灭华昶满门,然后,金殿里可就热闹了……”“侯爷,这是污蔑!”||“XX,锦衣卫探来探去,就给我探了这些鸡毛蒜皮的东西?我叫你们打听三卫指挥使有没有暗中勾结白莲教,你们却盯着人家老婆的肚子恭贺人家喜当爹,主题呢?啊?。这位唐神医倒真奇人也,开几副方子再扎几针,居然就生儿子了……”“侯爷,您与尊夫人成亲也几年了,一直……咳,属下万死,待白莲教之事了了,您看是不是请这位唐神医去一趟京师,给尊夫人瞧瞧?属下是您的心腹亲信,XX发毒誓一个字都不说……”||“金无足赤,人无完人,人这一辈子偶尔干几件缺德的事儿无伤大雅,杀几个好人只不过是白璧微瑕,总体来看还是可圈可点的嘛……若让XX这家伙活着回京师参我一本,不知会给我添多大的麻烦,XX这人啊,能弄死还是弄死吧。”“……是。。XX,如果属下没弄死TA呢?”||“虽然说出去窝囊了点,但也不算胡说八道,如果那白莲教的头目性格和你一样没出息,确实有这种可能。”“侯爷,别让属下乱说话坏了您的琢磨心思,您就当属下放了个屁……”||“其实是TA们想多了,就算TA们依了XX的调令XX离开天津,我也不会拿TA们怎样的,这事儿怨不着TA们……”“侯爷……您不会这么大方吧?三卫离了天津可误了您的大事呀,您几日前不是还说过要将三卫指挥使的儿子扔井里去吗?”||“无所谓明见,与白莲教无论斗勇还是斗智,说来都是我占了大便宜,因为我背后站着朝廷,站着皇帝,我可兴举国之物力人力独战于一隅,在这方面,白莲教便吃亏多了,胜之不武,不胜才叫耻辱。”“若XX听了侯爷您这句话,怕是羞愤得要撞墙才好。”||“XX这人……倒也并非一无是处,只不过新政名目虽好,却太不现实,清理田亩,触动了XX的地主乡绅利益,清理军屯官仓,又触动了官员和军队的利益,简单的说,TA这是作死啊。”“不仅如此,XX还向陛下奏议,请将各地镇守太监擢升至巡抚同级,后来被包括内阁三位大学士和满朝文武以死相胁反对,这条奏议才暂时作罢,不过后来XX恼羞成怒,寻了由头杖毙了几个带头反对的大臣,满朝文武敢怒不敢言。” 1 明朝伪君子 秦堪::李二||李二::秦堪
--------------------------------------------------------------------------------
/datas/relation_data/small_train.txt:
--------------------------------------------------------------------------------
1 | “哟,原来是南野兄弟回来了真是有失远迎啊!”“嘿嘿……好久不见啊。你的小日子倒是过得滋润啊。对了,你家的女秘书长得真不错!”||“也不是很了解,就是喜欢看一些书。很多名言警句都很有道理。”“我这次来找你是想了解一下金枪鱼罐头加工厂的事情。”||“嘿~这么说你小子准备卖黄金?”“想。”||“有一些研究,来到这里之后我还特意收藏了很多古董。”“怎么了!你认识?”||“没有没有,我速度很快的……我刚刚差……。咳咳……那什么,秘书你先出去我有事和南野老弟聊。。南野老弟啊恭喜你凯旋归来,来来来你喜欢喝咖啡还是饮料我帮你拿。”“算了,你的手脏我自己来!”||“这次的订单大会其实就是招标差不多。这次很多国外的海鲜市场需要大量的海鲜。由于TA们那边的地方满足不了。或者是有些大型渔业公司或者地区对海鲜需求量增大。但是TA们那边又产出不了那么多就选择进口。而TA们则是和岛国z府商量好,达成共识打算在北海道大量的收购海鲜!”“XX,你说了那么多一句话说完不就行了?就是有人大量收购海鲜!因为我们北海道渔场产出海鲜多,TA们来这里收购。”||“滚~走,去其TA的大学看看~。那那边的东京女子大学吧。”“行~”||“根据以往的经验和对比还有现在本地的物价。建造一个中型加工厂大约400万日元左右。到时候你要请员工,租地,资金运转,还有采购金枪鱼等等都要一笔钱。这些钱不好细算。反正你要生产出第一批罐头,我们公司收购了你才会有第一笔收入。。对了,像你拥有自己的捕鱼队,到时候捕捞到的金枪鱼可以送去自己的加工厂加工。加工出来的罐头再卖给我们。比这样生卖要赚的多!批发价卖给别人那是最初级的赚取方式。”“这个我知道!”||“好,没问题,到时候我亲自带你去瞬间带你认识我的那个哥们儿。”“我这一次出售的黄金比较多,行不信的过?”||“先是一起到达太平洋中部,到时候船队都会分散在方圆几百海里内捕捞。如果要前往下一个地方,下一个海域的时候总负责人会电报通知大家前往下一个地方。到了下一个地方又会分散在方圆几百海里捕捞。如此反复。”“恩。”||“我不信,用华夏一句话,你肯定是无事不登三宝殿。”“XX你对华夏文化很精通?”||“嗯嗯?嗨~南野老弟怎么是你。”“我有事情找你谈。那个黄金黑市交易的幕后老板是你的哥们儿是不是?”||“老弟,这一次你赚到这么多钱准备干嘛啊?”“这么大一笔钱,我打算先开一个鲑鱼罐头加工厂,打造属于自己的品牌。我还打算开一个渔业公司。”||“啊啊啊~XX~我去找美女了~”“啧啧啧……XX你老了啊。”||“谁啊,我之前不是说这半个小时我没有空吗!到底是那个法克不想干了?”“是我!”||“放心,我打一个电话问问我朋友在不在这边。如果在立马过去!。我兄弟就在这里,刚刚出差回来。走,我们现在就过去!对了,你那边都准备好了没有?”“都准备差不多了,我立马叫卡车和人马过去搬运。”||“我们明天一大早就赶过去应该能赶上,希望赶上吧。”“但愿吧,没有想到这也太赶了。”||“很好,你这个想法很不错,既然有钱了就要大手大脚的干一番,不能一辈子帮别人赚钱!”“要是以后每次出海都能遇到沉船宝藏那就好了。”||“南野老弟,你太让我嫉妒了!你这是浪费资源,浪费免费的资源!”“没办法,谁叫我长得帅,有气质,站在那里都是焦点的存在。”||“机器设备生产线不是很贵,大约500万日元一条生产线。小型加工厂一般3条生产线。中型加工厂一般5条生产线。大型生产线10条生产线!”“那就是说,我从你这里购买了生产技术和生产线之后,工厂的建设还有产品的生产都要靠我自己?。原来如此。。那我建造一个中型加工厂大约需要多少资金?” “嗯,去唐人街吧。”“唐人街?那边都是华夏人的食物,好吃吗?”||“你那什么眼神?我像是那种坏男人吗?”“难道不是吗?”||“嘿嘿,不知道?作为你朋友的我给你出一个主意!”“什么主意?”||“怎么了亲爱的?”“那个《千与千寻》的故事是不是你讲给玉子听的?。快点,给我讲讲这个故事,从头到尾。”||“笑什么笑,有什么好笑,喝酒!”“来,干杯!其实啊我发现你这个性格还是不错的,嗯,长相嘛,虽然身材矮小了点但也还过得去。”||“出去一趟~赚点零花钱!”“你确定不是去泡妞?”||“这么说你们XX家族也有那些不出世的强者?”“是的!”||“很简单的……。这下子我好像抱上了大腿了呀!”“喂喂喂,你不要用那么火辣辣的目光盯着我看,辣眼睛!”||“你舍得我吗?”“为什么舍不得,要钱没有钱,虽然有点小帅气可是那方面还是有些差劲儿。。是啊难道不是吗?”||“当然不舍得了,以后我一定会光明正大的带你回去娶你回来。”“你可说好了,不要到时候孩子都上学了你还不敢带我回我家。”||“我……我觉得我不努力点,我娶不到你,所以……”“娶我?我说了要嫁给你了吗!你只不过是我的男朋友,我不一定要嫁给你。再说了,今天是男朋友明天也许就不是了。”||“额,平时怎么不见你们吃夜宵,不是怕肥吗?”“我们都怀孕了,每顿饭都吃的很少,每天需要吃四五顿!这不,到了晚上十一点又饿了,需要吃点东西,不然睡不着。”||“呃~这个,我,我做不到,那边有我的事业。”“或者,我跟你走!”||“没有什么,只是解决了两个废物。叫保镖过来处理就行了,我们先回屋吧。”“看来是三井家族那些真正的族人出世了。”||“喂,虽然我们是朋友,但你不要这样打压我,羞辱我好不好。”“不服?不服那你每天拿十万美金给我,我陪你帮个月。”||“明天我就拿给你,你先去睡觉吧。”“你去哪里?”||“怎么,大小姐你没有来过这样的小酒馆?”“没有来过。”||“咦,你们两个大晚上怎么还没有睡觉啊?”“在煮夜宵呢!你不知道我们晚上会肚子饿啊!”||“咳咳……就是你在结婚之前怀上别人的孩子,到时候挺着大肚子和TA结婚不就让你的未婚夫喜当爹了?怎么样,这个主意够不够厉害,够不够毒,能不能让你解气?”“XX你混蛋,你太坏了!”||“你怎么知道?”“我作为XX家族的人当然知道一些辛秘。” 0 在日本渔村的日子 凯罗尔::南野秀一||南野秀一::伊洛蒂
2 | “虽然说出去窝囊了点,但也不算胡说八道,如果那白莲教的头目性格和你一样没出息,确实有这种可能。”“侯爷,别让属下乱说话坏了您的琢磨心思,您就当属下放了个屁……”||“再把你一刀砍了当祭品?”“侯爷,XX那家伙不怀好意,六卫帅帐里已说了很多次,说回了京就要邀朝中诸大臣联名上奏,参侯爷逼反三卫之罪,侯爷,XX这人不能让TA活着回京师呀!”||“XX,锦衣卫探来探去,就给我探了这些鸡毛蒜皮的东西?我叫你们打听三卫指挥使有没有暗中勾结白莲教,你们却盯着人家老婆的肚子恭贺人家喜当爹,主题呢?啊?。这位唐神医倒真奇人也,开几副方子再扎几针,居然就生儿子了……”“侯爷,您与尊夫人成亲也几年了,一直……咳,属下万死,待白莲教之事了了,您看是不是请这位唐神医去一趟京师,给尊夫人瞧瞧?属下是您的心腹亲信,XX发毒誓一个字都不说……”||“不然怎样?本侯难道一辈子躲着不见TA?”“不如请XX多等一天,今晚属下代XX宴请XX,把XX的小妾弄到TA床上,明日便请XX在XX灵位前上柱香,顺便呜呼两声XX英灵不远,魂兮归来……”||“说正事!话头儿都偏到哪去了?”“是,……天津左卫指挥使XX,河南汝宁人,家中妻子姿色原本非常平凡,后来XX的妻子求了唐神医,神医给TA开了个驻颜养肤的方子,后来王妻的皮肤水灵灵白嫩嫩的,那手感……”||“别的且先不说,三万大军重重包围,而三千反贼竟知道大军包围圈最薄弱的一点,重势出击突破包围,若说TA们是凭运气找到这个薄弱点的,你信吗?”“难道有人向白莲教通风报信?白莲教连三府六卫都渗透进去了?”||“左右是闲聊,胡说八道有何不可,我又不会治你的罪……说说吧,除了拍屁股跑路,这头目如果还想稍微干点有出息的事,你若是TA,你会怎么做?”“侯爷,属下会耍刀弄枪,会冲锋陷阵,可……您别叫属下动脑子呀,属下若有侯爷您一丁点儿的智谋,早就埋头读书考状元去了……”||“属蜡烛的不是?不点不亮!”“侯爷,属下错了……侯爷,既然官仓是白莲教之所图,咱们应该早做布置才是。”||“其实XX没说错,天津三卫确实是我逼反的,不过我也是不得已而为之。自古军队就是个非常敏感的群体,不能纵容也不能得罪,既要倚重又不得不时时打压,其中分寸很难拿捏,查缉三卫里的白莲教徒若抽丝剥茧徐徐图之,非一年半载不能竞功,我若因此事而在天津耽搁一年半载,且先不说与陛下近亲远疏的私心,就说京师XX弄权,朝堂天下被TA搞得乌烟瘴气,大明王朝眼看因宦官之祸而日渐糜烂,仅凭这一点,我怎能被白莲教绊住脚?。对,明知仓促,但我不得不给天津三卫下一剂猛药,眼下确实伤了身,且待日后办了XX,我再让天津恢复元气,甚至更甚往日……XX的做法没错,TA是监察御史,我把事情办急了,TA参我正是职责所在,我一点也不怪TA……”“侯爷,如此说来,XX是好官儿?咱们这次放过TA?。侯爷仁义,胸襟如海……。侯爷……你不是说‘XX”||“除了XX,看看我身边都是些什么粗鄙汉子啊,人才太少了……三卫暂时稳住了,若欲报复朝廷,白莲教必煽动百姓作乱,煽动百姓有个前提,那就是在城中制造恐慌,百姓不恐慌,天津城乱不起来。XX,给你一个提示:子曰,食色,性也……”“侯爷,我明白了!白莲教若欲煽动城中百姓,一定会将城里的青楼妓院一把火烧了,让满城男人无妓可嫖,如此,百姓岂不恐慌大乱?。好歹毒好卑鄙的白莲教!这是要我……要男人们的命啊!”||“无所谓明见,与白莲教无论斗勇还是斗智,说来都是我占了大便宜,因为我背后站着朝廷,站着皇帝,我可兴举国之物力人力独战于一隅,在这方面,白莲教便吃亏多了,胜之不武,不胜才叫耻辱。”“若XX听了侯爷您这句话,怕是羞愤得要撞墙才好。”||“金无足赤,人无完人,人这一辈子偶尔干几件缺德的事儿无伤大雅,杀几个好人只不过是白璧微瑕,总体来看还是可圈可点的嘛……若让XX这家伙活着回京师参我一本,不知会给我添多大的麻烦,XX这人啊,能弄死还是弄死吧。”“……是。。XX,如果属下没弄死TA呢?”||“未雨绸缪罢了,我对白莲教不了解,但我了解民心。若欲民心安定,粮食是绝不能少的,民心安定下来,白莲教如何能煽动?治国如烹小鲜,查反贼亦如烹小鲜,总要一步一步慢慢的布置,能想得到的每一颗棋子,不论有用没用,先将TA布置下来再说,只等将来火候一到,这颗看似无用的棋子兴许却发挥了大作用呢。”“侯爷明见万里。”||“有空多读书,看看圣人是怎样为人处世的!”“圣人也干过这事儿?”||“XX这人……倒也并非一无是处,只不过新政名目虽好,却太不现实,清理田亩,触动了XX的地主乡绅利益,清理军屯官仓,又触动了官员和军队的利益,简单的说,TA这是作死啊。”“不仅如此,XX还向陛下奏议,请将各地镇守太监擢升至巡抚同级,后来被包括内阁三位大学士和满朝文武以死相胁反对,这条奏议才暂时作罢,不过后来XX恼羞成怒,寻了由头杖毙了几个带头反对的大臣,满朝文武敢怒不敢言。”||“新政?XX雄心壮志,可敬可佩呀,新政什么内容?”“XX所谓的新政包括方方面面,比如清理天下XX,清理军屯官仓,裁撤京官和地方官府以及各地卫所冗官冗兵等等……”||“XX,你名字里虽然带了一个‘二”“官仓!侯爷,白莲教如欲作乱,必烧官仓,官仓没了粮食,城中百姓必乱!”||“不必多问,办好这件事,本侯保你有功无过。”“侯爷存粮两千石到底为了什么?”||“这个死太监,为了向陛下邀宠献媚,为了给我拖后腿,TA倒是不遗余力,鞠躬尽瘁。”“如今三卫指挥使也为难了,朝中内外皆知侯爷和XX不对付,侯爷查天津白莲教还没查完,三卫却在这个节骨眼上被XX调离天津,您和XX都是京里的大人物,三位指挥使谁也不敢得罪,手里握着兵马走也不是,留也不是,商议了一上午也没议出个结果来,三人急在心里,还不敢对侯爷说……”||“XX若真把XX的小妾睡了,按理说现在不应该活在人世,咱们此时也应该正在给XX的牌位上香才对,后来发生什么意外了?”“侯爷,后来其实并无意外,属下等人掐好了时间,打算半个时辰后找个由头让XX回后院捉歼的,结果……咳,结果XX回去后,TA的小妾好端端脱光了躺在床上,XX却不见人影儿了……。留在后院暗中放风的弟兄说,XX和XX的小妾确实办了事,不过却办得飞快,XX那家伙不中用,居然三两下便打了个哆嗦,交货了,拎上裤子说了句‘好舒服迅雷不及掩耳。侯爷,属下失算了!” 
“罢了,人算不如天算……XX?不但没把TA弄死,反而给TA拉了个皮条,让TA爽了一把,爽完还不给钱,咱们图个什么?……XX啊,你说咱们贱不贱呐?”“侯爷,按属下的想法,直截了当一刀把TA剁了拉倒,侯爷的法子是不是太……呃,太委婉了?”||“这个死太监,为了向陛下邀宠献媚,为了给我拖后腿,TA倒是不遗余力,鞠躬尽瘁。”“如今三卫指挥使也为难了,朝中内外皆知侯爷和XX不对付,侯爷查天津白莲教还没查完,三卫却在这个节骨眼上被XX调离天津,您和XX都是京里的大人物,三位指挥使谁也不敢得罪,手里握着兵马走也不是,留也不是,商议了一上午也没议出个结果来,三人急在心里,还不敢对侯爷说……”||“有空多读书,看看圣人是怎样为人处世的!”“圣人也干过这事儿?”||“不一定是白莲教渗透,我只是有一种莫名的感觉,觉得天津之变这件事里,有第三股势力在背后兴风作浪,TA们的影子在其中若隐若现……。这只是我的直觉,哪能知道得这么清楚?按我的感觉,这股势力对咱们是敌非友,来者不善,也许是针对白莲教造反这件事,也许是为了针对我个人,如果是针对我个人的话,排查起来就困难了,这两年我得罪过王爷,得罪过文官,得罪过太监,杀过鞑子,杀过倭寇,坑大臣,坑XX,血洗过东厂,西厂番子也在我手里栽过跟头……”“侯爷的人缘真是,真是……呵呵,卓尔不群啊。”||“其实XX没说错,天津三卫确实是我逼反的,不过我也是不得已而为之。自古军队就是个非常敏感的群体,不能纵容也不能得罪,既要倚重又不得不时时打压,其中分寸很难拿捏,查缉三卫里的白莲教徒若抽丝剥茧徐徐图之,非一年半载不能竞功,我若因此事而在天津耽搁一年半载,且先不说与陛下近亲远疏的私心,就说京师XX弄权,朝堂天下被TA搞得乌烟瘴气,大明王朝眼看因宦官之祸而日渐糜烂,仅凭这一点,我怎能被白莲教绊住脚?。对,明知仓促,但我不得不给天津三卫下一剂猛药,眼下确实伤了身,且待日后办了XX,我再让天津恢复元气,甚至更甚往日……XX的做法没错,TA是监察御史,我把事情办急了,TA参我正是职责所在,我一点也不怪TA……”“侯爷,如此说来,XX是好官儿?咱们这次放过TA?。侯爷仁义,胸襟如海……。侯爷……你不是说‘XX”||“属蜡烛的不是?不点不亮!”“侯爷,属下错了……侯爷,既然官仓是白莲教之所图,咱们应该早做布置才是。”||“罢了,这事是我思虑不周,我没想到XX会通过这件事来招惹我……不,恐怕TA打的算盘已不止是招惹我,而是想置我于死地!”“有这么严重吗?”||“说正事!话头儿都偏到哪去了?”“是,……天津左卫指挥使XX,河南汝宁人,家中妻子姿色原本非常平凡,后来XX的妻子求了唐神医,神医给TA开了个驻颜养肤的方子,后来王妻的皮肤水灵灵白嫩嫩的,那手感……”||“XX,你名字里虽然带了一个‘二”“官仓!侯爷,白莲教如欲作乱,必烧官仓,官仓没了粮食,城中百姓必乱!”||“不必多问,办好这件事,本侯保你有功无过。”“侯爷存粮两千石到底为了什么?”||“是啊,成亲都几年了,XX怀上之后嫣儿的脸一天比一天臭,也不知TA是不是报复社会,家里每一只能下蛋的母鸡都让TA掐死了,该让TA下个蛋了……不过这姓唐的神医姑娘收费有点贵,瞧一次病就得三千两,若把TA请到京师,恐怕三万两都不止,这笔买卖……”“不亏!侯爷,一点都不亏,正房正室嫡子,将来继承您的爵位啊,十万两都值得的。”||“再把你一刀砍了当祭品?”“侯爷,XX那家伙不怀好意,六卫帅帐里已说了很多次,说回了京就要邀朝中诸大臣联名上奏,参侯爷逼反三卫之罪,侯爷,XX这人不能让TA活着回京师呀!”||“XX,我离京这些日子,京里有何动静?”“侯爷,京里动静挺大,最大的动静是……XXXX正在大刀阔斧推行新政。”||“陛下已下旨让我重审科考弊案,如今三个当事人死了两个,已是死无对证,重审权刚到我手里,当初参劾XXXX舞弊的言官华昶便被灭了满门,你觉得满朝文武会怎么想?。XX这分明是灭口加嫁祸之计双料齐上啊,这一次TA倒将我以前坑人的本事学了个十足,这死太监,我小看TA了。。现在已不是咱们如何应对,而是要看XX如何应对了,若我所料不差,过不了几曰,天下皆知我OO为了好友功名而灭华昶满门,然后,金殿里可就热闹了……”“侯爷,这是污蔑!”||“XX,锦衣卫探来探去,就给我探了这些鸡毛蒜皮的东西?我叫你们打听三卫指挥使有没有暗中勾结白莲教,你们却盯着人家老婆的肚子恭贺人家喜当爹,主题呢?啊?。这位唐神医倒真奇人也,开几副方子再扎几针,居然就生儿子了……”“侯爷,您与尊夫人成亲也几年了,一直……咳,属下万死,待白莲教之事了了,您看是不是请这位唐神医去一趟京师,给尊夫人瞧瞧?属下是您的心腹亲信,XX发毒誓一个字都不说……”||“金无足赤,人无完人,人这一辈子偶尔干几件缺德的事儿无伤大雅,杀几个好人只不过是白璧微瑕,总体来看还是可圈可点的嘛……若让XX这家伙活着回京师参我一本,不知会给我添多大的麻烦,XX这人啊,能弄死还是弄死吧。”“……是。。XX,如果属下没弄死TA呢?”||“虽然说出去窝囊了点,但也不算胡说八道,如果那白莲教的头目性格和你一样没出息,确实有这种可能。”“侯爷,别让属下乱说话坏了您的琢磨心思,您就当属下放了个屁……”||“其实是TA们想多了,就算TA们依了XX的调令XX离开天津,我也不会拿TA们怎样的,这事儿怨不着TA们……”“侯爷……您不会这么大方吧?三卫离了天津可误了您的大事呀,您几日前不是还说过要将三卫指挥使的儿子扔井里去吗?”||“无所谓明见,与白莲教无论斗勇还是斗智,说来都是我占了大便宜,因为我背后站着朝廷,站着皇帝,我可兴举国之物力人力独战于一隅,在这方面,白莲教便吃亏多了,胜之不武,不胜才叫耻辱。”“若XX听了侯爷您这句话,怕是羞愤得要撞墙才好。”||“XX这人……倒也并非一无是处,只不过新政名目虽好,却太不现实,清理田亩,触动了XX的地主乡绅利益,清理军屯官仓,又触动了官员和军队的利益,简单的说,TA这是作死啊。”“不仅如此,XX还向陛下奏议,请将各地镇守太监擢升至巡抚同级,后来被包括内阁三位大学士和满朝文武以死相胁反对,这条奏议才暂时作罢,不过后来XX恼羞成怒,寻了由头杖毙了几个带头反对的大臣,满朝文武敢怒不敢言。” 1 明朝伪君子 秦堪::李二||李二::秦堪
--------------------------------------------------------------------------------
/datas/scene_cut_data/small.dev.txt:
--------------------------------------------------------------------------------
1 | 迟小早:没有哪个相爱的人愿意一直晾着对方看对方可怜巴巴的道歉或者不知所措每次的吵架只是为了让对方知道哪里错了需要改进仅此而已遇见你不容易所以不想轻易放弃你。||迟小早:她不管多生气都会给你发消息,不管那消息好听与否,请你记住一定要留住她。||旁白:Finish。||旁白:Forty-eighth session。||迟小早:人的出场顺序真的很重要. 0 沙雕迟早在线骗勋章.txt …
2 | 旁白:第二天。||旁白:为了刁刁的“三百万”。||旁白:苏浅请了假,带着刁刁赴约。||苏浅:你倒是会偷懒,连人形都懒得变换,还要我抱着你走。||刁刁:我不是懒,我是太爱你了~ 0 淘只锦鲤来许愿.txt .
3 | 韩当舞:但这就要看真正的凶手当时是怎么想的了。||韩当舞:所以我也不确定能不能实施这个方法。||旁白:陆宅中,房间中的灯光昏黄。||旁白:拐杖敲在地板上的声音远去。||旁白:又在客厅花瓶前停住。 1 他来自未来.txt ·
4 | 旁白:他尽力逗她开心。||旁白:也尽力隐藏她的病情。||旁白:门外,一个脚踩八厘米恨天高的女人走到门前。||旁白:她涂着吸晴的斩男色口红。||旁白:棕黄的发丝放下,凌乱的散落到肩上。 1 97路公车.txt …
5 | 员工小黄:所以,干活吧。||员工小黄:工作使你快乐。||旁白:一场八卦,最后莫名其妙地变成了社畜们互相灌鸡汤。||旁白:因而。||旁白:在安丘北顶着红肿的脸回到公司时。 0 每天都在向魔王告白.txt …
6 | 旁白:但那是韩当舞的请求。||旁白:让他唯一无法拒绝的人的请求。||旁白:正南胡同内。||旁白:李勋被狠狠一拳打在脸上。||周兴:怎么了?你在游戏厅里不是很威风么?怎么现在跟个病猫似的? 1 他来自未来.txt ·
7 | 旁白:莫离离开后,男孩呆呆看着门。||旁白:吱嘎——||旁白:没过多久,门被打开,进来一个男人。||?:这个是她带来的、制杖?||?:好脏啊,不过没事,不影响…… 0 高冷来袭:九爷心尖宠.txt …
8 | 旁白:脸上的笑容温柔至极,又带着一点略微的狡黠。||旁白:让人忍不住猜测,他隐瞒了什么。||旁白:源梧回到家之后,发现斋藤正跪在他房间门口。||斋藤:少爷,请听一下我的请求!||源梧:不听。 1 守夜人.txt …
9 | 云偌:哦哦哦,她是我们学校的校霸呢。||云偌:可厉害啦。||旁白:早上10.09。||落轩:什么?||云偌:她是我们学校的校霸啊。 1 高冷校草恋上我.txt .
10 | 旁白:安玖兮无视他的话。||旁白:安玖兮回到学校。||旁白:看到二叶两人在前面她跑上去。||安玖兮:喂,你们吃饭都不叫我的嘛。||叶曦:我们下课去你教室找你你都不在。 0 撩走会宠妻的易总.txt —
11 | 旁白:吴英蜡黄的脸上闪过一抹娇羞。||吴英:我……我在……和马主任约会。||旁白:审讯结束。||钱茂:谈易燃。||钱茂:这个女人回答的滴水不漏啊。 1 校草是个美少女.txt …
12 | 旁白:可是好特么疼!||旁白:小丑将绑着朱凌霜的绳子一端解下来,栓在了二人手上。||旁白:称小丑栓绳子的时候。||旁白:苏杭猛起抬脚,一脚踹在了小丑心口上。||小丑:你怎么解开了! 0 校草是个美少女.txt …
13 | 沈笑(你):因为你这句话,可能今天我连这个校门儿都走不出去。你信不信?||黄明昊:怎么会?这个学校的女生都是非常可爱的生物呢。||旁白:黄明昊强挤微笑。||沈笑(你):你说的都对,但我偏偏不信!||旁白:课间。 0 我的网恋对象是黄明昊.txt —
14 | 旁白:记得点赞,thank you。||蔡徐坤:突然之间发现盲打,键盘贼好看。😂😂。||旁白:(尹兮颜给他们上药。)。||尹兮颜《K.Y》:哦!对了,蔡徐坤,你帮我个忙,让我退出娱乐圈,不再演戏了。||蔡徐坤:你叫我什么? 1 蔡徐坤的娇妻小天后.txt …
15 | 秦之茂:注意安全。||旁白:秦之茂走了之后,俞陌修撩开了衣服袖子,用手在手腕处扫过。||旁白:紧接着,那里出现了一个手表形状的东西。||旁白:他嘴唇勾了勾。||俞陌修:特等管理员拥有三个表,还好张局通融,没有没收。 0 我有哆啦A梦的口袋.txt …
16 | 沫夏初樱:❤。||宇文炀:我不要。||旁白:慕容曦回家的路上,发现宇文炀还跟在她身后。||慕容曦:宇文炀,你怎么一直跟着我,不回家吗?||宇文炀:我家住你家附近。 1 校草竟然有超能力.txt …
17 | 旁白:安梓涵被安琪踹下床狼狈的爬起来。||安琪:哼。||旁白:安琪起身要离开被安梓涵拉住。||安琪:放开我。||旁白:安梓涵不松手也不说话。 0 撩走会宠妻的易总.txt —
18 | 旁白:若眼神交汇we know it。||旁白:请自由发挥don't hold it。||旁白:girls we could be you mack daddy。||主持人:好,请评委们介绍一下自己吧。||主持人:请评委们介绍一下自己。 1 我哥是明星.txt …
19 | 杨瑞然:(这俩人是在干嘛,我碗里都装不下了)。||杨瑞然:你俩比赛能不能不要加上我。||旁白:事实上这俩个人的战争并没有停止。||杨瑞然:(我靠,竟然不理我)。||旁白:杨瑞然站起来拍了下桌子。 0 网恋对象竟然是偶像.txt …
20 | 旁白:睡梦中。||旁白:她梦见了妈妈。||旁白:她在梦中和妈妈一起愉快的玩耍、睡觉。||旁白:一切是那么的美好。||旁白:西西的嘴边,还挂着甜甜的笑容。 0 复仇三公主.txt …
--------------------------------------------------------------------------------
/datas/scene_cut_data/small.train.txt:
--------------------------------------------------------------------------------
1 | 迟小早:没有哪个相爱的人愿意一直晾着对方看对方可怜巴巴的道歉或者不知所措每次的吵架只是为了让对方知道哪里错了需要改进仅此而已遇见你不容易所以不想轻易放弃你。||迟小早:她不管多生气都会给你发消息,不管那消息好听与否,请你记住一定要留住她。||旁白:Finish。||旁白:Forty-eighth session。||迟小早:人的出场顺序真的很重要. 0 沙雕迟早在线骗勋章.txt …
2 | 旁白:第二天。||旁白:为了刁刁的“三百万”。||旁白:苏浅请了假,带着刁刁赴约。||苏浅:你倒是会偷懒,连人形都懒得变换,还要我抱着你走。||刁刁:我不是懒,我是太爱你了~ 0 淘只锦鲤来许愿.txt .
3 | 韩当舞:但这就要看真正的凶手当时是怎么想的了。||韩当舞:所以我也不确定能不能实施这个方法。||旁白:陆宅中,房间中的灯光昏黄。||旁白:拐杖敲在地板上的声音远去。||旁白:又在客厅花瓶前停住。 1 他来自未来.txt ·
4 | 旁白:他尽力逗她开心。||旁白:也尽力隐藏她的病情。||旁白:门外,一个脚踩八厘米恨天高的女人走到门前。||旁白:她涂着吸晴的斩男色口红。||旁白:棕黄的发丝放下,凌乱的散落到肩上。 1 97路公车.txt …
5 | 员工小黄:所以,干活吧。||员工小黄:工作使你快乐。||旁白:一场八卦,最后莫名其妙地变成了社畜们互相灌鸡汤。||旁白:因而。||旁白:在安丘北顶着红肿的脸回到公司时。 0 每天都在向魔王告白.txt …
6 | 旁白:但那是韩当舞的请求。||旁白:让他唯一无法拒绝的人的请求。||旁白:正南胡同内。||旁白:李勋被狠狠一拳打在脸上。||周兴:怎么了?你在游戏厅里不是很威风么?怎么现在跟个病猫似的? 1 他来自未来.txt ·
7 | 旁白:莫离离开后,男孩呆呆看着门。||旁白:吱嘎——||旁白:没过多久,门被打开,进来一个男人。||?:这个是她带来的、制杖?||?:好脏啊,不过没事,不影响…… 0 高冷来袭:九爷心尖宠.txt …
8 | 旁白:脸上的笑容温柔至极,又带着一点略微的狡黠。||旁白:让人忍不住猜测,他隐瞒了什么。||旁白:源梧回到家之后,发现斋藤正跪在他房间门口。||斋藤:少爷,请听一下我的请求!||源梧:不听。 1 守夜人.txt …
9 | 云偌:哦哦哦,她是我们学校的校霸呢。||云偌:可厉害啦。||旁白:早上10.09。||落轩:什么?||云偌:她是我们学校的校霸啊。 1 高冷校草恋上我.txt .
10 | 旁白:安玖兮无视他的话。||旁白:安玖兮回到学校。||旁白:看到二叶两人在前面她跑上去。||安玖兮:喂,你们吃饭都不叫我的嘛。||叶曦:我们下课去你教室找你你都不在。 0 撩走会宠妻的易总.txt —
11 | 旁白:吴英蜡黄的脸上闪过一抹娇羞。||吴英:我……我在……和马主任约会。||旁白:审讯结束。||钱茂:谈易燃。||钱茂:这个女人回答的滴水不漏啊。 1 校草是个美少女.txt …
12 | 旁白:可是好特么疼!||旁白:小丑将绑着朱凌霜的绳子一端解下来,栓在了二人手上。||旁白:称小丑栓绳子的时候。||旁白:苏杭猛起抬脚,一脚踹在了小丑心口上。||小丑:你怎么解开了! 0 校草是个美少女.txt …
13 | 沈笑(你):因为你这句话,可能今天我连这个校门儿都走不出去。你信不信?||黄明昊:怎么会?这个学校的女生都是非常可爱的生物呢。||旁白:黄明昊强挤微笑。||沈笑(你):你说的都对,但我偏偏不信!||旁白:课间。 0 我的网恋对象是黄明昊.txt —
14 | 旁白:记得点赞,thank you。||蔡徐坤:突然之间发现盲打,键盘贼好看。😂😂。||旁白:(尹兮颜给他们上药。)。||尹兮颜《K.Y》:哦!对了,蔡徐坤,你帮我个忙,让我退出娱乐圈,不再演戏了。||蔡徐坤:你叫我什么? 1 蔡徐坤的娇妻小天后.txt …
15 | 秦之茂:注意安全。||旁白:秦之茂走了之后,俞陌修撩开了衣服袖子,用手在手腕处扫过。||旁白:紧接着,那里出现了一个手表形状的东西。||旁白:他嘴唇勾了勾。||俞陌修:特等管理员拥有三个表,还好张局通融,没有没收。 0 我有哆啦A梦的口袋.txt …
16 | 沫夏初樱:❤。||宇文炀:我不要。||旁白:慕容曦回家的路上,发现宇文炀还跟在她身后。||慕容曦:宇文炀,你怎么一直跟着我,不回家吗?||宇文炀:我家住你家附近。 1 校草竟然有超能力.txt …
17 | 旁白:安梓涵被安琪踹下床狼狈的爬起来。||安琪:哼。||旁白:安琪起身要离开被安梓涵拉住。||安琪:放开我。||旁白:安梓涵不松手也不说话。 0 撩走会宠妻的易总.txt —
18 | 旁白:若眼神交汇we know it。||旁白:请自由发挥don't hold it。||旁白:girls we could be you mack daddy。||主持人:好,请评委们介绍一下自己吧。||主持人:请评委们介绍一下自己。 1 我哥是明星.txt …
19 | 杨瑞然:(这俩人是在干嘛,我碗里都装不下了)。||杨瑞然:你俩比赛能不能不要加上我。||旁白:事实上这俩个人的战争并没有停止。||杨瑞然:(我靠,竟然不理我)。||旁白:杨瑞然站起来拍了下桌子。 0 网恋对象竟然是偶像.txt …
20 | 旁白:睡梦中。||旁白:她梦见了妈妈。||旁白:她在梦中和妈妈一起愉快的玩耍、睡觉。||旁白:一切是那么的美好。||旁白:西西的嘴边,还挂着甜甜的笑容。 0 复仇三公主.txt …
--------------------------------------------------------------------------------
/datas/two_classifier_data/small.dev.txt:
--------------------------------------------------------------------------------
1 | OO沉吟了一瞬,最后叹了口气,“也没什么,就是提醒你,你之所以可以为了一个人而奋不顾身,那是因为你什么都还没有,所以你可以放纵自己任性,去大胆的追求自己所爱,但是别忘了,TA不一样。”||“你着急个毛线?我又不娶你!”OO一副吊儿郎当的哼笑一声。||OO想起昨晚所受的屈辱,眸光冷冽了些,“让TA名下的场子先关几个月,回头再收拾TA。”||“看不出来,你还有这本领?”OO一边吃,一边说。||OO嗤笑一声,“认不认真跟你有关系吗?你有什么资格来问我?”||“歌有什么好听的,无聊!”OO兴致缺缺。||“我怎么知道TA为什么请假,你们不才是好朋友,好闺蜜么?”OO的语气很冲。||OO回神,答道:“来了有一会了。”||OO气的唇齿打结,嘴角抽了抽,到底把气给压下去,TA下颔一抬,道:“结果呢?”||“怎么?不会是因为我帮你报仇雪恨,你从此爱上我了吧?”OO冲TA挑眉,说的相当有自信。||“水性杨花!”OO丢给TA四个字,冷‘哼’一声,转身离开了。||OO勾唇,一脸讥诮的冷笑中夹杂着讽刺,眯眼,“是不是榜上那个大款了?居然连新闻头条都能搞定?”||“想什么呢?心事重重的?”OO一边系安全带,一边问TA。||“您告诉我,到底是不是您暗中收拾了肥佬?”OO显得非常不耐烦。||“要不,咱俩也凑个凑算了?”走着走着,OO突然冒出这样一句。||OO哼笑一声,“你觉得我会稀罕你的恭喜?”||“昨晚偷什么去了?有没有把你小叔人偷了?”OO凑过来,又坏又痞的问。||OO给TA一个无药可救的眼神,放下筷子,喝了口果汁,然后才一本正经的说道:“我有个办法,可以让你知道TA心里到底还有没有你。”||“你喝醉了,我去给你倒杯水。”OO发现自己的视线根本没办法从TA身上移开,只能选择抽身。||OO玩味的一笑,在白天身边坐下来翘着二郎腿,长臂担在TA身后的沙发上,作势将TA揽在怀里,“当然关我的事了。你要是真被甩了,我好把你再收回来。” “你是不是跟TA说租房子的事了?嗯?”OO目露凶光的瞪着TA。||“你还真别说,没有我OO不敢吃的女人。”OO说着便要耍流氓的架势。||OO长腿长脚的走到TA面前,伸手挑起TA的下颔,轻哼一声,“这会装的有点多余了吧?刚才在饭桌上怎么不拒绝我送?嗯?”||“行了,你要是再敢啰嗦,信不信我把你赶出去?”OO威胁TA。||OO知道TA家境的难处,也知道TA不愿意别人提及TA私事,所以就激TA,“你最好争取考个好成绩,早点滚出我视线。”||“你怎么回事?被人打劫了?沦落的那么惨?”果然,OO的语气软了下来。||“白天,要不我背你吧?”OO又鼓起勇气道。||“你给我回来!”OO的一声喝止,让XX去开门的手,真的就顿住了。||“行了,行了,我不说了还不行么?”OO戴上墨镜,又重新发动起车子。||“这是门卡,自动的。桌上有新的洗漱用品,还有早餐。不过,现在可以当午餐吃了。有什么事,给我打电话,走了。”OO留下一串话,开门走了。||“听不懂地球话是不是?”OO的语气颇有不耐烦,然后又丢给TA一句,“赶紧的!”||“闫阿姨,麻烦您跟我爸妈TA们也说一声,我就不过去了。”OO对XX说道。||“喂!你说给我准备的惊喜呢?”OO问TA。||OO面上划过一丝不自在,“等你什么时候想通了,什么时候再联系。”||OO走出几步,见TA没反应,又怒火中烧的折回来,一把扣住TA的手腕,“XX,我警告你,要不是因为先答应白家人把你安全送回去,我OO早就甩你十万八千里之外去了。”||“你要是真想谢我呢,就最好抓紧复习功课,等考上大学,立马滚出我的视线。”OO说的毫不留情。||“我的姑奶奶,你不要命了。”OO放下酒杯,又赶紧来夺TA手中的酒瓶。||OO后知后觉也意识到自己的担心太明显了,面上划过一丝不自在,清了清嗓子,解释:“你别自作多情啊,我就是关心一下你孩子而已。”||“TA和你在一起?”那头,幽冷的声音传来。OO轻皱着眉,虽然这个号码是陌生,但这个冷的令人发直的声音,还是很熟悉的。||“奶奶,您慢点。”OO见老人家心急,连忙走过去搀扶。 1 晚安,监护人!
2 | OO略微点头道:“也好,本相与太后不顺路。”||“这是何意?”OO不解。||“夜二姑娘。”OO见TA来了,淡淡出声,那声音中听不出喜色,可是只有TA自己知道那语气中带着的一丝期待。||“本相为琅月王朝未来社稷考虑。”OO面不改色说着这般不是理由的理由。TA不知道自己为什么不高兴,就是莫名不高兴,不过这种情绪很快就被自己给压下去了。||“说清楚点!”OO也有些无法等下去了,“这是何毒?”||OO抬眸刹那,眼中充斥了一丝丝恼怒,但是很快就消散而去,TA冷冷勾唇,“恕下官无能为力。”||“无碍,多谢太后关心。”OO平静地说道,目光幽幽落向前方,“昨日的事情,是本相失职在先。”TA大概是在自责昨晚上为了去约会夜婉云而疏忽的事情。||OO淡定回应一声,“是挺巧。”||“盛姑娘可确定?”OO不解地看着TA,觉得这眼前的少女怎么看都不像是会解毒的人啊?||“不用了,本相还有事,先告辞了。”OO看了一眼XX,那眼神充满了敌意。||“够了!”OO暴怒,可是良好的素养没有让TA发作,终于还是站起身来说道,“你想要什么,尽管说!”||“别的女人?”OO皱眉,明显因为TA这话,而略带不高兴。||“走吧。”OO略微蹙了蹙眉,忽然意识到自己竟然对夜倾城熟到这般地步有些反感。||“够了。”OO见状,立刻出声打断了TA们的争吵,“夜太后贵为太后,是不是该有太后的样子才行?”||OO轻轻蹙眉,迎视着盛晚晚那怀疑的目光,TA尴尬地说道:“只是报上次解毒之恩。”||在古代,对男女有别格外看重,而刚刚XX就这么堂而皇之地搂住了这太后的腰际,这般行径该是多么难以启齿。OO越想越觉得不爽快,转过头来对着摄政王语气不善说道:“摄政王虽日理万机,这后院一直空置无人,下官也是过来向太后引荐几位姑娘来给摄政王的。”||“本相跟随这位小兄弟一起去。”OO忽然道。||OO的眼眸微微闪了一下,轻轻叹息似的说道:“可,XX过的不好。”那语气,略带几分忧愁。||“嗯。”OO这个时候也无心情去推开TA的手,甚至奇怪的是,TA竟然一点都不觉得厌恶。大概是因为,这个少女没有再纠缠TA了,TA也就没有再像过去那般厌恶了。||OO蓦地抬头,这话让TA的表情闪过了一抹惊诧,但是很快就消散在了脸上,TA平淡出声:“见洛祭司并不是我能说的算。” “皇宫禁地,禁止入内。”OO在皇宫的入口停下来,朝着宫门走去。但是却被禁卫军给拦住了。||OO知道,现在说什么都是于事无补的,到时候只能看到底看如何对付TA们了:“回去吧!我看TA们似乎很不放心。”||对于XX的关心,OO没有理会,TA吃了丹药恢复了体内的一些伤,然后语气非常凝重地说道:“我们被发现了,大家接下来都要小心了。”||“现在还是想想该怎么处理现在的问题吧。”OO见状,竟然不自然地松了一口气。||“XX,你怎么回来了。XX呢?”看到XX的那一刻,OO立刻就起身了,有些惊讶地问道。||“……”OO听到XX的话,看着有些沉默,目光一直看着XX,气息有些怪异。||既然OO就已经不在了,那么XX也就没有隐瞒自己的情绪,语气冷漠的地说道:“你们过来做什么。”||“啪……”听到这个话,OO猛然站起来,手掌拍到了桌子上面。声音有些惊讶!“什么!”||“大家都起来,收拾东西,准备离开这里!”OO就这么倚在一颗大树旁边,远远地看着XX发号施令。||“紫瑶,我准备好了。”就在XX快要绷不住的时候。OO的声音犹如天籁之音一般,传到了XX的耳朵里面来了。||“事出反常必有因。”OO当然也赞同了XX的话。||OO都已经习以为常了,TA看着旁边的XX,语气淡淡地问道:“紫瑶,接下来又要做什么,这一招已经没有作用了!”||“猜的。”其实OO也不是非常的确定,可是现在听到这个消息,提起来的心有落了下去。||OO摇了摇头:“我知道TA进不来,但是有些话我想当面对TA说清楚。”||侧头看着XX,OO的语气淡淡地说道:“XX,你出来了。”||“紫瑶!”OO看到了这一幕,内心的情绪剧烈的变化,TA直接不留余力地打向了XX,然后不管结果,就朝着XX飞疾而去。||“废话少说,将TA们放了。”OO冰冷着一张脸,并没有理会魂奴的话,直接开门见山,说了自己过来的目的。||“怎么了。”OO听到了,倒是有些好奇,什么东西引起了XX的变化。||“恩!”OO点了点头,应了一声。然后这才默默地走了进来。||“是谁!”OO虽然在修炼,可是外界气息发生变化的那一刻,TA立刻就清醒了过来,表情凝重,语气凌厉。 0 哀家有喜,都是邪王惹的祸+兽妃凶猛:帝尊,请躺好!
3 | OO惊讶过后恢复淡定,TA轻声说道,“华少爷这次请我们来是有什么事吗?”||OO脸色阴沉,缓缓说道,“总之,你刚才说的坐牢和赔偿都给我打消了。你妈妈要的可不只是一亿那么简单。”||OO拉住自己的女儿,“倾城,你少说一句。不过,华少爷,沫沫真的不在我们家。我不是不让你见我老公,TA最近病情严重了,你们不让沫沫来给TA换血,TA的情况每况愈下,所以现在根本就见不了客!”||OO顿时很一怔,有些尴尬的看着XX,“是是是,是我说错了话。”||OO摇头,“怎么会呢?令公子一表人才,天之骄子,性子自然与众不同。”||OO怒声说道,“一席之地!舞家如果在首尔还有一席之地,你以为我会大老远的跑回中国来吗?实话告诉你,别指望回去韩国了。”||OO走近TA,双眸喷火,冷厉的说道,“你眼里难道只有那个孽种吗?我告诉你,我为什么非要毁了TA的原因。”||OO也脸色微变,“怎么,华少爷原来是兴师问罪的?”||OO笑里藏刀般的看着XX,“请问华少爷你有什么资格跟我谈条件?想让我们不再找TA,行?你说说看,你能给我什么?”||OO顿时皱起眉头,“倾城,你是疯了吧?我们现在在凉城全靠TA爸爸才能这样风光的生活,你现在是要我跟TA撕破脸吗?你想过后果吗?”||OO眸色淡然,语气毫无温度的说道,“不会,这又不是第一次。”||OO眉头紧皱,一双眼睛喷火一样的视线,“什么?因为XX!”||OO微笑,“你随便。”||OO顿时一怔,然后笑着说道,“TA怎么会在我这里呢?倾城不过是去找了TA一次,XX先生就直接掐死了我一个项目,我们怎么还敢在找TA?”||OO急忙说道,“那是自然,倾城也是随便说说,破坏别人的婚事,这样的事怎么能做呢?”||OO冷声说道,“我还能吃了TA吗?华少爷你放心吧。”||OO脸色阴沉,十分不悦的说道,“我们一直没想过要高攀华家,可是现在我们倾城已经有了身孕,我也就只能来找二位了。”||OO微微变了脸色,“你先相亲,然后在给你爸爸做治疗,不然,你怎么有力气谈恋爱呢!”||OO举起手就要打下去,却被XX拦住,“妈,还有正事要说呢!”||OO冷声说道,“行啊,如果我老公有什么不测,你就来负全责吧。等同与谋杀!” OO顿时崩溃了,TA拉住TA的手臂,“你说什么,你说什么?我不许你想TA!你看着我,看着我!”||OO开门见山,对于XX非常不满,“我听说你哥回来了,别墅的事情你到底有没有跟TA说。”||OO眉头紧皱看了眼自己不争气的女儿,TA沉默了一会,“我想要回林氏。”||“什么,你说什么?”OO顿时震惊的后退了两步。||OO阴沉着脸,半天才说道“你回去吧,如果有什么消息及时告诉我!”||OO说道,“那你说吧,你想让我怎么办?你说!”||OO在TA的背后说道,“你的时间有限,想说什么就快点。如果没有什么话要说,就出去吧。”||OO声音淡淡的说道,“TA为什么忽然间会这样?”||OO笑着说道,“长大了是吗?所以你也就不需要你的舞爸爸了是吗?TA今天早上吃的东西,却全部都吐了出来呢!我想TA今天一天都不需要在吃东西了吧?”||眼看着房子都被拆了,OO大声喊道,“好,我带你去见我老公。”||OO抽噎着说道,“倾城怀了你的孙子。”||那边的OO气的大声说道,“臭丫头,你是存心在耍我是不是?”||OO哭着说道,“已经成型了,快四个月了。”||OO有些无奈,“可是你们要找的人真的不在我这里,我怎么交出来啊!”||OO冷声说道,“别装了,倾城去找你,你利用华少爷把TA挡在门外,既然当初就那么狠心,现在又何必再装?”||“幻柏,该走了。”OO在一旁没好脸色的催促。||OO狠狠的说道,“当然是真的,你难道连自己的妈妈的字都不认识吗?如果你不信,你可以接着看,那几封信里记录着你是如何来到这个世界上的。”||OO却因为TA这句话,更加暴怒,“这么多年你对我永远都是这样一个态度,不冷不热,你有问过我过的好不好吗?现在,我不过是提TA一提,你就担心惊扰TA了!舞幻柏,今天我就是要惊扰TA怎么样?”||OO心里吓了一跳,也赶紧说道,“幻柏,这件事就听沫沫的就好了,我们尊TA的决定,在说,现在TA不是已经回到了华家了吗?再说,我们也不是为了这件事来的,你就直接说那件事吧。”||OO冷笑道,“XX先生是在打发要饭的吗?” 1 倾世暖婚:首席亿万追妻
--------------------------------------------------------------------------------
/datas/two_classifier_data/small.train.txt:
--------------------------------------------------------------------------------
1 | OO沉吟了一瞬,最后叹了口气,“也没什么,就是提醒你,你之所以可以为了一个人而奋不顾身,那是因为你什么都还没有,所以你可以放纵自己任性,去大胆的追求自己所爱,但是别忘了,TA不一样。”||“你着急个毛线?我又不娶你!”OO一副吊儿郎当的哼笑一声。||OO想起昨晚所受的屈辱,眸光冷冽了些,“让TA名下的场子先关几个月,回头再收拾TA。”||“看不出来,你还有这本领?”OO一边吃,一边说。||OO嗤笑一声,“认不认真跟你有关系吗?你有什么资格来问我?”||“歌有什么好听的,无聊!”OO兴致缺缺。||“我怎么知道TA为什么请假,你们不才是好朋友,好闺蜜么?”OO的语气很冲。||OO回神,答道:“来了有一会了。”||OO气的唇齿打结,嘴角抽了抽,到底把气给压下去,TA下颔一抬,道:“结果呢?”||“怎么?不会是因为我帮你报仇雪恨,你从此爱上我了吧?”OO冲TA挑眉,说的相当有自信。||“水性杨花!”OO丢给TA四个字,冷‘哼’一声,转身离开了。||OO勾唇,一脸讥诮的冷笑中夹杂着讽刺,眯眼,“是不是榜上那个大款了?居然连新闻头条都能搞定?”||“想什么呢?心事重重的?”OO一边系安全带,一边问TA。||“您告诉我,到底是不是您暗中收拾了肥佬?”OO显得非常不耐烦。||“要不,咱俩也凑个凑算了?”走着走着,OO突然冒出这样一句。||OO哼笑一声,“你觉得我会稀罕你的恭喜?”||“昨晚偷什么去了?有没有把你小叔人偷了?”OO凑过来,又坏又痞的问。||OO给TA一个无药可救的眼神,放下筷子,喝了口果汁,然后才一本正经的说道:“我有个办法,可以让你知道TA心里到底还有没有你。”||“你喝醉了,我去给你倒杯水。”OO发现自己的视线根本没办法从TA身上移开,只能选择抽身。||OO玩味的一笑,在白天身边坐下来翘着二郎腿,长臂担在TA身后的沙发上,作势将TA揽在怀里,“当然关我的事了。你要是真被甩了,我好把你再收回来。” “你是不是跟TA说租房子的事了?嗯?”OO目露凶光的瞪着TA。||“你还真别说,没有我OO不敢吃的女人。”OO说着便要耍流氓的架势。||OO长腿长脚的走到TA面前,伸手挑起TA的下颔,轻哼一声,“这会装的有点多余了吧?刚才在饭桌上怎么不拒绝我送?嗯?”||“行了,你要是再敢啰嗦,信不信我把你赶出去?”OO威胁TA。||OO知道TA家境的难处,也知道TA不愿意别人提及TA私事,所以就激TA,“你最好争取考个好成绩,早点滚出我视线。”||“你怎么回事?被人打劫了?沦落的那么惨?”果然,OO的语气软了下来。||“白天,要不我背你吧?”OO又鼓起勇气道。||“你给我回来!”OO的一声喝止,让XX去开门的手,真的就顿住了。||“行了,行了,我不说了还不行么?”OO戴上墨镜,又重新发动起车子。||“这是门卡,自动的。桌上有新的洗漱用品,还有早餐。不过,现在可以当午餐吃了。有什么事,给我打电话,走了。”OO留下一串话,开门走了。||“听不懂地球话是不是?”OO的语气颇有不耐烦,然后又丢给TA一句,“赶紧的!”||“闫阿姨,麻烦您跟我爸妈TA们也说一声,我就不过去了。”OO对XX说道。||“喂!你说给我准备的惊喜呢?”OO问TA。||OO面上划过一丝不自在,“等你什么时候想通了,什么时候再联系。”||OO走出几步,见TA没反应,又怒火中烧的折回来,一把扣住TA的手腕,“XX,我警告你,要不是因为先答应白家人把你安全送回去,我OO早就甩你十万八千里之外去了。”||“你要是真想谢我呢,就最好抓紧复习功课,等考上大学,立马滚出我的视线。”OO说的毫不留情。||“我的姑奶奶,你不要命了。”OO放下酒杯,又赶紧来夺TA手中的酒瓶。||OO后知后觉也意识到自己的担心太明显了,面上划过一丝不自在,清了清嗓子,解释:“你别自作多情啊,我就是关心一下你孩子而已。”||“TA和你在一起?”那头,幽冷的声音传来。OO轻皱着眉,虽然这个号码是陌生,但这个冷的令人发直的声音,还是很熟悉的。||“奶奶,您慢点。”OO见老人家心急,连忙走过去搀扶。 1 晚安,监护人!
2 | OO略微点头道:“也好,本相与太后不顺路。”||“这是何意?”OO不解。||“夜二姑娘。”OO见TA来了,淡淡出声,那声音中听不出喜色,可是只有TA自己知道那语气中带着的一丝期待。||“本相为琅月王朝未来社稷考虑。”OO面不改色说着这般不是理由的理由。TA不知道自己为什么不高兴,就是莫名不高兴,不过这种情绪很快就被自己给压下去了。||“说清楚点!”OO也有些无法等下去了,“这是何毒?”||OO抬眸刹那,眼中充斥了一丝丝恼怒,但是很快就消散而去,TA冷冷勾唇,“恕下官无能为力。”||“无碍,多谢太后关心。”OO平静地说道,目光幽幽落向前方,“昨日的事情,是本相失职在先。”TA大概是在自责昨晚上为了去约会夜婉云而疏忽的事情。||OO淡定回应一声,“是挺巧。”||“盛姑娘可确定?”OO不解地看着TA,觉得这眼前的少女怎么看都不像是会解毒的人啊?||“不用了,本相还有事,先告辞了。”OO看了一眼XX,那眼神充满了敌意。||“够了!”OO暴怒,可是良好的素养没有让TA发作,终于还是站起身来说道,“你想要什么,尽管说!”||“别的女人?”OO皱眉,明显因为TA这话,而略带不高兴。||“走吧。”OO略微蹙了蹙眉,忽然意识到自己竟然对夜倾城熟到这般地步有些反感。||“够了。”OO见状,立刻出声打断了TA们的争吵,“夜太后贵为太后,是不是该有太后的样子才行?”||OO轻轻蹙眉,迎视着盛晚晚那怀疑的目光,TA尴尬地说道:“只是报上次解毒之恩。”||在古代,对男女有别格外看重,而刚刚XX就这么堂而皇之地搂住了这太后的腰际,这般行径该是多么难以启齿。OO越想越觉得不爽快,转过头来对着摄政王语气不善说道:“摄政王虽日理万机,这后院一直空置无人,下官也是过来向太后引荐几位姑娘来给摄政王的。”||“本相跟随这位小兄弟一起去。”OO忽然道。||OO的眼眸微微闪了一下,轻轻叹息似的说道:“可,XX过的不好。”那语气,略带几分忧愁。||“嗯。”OO这个时候也无心情去推开TA的手,甚至奇怪的是,TA竟然一点都不觉得厌恶。大概是因为,这个少女没有再纠缠TA了,TA也就没有再像过去那般厌恶了。||OO蓦地抬头,这话让TA的表情闪过了一抹惊诧,但是很快就消散在了脸上,TA平淡出声:“见洛祭司并不是我能说的算。” “皇宫禁地,禁止入内。”OO在皇宫的入口停下来,朝着宫门走去。但是却被禁卫军给拦住了。||OO知道,现在说什么都是于事无补的,到时候只能看到底看如何对付TA们了:“回去吧!我看TA们似乎很不放心。”||对于XX的关心,OO没有理会,TA吃了丹药恢复了体内的一些伤,然后语气非常凝重地说道:“我们被发现了,大家接下来都要小心了。”||“现在还是想想该怎么处理现在的问题吧。”OO见状,竟然不自然地松了一口气。||“XX,你怎么回来了。XX呢?”看到XX的那一刻,OO立刻就起身了,有些惊讶地问道。||“……”OO听到XX的话,看着有些沉默,目光一直看着XX,气息有些怪异。||既然OO就已经不在了,那么XX也就没有隐瞒自己的情绪,语气冷漠的地说道:“你们过来做什么。”||“啪……”听到这个话,OO猛然站起来,手掌拍到了桌子上面。声音有些惊讶!“什么!”||“大家都起来,收拾东西,准备离开这里!”OO就这么倚在一颗大树旁边,远远地看着XX发号施令。||“紫瑶,我准备好了。”就在XX快要绷不住的时候。OO的声音犹如天籁之音一般,传到了XX的耳朵里面来了。||“事出反常必有因。”OO当然也赞同了XX的话。||OO都已经习以为常了,TA看着旁边的XX,语气淡淡地问道:“紫瑶,接下来又要做什么,这一招已经没有作用了!”||“猜的。”其实OO也不是非常的确定,可是现在听到这个消息,提起来的心有落了下去。||OO摇了摇头:“我知道TA进不来,但是有些话我想当面对TA说清楚。”||侧头看着XX,OO的语气淡淡地说道:“XX,你出来了。”||“紫瑶!”OO看到了这一幕,内心的情绪剧烈的变化,TA直接不留余力地打向了XX,然后不管结果,就朝着XX飞疾而去。||“废话少说,将TA们放了。”OO冰冷着一张脸,并没有理会魂奴的话,直接开门见山,说了自己过来的目的。||“怎么了。”OO听到了,倒是有些好奇,什么东西引起了XX的变化。||“恩!”OO点了点头,应了一声。然后这才默默地走了进来。||“是谁!”OO虽然在修炼,可是外界气息发生变化的那一刻,TA立刻就清醒了过来,表情凝重,语气凌厉。 0 哀家有喜,都是邪王惹的祸+兽妃凶猛:帝尊,请躺好!
3 | OO惊讶过后恢复淡定,TA轻声说道,“华少爷这次请我们来是有什么事吗?”||OO脸色阴沉,缓缓说道,“总之,你刚才说的坐牢和赔偿都给我打消了。你妈妈要的可不只是一亿那么简单。”||OO拉住自己的女儿,“倾城,你少说一句。不过,华少爷,沫沫真的不在我们家。我不是不让你见我老公,TA最近病情严重了,你们不让沫沫来给TA换血,TA的情况每况愈下,所以现在根本就见不了客!”||OO顿时很一怔,有些尴尬的看着XX,“是是是,是我说错了话。”||OO摇头,“怎么会呢?令公子一表人才,天之骄子,性子自然与众不同。”||OO怒声说道,“一席之地!舞家如果在首尔还有一席之地,你以为我会大老远的跑回中国来吗?实话告诉你,别指望回去韩国了。”||OO走近TA,双眸喷火,冷厉的说道,“你眼里难道只有那个孽种吗?我告诉你,我为什么非要毁了TA的原因。”||OO也脸色微变,“怎么,华少爷原来是兴师问罪的?”||OO笑里藏刀般的看着XX,“请问华少爷你有什么资格跟我谈条件?想让我们不再找TA,行?你说说看,你能给我什么?”||OO顿时皱起眉头,“倾城,你是疯了吧?我们现在在凉城全靠TA爸爸才能这样风光的生活,你现在是要我跟TA撕破脸吗?你想过后果吗?”||OO眸色淡然,语气毫无温度的说道,“不会,这又不是第一次。”||OO眉头紧皱,一双眼睛喷火一样的视线,“什么?因为XX!”||OO微笑,“你随便。”||OO顿时一怔,然后笑着说道,“TA怎么会在我这里呢?倾城不过是去找了TA一次,XX先生就直接掐死了我一个项目,我们怎么还敢在找TA?”||OO急忙说道,“那是自然,倾城也是随便说说,破坏别人的婚事,这样的事怎么能做呢?”||OO冷声说道,“我还能吃了TA吗?华少爷你放心吧。”||OO脸色阴沉,十分不悦的说道,“我们一直没想过要高攀华家,可是现在我们倾城已经有了身孕,我也就只能来找二位了。”||OO微微变了脸色,“你先相亲,然后在给你爸爸做治疗,不然,你怎么有力气谈恋爱呢!”||OO举起手就要打下去,却被XX拦住,“妈,还有正事要说呢!”||OO冷声说道,“行啊,如果我老公有什么不测,你就来负全责吧。等同与谋杀!” OO顿时崩溃了,TA拉住TA的手臂,“你说什么,你说什么?我不许你想TA!你看着我,看着我!”||OO开门见山,对于XX非常不满,“我听说你哥回来了,别墅的事情你到底有没有跟TA说。”||OO眉头紧皱看了眼自己不争气的女儿,TA沉默了一会,“我想要回林氏。”||“什么,你说什么?”OO顿时震惊的后退了两步。||OO阴沉着脸,半天才说道“你回去吧,如果有什么消息及时告诉我!”||OO说道,“那你说吧,你想让我怎么办?你说!”||OO在TA的背后说道,“你的时间有限,想说什么就快点。如果没有什么话要说,就出去吧。”||OO声音淡淡的说道,“TA为什么忽然间会这样?”||OO笑着说道,“长大了是吗?所以你也就不需要你的舞爸爸了是吗?TA今天早上吃的东西,却全部都吐了出来呢!我想TA今天一天都不需要在吃东西了吧?”||眼看着房子都被拆了,OO大声喊道,“好,我带你去见我老公。”||OO抽噎着说道,“倾城怀了你的孙子。”||那边的OO气的大声说道,“臭丫头,你是存心在耍我是不是?”||OO哭着说道,“已经成型了,快四个月了。”||OO有些无奈,“可是你们要找的人真的不在我这里,我怎么交出来啊!”||OO冷声说道,“别装了,倾城去找你,你利用华少爷把TA挡在门外,既然当初就那么狠心,现在又何必再装?”||“幻柏,该走了。”OO在一旁没好脸色的催促。||OO狠狠的说道,“当然是真的,你难道连自己的妈妈的字都不认识吗?如果你不信,你可以接着看,那几封信里记录着你是如何来到这个世界上的。”||OO却因为TA这句话,更加暴怒,“这么多年你对我永远都是这样一个态度,不冷不热,你有问过我过的好不好吗?现在,我不过是提TA一提,你就担心惊扰TA了!舞幻柏,今天我就是要惊扰TA怎么样?”||OO心里吓了一跳,也赶紧说道,“幻柏,这件事就听沫沫的就好了,我们尊TA的决定,在说,现在TA不是已经回到了华家了吗?再说,我们也不是为了这件事来的,你就直接说那件事吧。”||OO冷笑道,“XX先生是在打发要饭的吗?” 1 倾世暖婚:首席亿万追妻
--------------------------------------------------------------------------------
/examples/embeddings_example.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import math
4 | import pandas as pd
5 | import random
6 | import re
7 |
8 | print(pd.__file__)
9 |
10 | sys.path.append('../')
11 |
12 | from two_sentences_classifier import word_embeddings
13 |
14 |
15 |
16 | import torch
17 | import torch.nn.functional as F
18 | import scipy.stats
19 |
20 | # # Example word vectors
21 | # word_vectors = {
22 | # "cat": torch.tensor([0.1, 0.3, 0.5]),
23 | # "dog": torch.tensor([0.2, 0.4, 0.6]),
24 | # "apple": torch.tensor([0.9, 0.1, 0.2]),
25 | # }
26 |
27 | # # Example dataset: word pairs and human-annotated similarity scores
28 | # word_pairs = [("cat", "dog"), ("cat", "apple"), ("dog", "apple")]
29 | # human_scores = torch.tensor([0.9, 0.2, 0.3])  # human-annotated similarity
30 | def compute_sim(vec1s, vec2s):
31 |     # Compute the model's word-vector cosine similarities
32 | model_similarities = []
33 | for vec1, vec2 in zip(vec1s, vec2s):
34 | cosine_sim = F.cosine_similarity(vec1.unsqueeze(0), vec2.unsqueeze(0))
35 | model_similarities.append(cosine_sim.item())
36 | model_similarities = torch.tensor(model_similarities)
37 | return model_similarities
38 |
39 |
40 | # Compute the Pearson correlation coefficient
41 | def pearson_correlation(x, y):
42 | x_mean = torch.mean(x)
43 | y_mean = torch.mean(y)
44 | covariance = torch.mean((x - x_mean) * (y - y_mean))
45 |     x_std = torch.std(x, unbiased=False)  # population std, matching the population covariance above
46 |     y_std = torch.std(y, unbiased=False)
47 | return covariance / (x_std * y_std)
48 |
49 |
50 | # Compute the Spearman correlation coefficient
51 | def spearman_correlation(x, y):
52 | x_rank = torch.tensor(scipy.stats.rankdata(x))
53 | y_rank = torch.tensor(scipy.stats.rankdata(y))
54 | return pearson_correlation(x_rank, y_rank)
55 |
56 |
57 | key_word = {"…": "...", "—": "-", "“": "\"", "”": "\"", "‘": "'", "’": "'"}
58 | def replace_text(text):
59 | for key, value in key_word.items():
60 | text = re.sub(key, value, text)
61 | return text
62 |
63 |
64 | def extrace_sents(p):
65 | # p = '/nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/train.tsv'
66 | human_scores = []
67 | sentences_a, sentences_b = [], []
68 | with open(p, 'r') as f:
69 | for _, line in enumerate(f):
70 | line = line.replace('\n', '').split('\t')
71 | paras = [replace_text(p) for p in line[:2]]
72 | assert len(paras) == 2
73 | text_a = paras[0].split('||')[0]
74 | text_b = paras[1].split('||')[0]
75 |             try:
76 |                 label = int(line[2])
77 |             except (ValueError, IndexError):
78 |                 print(f'error line: {line}')
79 |                 continue
80 |             assert label in [0, 1]
81 |             sentences_a.append(text_a.strip())
82 |             sentences_b.append(text_b.strip())
83 |             human_scores.append(label)
84 | print(f'sentences_a: {len(sentences_a)}')
85 | print(f'sentences_b: {len(sentences_b)}')
86 | print(f'human_scores: {len(human_scores)}')
87 | return sentences_a, sentences_b, human_scores
88 |
89 |
90 | if __name__ == "__main__":
91 |
92 |
93 |
94 |     # # Embeddings from the character-dialogue classification model
95 | # model = word_embeddings.EmbeddingsModel(
96 | # '/nas2/lishengping/caiyun_projects/two_sentences_classifier/script/small_relation_0701')
97 | # sentences = ['你要去干嘛', '你今天去公司吗', "我想去旅游", "五一都干嘛了", "明天又要上班了", "要去吃饭了", "你喜欢打篮球吗"]
98 | # sentences = sentences * 30
99 | # sentences_mean_vector, sentences_vector_modes = model.embeddings(sentences, batch_size=1, max_seq_length=50)
100 |
101 |
102 |     # Embeddings from the character-dialogue classification model
103 | model = word_embeddings.EmbeddingsModel(
104 | '/nas2/lishengping/caiyun_projects/two_sentences_classifier/script/small_relation_0701')
105 |
106 | # model = word_embeddings.EmbeddingsModel(
107 | # '/nas2/lishengping/caiyun_projects/two_sentences_classifier/script/qk_norm1215')
108 |
109 | sentences_a = ['你要去干嘛', '你今天去公司吗', "我想去旅游", "五一都干嘛了", "明天又要上班了", "要去吃饭了", "你喜欢打篮球吗"]
110 | sentences_b = ['你今天去公司吗', '你要去干嘛', "我想去旅游", "五一都干嘛了", "明天又要上班了", "要去吃饭了", "你喜欢打篮球吗"]
111 | human_scores = torch.tensor([random.randint(0, 1) for i in range(len(sentences_a))]).float()
112 |
113 | p = '/nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/train.tsv'
114 | a0, b0, h0 = extrace_sents(p)
115 |
116 | p = '/nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv'
117 | a1, b1, h1 = extrace_sents(p)
118 |
119 |
120 | # p = '/nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/train.txt'
121 | # a0, b0, h0 = extrace_sents(p)
122 |
123 | # p = '/nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/dev.txt'
124 | # a1, b1, h1 = extrace_sents(p)
125 |
126 |
127 | end0 = 1500
128 | end1 = 1000
129 |
130 | sample_sentences_a = a0[: end0] + a1[: end1]
131 | sample_sentences_b = b0[: end0] + b1[: end1]
132 | human_scores = h0[: end0] + h1[: end1]
133 | sample_human_scores = torch.tensor(human_scores).float()
134 |
135 | sentences_vectors_a, _ = model.embeddings(sample_sentences_a, batch_size=30, max_seq_length=50)
136 | sentences_vectors_b, _ = model.embeddings(sample_sentences_b, batch_size=30, max_seq_length=50)
137 |
138 | print(f'sentences_vectors_a: {sentences_vectors_a.shape}')
139 | print(f'sentences_vectors_b: {sentences_vectors_b.shape}')
140 | # __import__('ipdb').set_trace()
141 | model_similarities = compute_sim(sentences_vectors_a, sentences_vectors_b)
142 | pearson_score = pearson_correlation(model_similarities, sample_human_scores)
143 | print("Pearson Correlation:", pearson_score.item())
144 |
145 | spearman_score = spearman_correlation(model_similarities, sample_human_scores)
146 |     print("Spearman Correlation:", spearman_score.item())
147 |
148 |
149 |     # # Embeddings from the character-relation classification model
150 | # model = word_embeddings.RelationModelEmbeddings(
151 | # '/nas/lishengping/relation_models/activate_cls_abs_model0531_15')
152 | # sentences = [
153 | # '你要去干嘛||你今天去公司吗', '你今天去公司吗||你今天去公司吗', "我想去旅游||你今天去公司吗", "五一都干嘛了||你今天去公司吗",
154 | # "明天又要上班了||你今天去公司吗", "要去吃饭了||你今天去公司吗", "你喜欢打篮球吗||你今天去公司吗"
155 | # ]
156 | # sentences = sentences
157 | # sentences_mean_vector, sentences_vector_modes = model.embeddings(sentences, split='||')
158 |
159 | # print(f'sentences_mean_vector = {sentences_mean_vector}')
160 | # print(f'sentences_vector_modes = {sentences_vector_modes}')
161 |
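162 |     # Optional sanity check (illustrative addition, assuming only scipy.stats'
163 |     # pearsonr/spearmanr API): cross-check the manual correlations computed above.
164 |     scipy_pearson, _ = scipy.stats.pearsonr(model_similarities.numpy(), sample_human_scores.numpy())
165 |     scipy_spearman, _ = scipy.stats.spearmanr(model_similarities.numpy(), sample_human_scores.numpy())
166 |     print("Pearson Correlation (scipy):", scipy_pearson)
167 |     print("Spearman Correlation (scipy):", scipy_spearman)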
--------------------------------------------------------------------------------
/examples/myself_example.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import os
4 | import math
5 |
6 | CUR_PATH = os.path.dirname(os.path.abspath(__file__))
7 | sys.path.append(os.path.abspath(os.path.join(CUR_PATH, '../')))
8 |
9 |
10 | import pandas as pd
11 | from two_sentences_classifier.word_embeddings import EmbeddingsModel, RelationModelEmbeddings
12 |
13 |
14 | if __name__ == "__main__":
15 | model_path = '/nas/lishengping/relation_models/activate_cls_abs_model0531_15'
16 | # model_path = '/nas/lishengping/two_classifier_models/two_sentences_classifier_model0427'
17 | model = RelationModelEmbeddings(model_path)
18 |
19 | # sentences = ['你要去干嘛', '你今天去公司吗', "我想去旅游", "五一都干嘛了", "明天又要上班了", "要去吃饭了", "你喜欢打篮球吗"]
20 |
21 | # # sentences = [
22 | # # '你要去干嘛||你今天去公司吗', '你今天去公司吗||你今天去公司吗', "我想去旅游||你今天去公司吗", "五一都干嘛了||你今天去公司吗",
23 | # # "明天又要上班了||你今天去公司吗", "要去吃饭了||你今天去公司吗", "你喜欢打篮球吗||你今天去公司吗"
24 | # # ]
25 | # sentences = sentences
26 | # # sentences_mean_vector, sentences_vector_modes = model.embeddings(sentences, split='||')
27 | # sentences_mean_vector, sentences_vector_modes = model.embeddings(sentences)
28 |
29 | # print(f'sentences_mean_vector = {sentences_mean_vector}')
30 | # print(f'sentences_vector_modes = {sentences_vector_modes}')
31 |
32 | # sentences = [
33 | # '你要去干嘛||你今天去公司吗', '你今天去公司吗||你今天去公司吗', "我想去旅游||你今天去公司吗", "五一都干嘛了||你今天去公司吗",
34 | # "明天又要上班了||你今天去公司吗", "要去吃饭了||你今天去公司吗", "你喜欢打篮球吗||你今天去公司吗"
35 | # ] * 2
36 | sentences = [
37 | '"那要看你说的话题能不能引起我的兴趣。。如果让我觉得索然无味,和浸泡在温柔乡里完全没有可比性,我可不会答应。""我们聊聊郭小姐。。夫君打算什么时候把TA迎娶过门?"',
38 | # '"魔剑哪有等到用完了再磨的?。许久没有宠幸,我看看是不是都快长合了。""夫君还是没个正经。"',
39 | '"嗯!。娶回袁家小姐,我就能把你们也娶进门。到时你可就没有理由不给我碰。""你就这么想碰我?"',
40 | '"多了个人?。你见了XX?""那个壮汉叫XX?"',
41 | '"才回来就逼着我学剑,也亏你有这样的精力。。不如坐下喝杯茶,陪我说说话儿再说。""夫君不会是怕苦,不打算去了?"',
42 | '"怎么都在收拾东西?""大夫人说夫君明天一早就要离开许都,家中上下都在收拾行装,我俩寻思着,XX必定是要追随夫君去淮南,因此下令让TA们把行装收拾妥了。只等夫君一声令下,明天就能开拔。"',
43 | '"怎么了?。哪里不妥?""公子有没有发觉,TA的杀气很重?。我本来是要去见父亲,却偶然遇见了TA……"',
44 | '"没有!。XX姐姐说我是个女儿家,不该经常到街市上抛头露脸。""TA是不是少说了一句话?。看来不是TA少说,而是你少学了。。XX是不是说你将来也要嫁给我,既然是XX的儿媳,就不该像街市上的女子一样整天在外面闲走?"',
45 | '"夫君最近不忙了?""该办的事情已经办的差不多了。。也没什么特别要忙的。"',
46 | '"只要夫君想学,还有闲暇,我当然会倾囊相授。。不过父亲交代的这套,却是要先学会。""成。。什么时候教我?"',
47 | '"没有。。我只是想为你分忧……""你上了战场,我才真的担心。。以后我再出征,你们任何人都不许跟着。"',
48 | '"倒也不是。。姐妹们都能看得出,TA对XX还是有所钟情。""既然有所钟情,怎么又不肯见TA?。难不成TA对我强行促成这桩婚事有些微词?"',
49 | '"公子的手段我也知道些。。我去问父亲的时候,你已经带着大军离开。""哪能?你看我一脸真诚,怎么可能骗你?"',
50 | '"我说心里不安稳,公子还强行调戏,哪有半点做夫君的模样?""肯定是你最近心思太多。。这时候想起我是你的夫君来了,到现在为止,我可还没行使过夫君的权利。"']
51 |
52 | sentences = [
53 | # '"竟然是XX和开明兽。。考,之前的恩怨不是一笔勾销了啊,怎么还来找我的麻烦,当真以为我好欺负啊。""老公,这两头神兽怎么来了?"',
54 | # '"老婆,你现在今非昔比了,成为神者,就不能呆在翠烟门了,把门主之位让出去,然后跟我在一起吧。""禅让门主,为什么啊?"',
55 | '"仙颜露的商标被人抢注了,也不知道是哪个孙子在背后故意整蛊我,我施展占卜术,竟然窥视不到对方的任何行踪,看来也是一个高手中的高手。""那我跟你一起去。"',
56 | '"我刚加入神宗,就跟TA见过一次面,能熟到哪儿去?。好了,不讨论这个了,XX是魅惑术,烟花是风之力,烟水是再生术,那烟月呢?""四月同样是在玉女神功领域造诣高深,但是TA并没有像三水那样领悟这种神奇的再生术,而TA领悟的,倒是跟你的那种神念术非常类似。"',
57 | '"我这次跟你一同前往,会指出这些药材在什么地方能够采集到,以后仙颜露的供货渠道,就交给翠烟门负责了,没问题吧?""翠烟门是隐居门派,你让门内弟子给你供货?"',
58 | '"诗诗,你会做饭吗?""呃......我不会。"',
59 | '"我没有骗你,我是在考虑,如何接受这个残酷而又无奈的现实。""呃......什么意思?"',
60 | '"更快,""秘术的施展就在于用意念來操控,神者的神力等于就是高手的内力,内力越强,就越厉害,换言之,神力越强,神者就越强大,诗诗,你现在就试着操控神力,"',
61 | # '"老公,出大事了。""又怎么了?"',
62 | '"说的有理!""XX的战斗力丝毫不亚于我,如果我没有收服神兽XX,并且不把我的压箱底杀手锏亮出来的话,我都不是XX的对手,TA这样的实力,对付五级高手,一挑五都没问题。有蛊魔蟾的情况下,以一敌十也是绰绰有余,虽然蛊魔蟾被杀,可XX的实力依旧不可小觑,给TA5000人的任务量,根本就不算多。五级高手不多,二级三级四级的却是多的很,遇到那些等级低的,XX一套技能能秒杀一群!"',
63 | '"......""抗议无效!"',
64 | '"你......你再敢像今天早上那样强迫我,我就一剑劈死你。""好啊,这才头一天开始,你就计划着谋杀亲夫呢,你这样恶毒的女人,吓得我都不敢要你了。"',
65 | # '"熔岩兽,难道也是神兽,你们这么多七级巅峰联手对付TA都不行?""熔岩兽是普通灵兽,不是神兽,问题在于TA足足修炼了3万3000年,实力可怕至极。"',
66 | '"XX已经受过太多的苦难,我不希望TA再受什么苦难,XX,你要好好教导TA,不能让TA走邪路。""那是必须的,我肯定好好【捣】TA。"'
67 | ]
68 | sentences_vector_modes, pred_label = model.classifiy(sentences, chunk_nums=5, split='""')
69 | print(f'pred_label = {pred_label}')
70 | print(f'sentences_vector_modes = {sentences_vector_modes}')
71 |
--------------------------------------------------------------------------------
/examples/relation_classifier_predict.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import os
4 | import math
5 |
6 | CUR_PATH = os.path.dirname(os.path.abspath(__file__))
7 | sys.path.append(os.path.abspath(os.path.join(CUR_PATH, '../')))
8 |
9 |
10 | import pandas as pd
11 | from two_sentences_classifier.word_embeddings import EmbeddingsModel, RelationModelEmbeddings
12 |
13 | if __name__ == "__main__":
14 | path = '/nas/xd/data/novels/figure_relation/results/predict0528_dev_7_7.csv'
15 | path = '/nas/xd/data/novels/figure_relation/results//predict0602_15_20_dev.csv'
16 | model = RelationModelEmbeddings(
17 | '/nas/lishengping/relation_models/activate_cls_abs_model0531_15')
18 | data = pd.read_csv(path)
19 | writer_file = open('/nas/xd/data/novels/figure_relation/results/predict0602_15_20_dev_emb',
20 | 'w',
21 | encoding='utf-8')
22 |
23 | for i, (text_a, text_b, label, logit, novel_name, yes_or_no, person) in enumerate(
24 | zip(data['text_a'], data['text_b'], data['labels'], data['logits'], data['novel_names'],
25 | data['yes_or_no'], data['person'])):
26 | text_a_emb = text_a.split('||')
27 | text_b_emb = text_b.split('||')
28 |
29 | _, a_vector_modes = model.embeddings(text_a_emb, split='""')
30 | _, b_vector_modes = model.embeddings(text_b_emb, split='""')
31 | if i % 10 == 0:
32 |             print(f"Progress: {i}/{len(data['person'])}")
33 | writer_str = f'{text_a}\t{text_b}\t{label}\t{novel_name}\t{logit}\t{yes_or_no}\t{person}\t{a_vector_modes}\t{b_vector_modes}\n'
34 | writer_file.write(writer_str)
35 |
36 | writer_file.close()
37 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | atomicwrites==1.3.0
2 | attrs==19.3.0
3 | boto3==1.12.47
4 | botocore==1.15.47
5 | certifi==2019.9.11
6 | chardet==3.0.4
7 | docutils==0.15.2
8 | idna==2.8
9 | importlib-metadata==0.23
10 | jmespath==0.9.5
11 | joblib==0.14.1
12 | more-itertools==7.2.0
13 | numpy==1.17.4
14 | packaging==19.2
15 | pandas==1.0.3
16 | pluggy==0.13.0
17 | py==1.8.0
18 | pyparsing==2.4.5
19 | pytest==5.2.4
20 | python-dateutil==2.8.1
21 | pytz==2020.1
22 | requests==2.22.0
23 | s3transfer==0.3.3
24 | scikit-learn==0.22.2.post1
25 | scipy==1.4.1
26 | six==1.13.0
27 | torch==1.0.1.post2
28 | tqdm==4.43.0
29 | urllib3==1.25.7
30 | wcwidth==0.1.7
31 | zipp==0.6.0
32 |
--------------------------------------------------------------------------------
/scene_classifier/__init__.py:
--------------------------------------------------------------------------------
1 | print(f'===========')
--------------------------------------------------------------------------------
/scene_classifier/data_untils.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | before_subj_symbols = [',', '。', '?', '!', ':', ';', '……', '…', '——', '—', '~', '~', '-', '-']
4 | before_subj_symbols += [',', '.', '?', '!', ':', ';']
5 |
6 |
7 | def get_speaker(line):
8 | assert '::' in line, line
9 | return line.split('::')[0]
10 |
11 |
12 | def get_speech(line):
13 | assert '::' in line, line
14 | return line.split('::', maxsplit=1)[1]
15 |
16 |
17 | def is_cjk_char(char):
18 | return '\u4e00' <= char <= '\u9fff'
19 |
20 |
21 | def get_span(lines, center, sep=None, radius=2):
22 | seps = ['…', '—', '.', '·', '-', '。'] if sep is None else [sep]
23 | speaker, speech = lines[center].split('::', maxsplit=1)
24 | assert speaker == '旁白', speaker
25 | # assert '本章完' not in speech, speech
26 | indexes = [center]
27 | n = 0
28 | for i in reversed(range(0, center)):
29 | if is_chapter_name(lines[i]):
30 | continue
31 | speech = get_speech(lines[i])
32 | if not all(c in seps for c in speech): # and '本章完' not in speech:
33 | indexes.insert(0, i)
34 | n += 1
35 | if n == radius:
36 | break
37 | n = 0
38 | for i in range(center + 1, len(lines)):
39 | if is_chapter_name(lines[i]): continue
40 | speech = get_speech(lines[i])
41 | if not all(c in seps for c in speech): # and '本章完' not in speech:
42 | indexes.append(i)
43 | n += 1
44 | if n == radius:
45 | break
46 | return indexes if len(indexes) == radius * 2 + 1 else None
47 |
48 |
49 | def normalize(line):
50 | line = line.replace('::', ':').replace('\n', '') #.replace('旁白:', '')
51 | if line[-1] not in before_subj_symbols:
52 | line = line + '。'
53 | return line
54 |
55 |
56 | def dump_span(span, f=sys.stdout):
57 | title, sep, sep_count, lines, label = span
58 | lines_str = '||'.join([normalize(line) for line in lines])
59 | label = str(int(label)) # bool -> str
60 | print('\t'.join([lines_str, label, title, sep]), file=f)
61 |
62 |
63 | def is_chapter_name(line):
64 |     # '第23话' or '第45话 回家'
65 |     b = '::' not in line and line.split()[0][0] == '第' and line.split()[0][-1] == '话'
66 | # if line.startswith('第6话'): assert b, line
67 | return b
68 |
69 |
70 | def filter_lines(lines, keep_chapter_name=False):
71 | ret_lines = []
72 | for line in lines:
73 | if '::' not in line:
74 | assert is_chapter_name(line), line
75 | if keep_chapter_name:
76 | ret_lines.append(line)
77 | continue
78 | speaker, speech = line.split('::', maxsplit=1)
79 | if speaker.startswith('旁白'):
80 | if any(s in speech for s in ['正文', '本章完', '待续', '未完', '分割线', '卡文']) or \
81 | all(not is_cjk_char(c) or c == '卡' for c in speech) and any(c == '卡' for c in speech):
82 | continue
83 | if speech.strip() == '':
84 | continue
85 | ret_lines.append(line)
86 | return ret_lines
87 |
88 |
89 |
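90 | # Minimal usage sketch (illustrative only, not part of the original module):
91 | # the helpers above expect raw lines in the 'speaker::speech' format.
92 | if __name__ == '__main__':
93 |     demo_lines = ['旁白::第二天。', '苏浅::你倒是会偷懒。', '旁白::……',
94 |                   '旁白::苏浅请了假。', '刁刁::我不是懒~', '旁白::本章完']
95 |     print(get_speaker(demo_lines[1]))      # -> 苏浅
96 |     print(get_speech(demo_lines[1]))       # -> 你倒是会偷懒。
97 |     print(get_span(demo_lines, center=2))  # -> [0, 1, 2, 3, 4]: a window around the separator-only line
98 |     print(filter_lines(demo_lines))        # drops the '本章完' filler line, keeps the dialogue/narration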
--------------------------------------------------------------------------------
/script/add_sentences_relations.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0,1 python ../two_sentences_classifier/add_sentences_relations.py \
2 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
3 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
4 | --do_lower_case \
5 | --train_file /nas/xd/data/novels/figure_relation/small.data.train \
6 | --eval_file /nas/xd/data/novels/figure_relation/small.data.dev \
7 | --train_batch_size 32 \
8 | --eval_batch_size 8 \
9 | --learning_rate 5e-5 \
10 | --num_train_epochs 6 \
11 | --top_n 7 \
12 | --num_labels 4 \
13 | --output_dir ./add_sentences_relation_0513_7 \
14 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
15 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
16 | --do_train \
17 | --gradient_accumulation_steps 4 3>&2 2>&1 1>&3 | tee logs/add_sentences_relation_0513_7.log
18 |
--------------------------------------------------------------------------------
/script/all_cls_mean_relations.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train-big" ]; then
4 | echo "start to train big data......"
5 |
6 | CUDA_VISIBLE_DEVICES=2,3 python ../two_sentences_classifier/all_cls_mean_relations.py \
7 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
8 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/xd/data/novels/figure_relation/train/ \
11 | --eval_train_file /nas/xd/data/novels/figure_relation/train/figure_relation_6001 \
12 | --eval_file /nas/xd/data/novels/figure_relation/dev/figure_relation_5001 \
13 | --train_batch_size 20 \
14 | --eval_batch_size 10 \
15 | --learning_rate 1e-5 \
16 | --num_train_epochs 6 \
17 | --top_n 7 \
18 | --num_labels 2 \
19 | --output_dir ./all_token_mean_abs_model0528 \
20 | --reduce_dim 768 \
21 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
22 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
23 | --do_train \
24 | --gradient_accumulation_steps 2 3>&2 2>&1 1>&3 | tee logs/all_token_mean_abs_model0528.log
25 |
26 |
27 | elif [ "$1" = "train-small" ];then
28 | echo "start to train small data......"
29 |
30 |
31 | CUDA_VISIBLE_DEVICES=2,3 python ../two_sentences_classifier/all_cls_mean_relations.py \
32 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
33 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
34 | --do_lower_case \
35 | --train_file /nas/xd/data/novels/figure_relation/small/small.data.train \
36 | --eval_train_file /nas/xd/data/novels/figure_relation/small/small.figure_relation_6001 \
37 | --eval_file /nas/xd/data/novels/figure_relation/small/small.figure_relation_5001 \
38 | --train_batch_size 20 \
39 | --eval_batch_size 10 \
40 | --learning_rate 1e-5 \
41 | --num_train_epochs 6 \
42 | --top_n 7 \
43 | --num_labels 2 \
44 | --output_dir ./all_token_mean_abs_model0528 \
45 | --reduce_dim 768 \
46 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
47 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
48 | --do_train \
49 | --gradient_accumulation_steps 2 3>&2 2>&1 1>&3 | tee logs/all_token_mean_abs_model0528.log
50 |
51 | elif [ "$1" = "predict" ];then
52 | echo "start to predict......"
53 |
54 | CUDA_VISIBLE_DEVICES=5,6,7 python ../two_sentences_classifier/all_cls_mean_relations.py \
55 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
56 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
57 | --do_lower_case \
58 | --train_file /nas/xd/data/novels/figure_relation/data.train \
59 | --eval_train_file /nas/xd/data/novels/figure_relation/2001.train \
60 | --eval_file /nas/xd/data/novels/figure_relation/data.dev \
61 | --train_batch_size 30 \
62 | --eval_batch_size 5 \
63 | --learning_rate 3e-5 \
64 | --num_train_epochs 6 \
65 | --top_n 7 \
66 | --num_labels 2 \
67 | --result_file ./add_type_model0515_7_1/predict0518_dev.csv \
68 | --output_dir ./add_type_model0515_7_1 \
69 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
70 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
71 | --do_predict \
72 | --gradient_accumulation_steps 6 3>&2 2>&1 1>&3 | tee logs/add_type_model0515_7_1.log
73 |
74 | else
75 |     echo 'unknown argument 1'
76 | fi
77 |
--------------------------------------------------------------------------------
/script/order_relation.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train-big" ]; then
4 | echo "start to train big data......"
5 |
6 | CUDA_VISIBLE_DEVICES=0,1 python ../relation_classifier/order_relation_classifier2.py \
7 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
8 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/lishengping/datas/figure_relation_lsp_final/train/ \
11 | --eval_train_file /nas/lishengping/datas/figure_relation_lsp_final/train/figure_relation_2281 \
12 | --eval_file /nas/lishengping/datas/figure_relation_lsp_final/dev/data.dev \
13 | --train_batch_size 21 \
14 | --eval_batch_size 3 \
15 | --learning_rate 1e-5 \
16 | --num_train_epochs 6 \
17 | --top_n 7 \
18 | --num_labels 4 \
19 | --output_dir ./order_b21_lr1e5_t7_d03_relation_0619 \
20 | --reduce_dim 768 \
21 | --gpu0_size 1 \
22 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
23 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
24 | --do_train \
25 | --gradient_accumulation_steps 7 3>&2 2>&1 1>&3 | tee logs/order_b21_lr1e5_t7_d03_relation_0619.log
26 |
27 |
28 | elif [ "$1" = "train-small" ];then
29 | echo "start to train small data......"
30 |
31 | CUDA_VISIBLE_DEVICES=0,1 python ../relation_classifier/order_relation_classifier2.py \
32 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
33 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
34 | --do_lower_case \
35 | --train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.data.train \
36 | --eval_train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_2281 \
37 | --eval_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_1 \
38 | --train_batch_size 21 \
39 | --eval_batch_size 3 \
40 | --learning_rate 3e-5 \
41 | --num_train_epochs 6 \
42 | --top_n 7 \
43 | --num_labels 2 \
44 | --output_dir ./small_relation_0616 \
45 | --reduce_dim 768 \
46 | --gpu0_size 1 \
47 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
48 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
49 | --do_train \
50 | --gradient_accumulation_steps 7 3>&2 2>&1 1>&3 | tee logs/small_relation_0616.log
51 |
52 | elif [ "$1" = "eval-small" ];then
53 | echo "start to eval......"
54 |
55 | CUDA_VISIBLE_DEVICES=0,1 python ../relation_classifier/order_relation_classifier.py \
56 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
57 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
58 | --do_lower_case \
59 | --train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_2281 \
60 | --eval_train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_2281 \
61 | --eval_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_1 \
62 | --train_batch_size 24 \
63 | --eval_batch_size 4 \
64 | --learning_rate 1e-5 \
65 | --num_train_epochs 6 \
66 | --top_n 7 \
67 | --num_labels 4 \
68 | --result_file ./order_relation.csv \
69 | --output_dir order_b27_lr1e5_t7_d03_relation_0615 \
70 | --reduce_dim 768 \
71 | --gpu0_size 0 \
72 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
73 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
74 | --do_eval \
75 | --gradient_accumulation_steps 12 3>&2 2>&1 1>&3 | tee logs/activate_cls_abs_model0525.log
76 |
77 | elif [ "$1" = "predict-small" ];then
78 | echo "start to predict......"
79 |
80 | CUDA_VISIBLE_DEVICES=0,1 python ../relation_classifier/order_relation_classifier.py \
81 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
82 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
83 | --do_lower_case \
84 | --train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_2281 \
85 | --eval_train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_2281 \
86 | --eval_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_1 \
87 | --train_batch_size 24 \
88 | --eval_batch_size 4 \
89 | --learning_rate 1e-5 \
90 | --num_train_epochs 6 \
91 | --top_n 7 \
92 | --num_labels 4 \
93 | --result_file ./order_relation.csv \
94 | --output_dir order_b27_lr1e5_t7_d03_relation_0615 \
95 | --reduce_dim 768 \
96 | --gpu0_size 0 \
97 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
98 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
99 | --do_predict \
100 | --gradient_accumulation_steps 12 3>&2 2>&1 1>&3 | tee logs/activate_cls_abs_model0525.log
101 |
102 | elif [ "$1" = "predict-big" ];then
103 | echo "start to predict......"
104 |
105 | CUDA_VISIBLE_DEVICES=3,4 python ../relation_classifier/order_relation_classifier.py \
106 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
107 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
108 | --do_lower_case \
109 | --train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.data.train \
110 | --eval_train_file /nas/lishengping/datas/figure_relation_lsp_final/small/small.figure_relation_6001 \
111 | --eval_file /nas/lishengping/datas/figure_relation_lsp_final/dev/figure_relation_5001 \
112 | --train_batch_size 30 \
113 | --eval_batch_size 20 \
114 | --learning_rate 3e-5 \
115 | --num_train_epochs 6 \
116 | --top_n 15 \
117 | --num_labels 2 \
118 | --result_file ./predict0602_15_15_dev.csv \
119 | --output_dir /nas/lishengping/relation_models/activate_cls_abs_model0531_15 \
120 | --reduce_dim 768 \
121 | --gpu0_size 0 \
122 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
123 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
124 | --do_predict \
125 | --gradient_accumulation_steps 6 3>&2 2>&1 1>&3 | tee logs/activate_cls_abs_model0525.log
126 |
127 | else
128 |     echo 'unknown argument 1'
129 | fi
130 |
--------------------------------------------------------------------------------
/script/relation_classifier.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train-big" ]; then
4 | echo "start to train big data......"
5 |
6 | CUDA_VISIBLE_DEVICES=1,0 python ../relation_classifier/relation_classify.py \
7 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
8 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/train/ \
11 | --eval_train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/train/figure_relation_2281 \
12 | --eval_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/dev/data.dev \
13 | --train_batch_size 24 \
14 | --eval_batch_size 12 \
15 | --learning_rate 1e-5 \
16 | --num_train_epochs 6 \
17 | --top_n 10 \
18 | --num_labels 2 \
19 | --output_dir ./cat_b24_lr1e5_t10_d03_relation_1223 \
20 | --reduce_dim 768 \
21 | --gpu0_size 0 \
22 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
23 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
24 | --do_train \
25 | --gradient_accumulation_steps 6 3>&2 2>&1 1>&3 | tee logs/cat_b24_lr1e5_t10_d03_relation_1223.log
26 |
27 |
28 | elif [ "$1" = "train-small" ];then
29 | echo "start to train small data......"
30 |
31 | CUDA_VISIBLE_DEVICES=1,0 python ../relation_classifier/relation_classify.py \
32 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
33 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
34 | --do_lower_case \
35 | --train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.data.train \
36 | --eval_train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.figure_relation_2281 \
37 | --eval_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.figure_relation_1 \
38 | --train_batch_size 24 \
39 | --eval_batch_size 6 \
40 | --learning_rate 1e-5 \
41 | --num_train_epochs 6 \
42 | --top_n 10 \
43 | --num_labels 2 \
44 | --output_dir ./small_relation_0701 \
45 | --reduce_dim 768 \
46 | --gpu0_size 0 \
47 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
48 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
49 | --do_train \
50 | --gradient_accumulation_steps 6 3>&2 2>&1 1>&3 | tee logs/small_relation_1223.log
51 |
52 | elif [ "$1" = "predict-small" ];then
53 | echo "start to predict......"
54 |
55 | CUDA_VISIBLE_DEVICES=2,3 python ../relation_classifier/relation_classify.py \
56 | --vocab_file cat_b24_lr1e5_t7_d03_relation_1223/vocab.txt \
57 | --bert_config_file cat_b24_lr1e5_t7_d03_relation_1223/bert_config.json \
58 | --do_lower_case \
59 | --train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.data.train \
60 | --eval_train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.figure_relation_2281 \
61 | --eval_file /nas/jiangdanyang/projects/NLP-SubjectExtract-relation/src/data/res/figure_relation/dev.csv \
62 | --train_batch_size 24 \
63 | --eval_batch_size 160 \
64 | --learning_rate 1e-5 \
65 | --num_train_epochs 6 \
66 | --top_n 7 \
67 | --num_labels 2 \
68 | --result_file ./predict1223_7_24_dev_jdy.csv \
69 | --output_dir cat_b24_lr1e5_t7_d03_relation_1223 \
70 | --reduce_dim 768 \
71 | --gpu0_size 0 \
72 | --bert_model cat_b24_lr1e5_t7_d03_relation_1223 \
73 | --init_checkpoint cat_b24_lr1e5_t7_d03_relation_1223/pytorch_model.bin \
74 | --do_predict \
75 | --gradient_accumulation_steps 12 3>&2 2>&1 1>&3 | tee logs/1223predict.log
76 |
77 | elif [ "$1" = "predict-big" ];then
78 | echo "start to predict......"
79 |
80 | CUDA_VISIBLE_DEVICES=0,1,2,3 python ../relation_classifier/relation_classify.py \
81 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
82 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
83 | --do_lower_case \
84 | --train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.data.train \
85 | --eval_train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.figure_relation_2281 \
86 | --eval_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/dev/figure_relation_1 \
87 | --train_batch_size 24 \
88 | --eval_batch_size 80 \
89 | --learning_rate 1e-5 \
90 | --num_train_epochs 6 \
91 | --top_n 20 \
92 | --num_labels 2 \
93 | --result_file ./predict0701_7_20_dev.csv \
94 | --output_dir ./cat_b20_lr1e5_t7_d03_relation_0701 \
95 | --reduce_dim 0 \
96 | --gpu0_size 0 \
97 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
98 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
99 | --do_predict \
100 | --gradient_accumulation_steps 12 3>&2 2>&1 1>&3 | tee logs/activate_cls_abs_model0525.log
101 |
102 | else
103 |     echo 'unknown argument 1'
104 | fi
105 |
--------------------------------------------------------------------------------
/script/scene_classifier.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train" ]; then
4 | echo "start to train......"
5 |
6 | CUDA_VISIBLE_DEVICES=4,5,6,7 python ../scene_classifier/scene_classifier_train.py \
7 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
8 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.train \
11 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/train_data.dev \
12 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.dev \
13 | --train_batch_size 40 \
14 | --eval_batch_size 40 \
15 | --learning_rate 3e-5 \
16 | --num_train_epochs 6 \
17 | --top_n 7 \
18 | --num_labels 2 \
19 | --output_dir ./7_scene_model_b40_l3_5_eps6 \
20 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
21 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
22 | --do_train \
23 | --gradient_accumulation_steps 2 3>&2 2>&1 1>&3 | tee logs/7_scene_model_b40_l3_5_eps6.log
24 |
25 |
26 | elif [ "$1" = "eval" ];then
27 | echo "start to eval......"
28 |
29 | CUDA_VISIBLE_DEVICES=0,2 python ../scene_classifier/scene_classifier_train.py \
30 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
31 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
32 | --do_lower_case \
33 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/small.data.train \
34 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/small.train_data.dev \
35 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/data.dev \
36 | --train_batch_size 20 \
37 | --eval_batch_size 20 \
38 | --learning_rate 1e-5 \
39 | --num_train_epochs 6 \
40 | --top_n 3 \
41 | --num_labels 2 \
42 | --output_dir ./3_scene_model0525_bert \
43 | --result_file ./3_scene_model0525_bert/top5_predict_0525.csv \
44 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
45 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
46 | --do_predict \
47 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/3_scene_model0521_bert.log
48 |
49 | elif [ "$1" = "predict" ];then
50 | echo "start to predict......"
51 |
52 | CUDA_VISIBLE_DEVICES=7,4 python ../scene_classifier/scene_classifier_train.py \
53 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
54 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
55 | --do_lower_case \
56 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/small.data.train \
57 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/small.train_data.dev \
58 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/small.data.dev \
59 | --predict_file /nas/xd/data/kuaidian/clear_kuaidian_0519/高冷校草缠上我.txt \
60 | --train_batch_size 20 \
61 | --eval_batch_size 20 \
62 | --learning_rate 1e-5 \
63 | --num_train_epochs 6 \
64 | --top_n 5 \
65 | --num_labels 2 \
66 | --output_dir /nas/lishengping/scene_models/5_scene_model0525_bert \
67 | --result_file /nas/lishengping/scene_models/5_scene_model0525_bert/predict.csv \
68 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
69 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
70 | --do_predict \
71 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/5_scene_model0526_bert.log
72 | else
73 |     echo 'unknown argument 1'
74 | fi
75 |
76 |
--------------------------------------------------------------------------------
/script/scene_fix_2+huameng_classifier.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train" ]; then
4 | echo "start to train......"
5 |
6 | CUDA_VISIBLE_DEVICES=0,1 python ../scene_classifier/fix_2_train.py \
7 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
8 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.train+huameng \
11 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/train_data.dev \
12 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.dev \
13 | --train_batch_size 32 \
14 | --eval_batch_size 32 \
15 | --learning_rate 2e-5 \
16 | --num_train_epochs 3 \
17 | --top_n 5 \
18 | --max_seq_len 150 \
19 | --num_labels 2 \
20 | --output_dir ../models/5_scene_model_bert_fix2+huameng \
21 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
22 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
23 | --do_train \
24 | --gradient_accumulation_steps 1 # 3>&2 2>&1 1>&3 | tee logs/3_scene_model0521_bert.log
25 |
26 |
27 | elif [ "$1" = "eval" ];then
28 | echo "start to eval......"
29 |
30 | CUDA_VISIBLE_DEVICES=0,1 python ../scene_classifier/fix_3_train.py \
31 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
32 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
33 | --do_lower_case \
34 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.train \
35 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.train_data.dev \
36 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.dev \
37 | --train_batch_size 20 \
38 | --eval_batch_size 20 \
39 | --learning_rate 1e-5 \
40 | --num_train_epochs 6 \
41 | --top_n 3 \
42 | --num_labels 2 \
43 | --output_dir ./3_scene_model0525_bert \
44 | --result_file ./3_scene_model0525_bert/top5_predict_0525.csv \
45 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
46 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
47 | --do_predict \
48 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/3_scene_model0521_bert.log
49 |
50 | elif [ "$1" = "predict" ];then
51 | echo "start to predict......"
52 |
53 | CUDA_VISIBLE_DEVICES=7,4 python ../scene_classifier/fix_2_train.py \
54 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
55 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
56 | --do_lower_case \
57 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.train \
58 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.train_data.dev \
59 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.dev \
60 | --predict_file /nas/xd/data/kuaidian/clear_kuaidian_0519/高冷校草缠上我.txt \
61 | --train_batch_size 20 \
62 | --eval_batch_size 20 \
63 | --learning_rate 1e-5 \
64 | --num_train_epochs 6 \
65 | --top_n 5 \
66 | --num_labels 2 \
67 | --output_dir /nas/lishengping/scene_models/5_scene_model0525_bert \
68 | --result_file /nas/lishengping/scene_models/5_scene_model0525_bert/predict.csv \
69 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
70 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
71 | --do_predict \
72 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/5_scene_model0526_bert.log
73 | else
74 |     echo 'unknown argument 1'
75 | fi
76 |
77 |
--------------------------------------------------------------------------------
/script/scene_fix_2_classifier.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train" ]; then
4 | echo "start to train......"
5 |
6 | CUDA_VISIBLE_DEVICES=0,1 python ../scene_classifier/fix_2_train.py \
7 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
8 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.train \
11 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/train_data.dev \
12 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.dev \
13 | --train_batch_size 32 \
14 | --eval_batch_size 32 \
15 | --learning_rate 2e-5 \
16 | --num_train_epochs 3 \
17 | --top_n 5 \
18 | --max_seq_len 150 \
19 | --num_labels 2 \
20 | --output_dir ../models/5_scene_model_bert_fix2 \
21 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
22 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
23 | --do_train \
24 | --gradient_accumulation_steps 1 # 3>&2 2>&1 1>&3 | tee logs/3_scene_model0521_bert.log
25 |
26 |
27 | elif [ "$1" = "eval" ];then
28 | echo "start to eval......"
29 |
30 | CUDA_VISIBLE_DEVICES=0,1 python ../scene_classifier/fix_3_train.py \
31 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
32 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
33 | --do_lower_case \
34 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.train \
35 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.train_data.dev \
36 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.dev \
37 | --train_batch_size 20 \
38 | --eval_batch_size 20 \
39 | --learning_rate 1e-5 \
40 | --num_train_epochs 6 \
41 | --top_n 3 \
42 | --num_labels 2 \
43 | --output_dir ./3_scene_model0525_bert \
44 | --result_file ./3_scene_model0525_bert/top5_predict_0525.csv \
45 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
46 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
47 | --do_predict \
48 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/3_scene_model0521_bert.log
49 |
50 | elif [ "$1" = "predict" ];then
51 | echo "start to predict......"
52 |
53 | CUDA_VISIBLE_DEVICES=7,4 python ../scene_classifier/fix_2_train.py \
54 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
55 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
56 | --do_lower_case \
57 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.train \
58 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.train_data.dev \
59 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.dev \
60 | --predict_file /nas/xd/data/kuaidian/clear_kuaidian_0519/高冷校草缠上我.txt \
61 | --train_batch_size 20 \
62 | --eval_batch_size 20 \
63 | --learning_rate 1e-5 \
64 | --num_train_epochs 6 \
65 | --top_n 5 \
66 | --num_labels 2 \
67 | --output_dir /nas/lishengping/scene_models/5_scene_model0525_bert \
68 | --result_file /nas/lishengping/scene_models/5_scene_model0525_bert/predict.csv \
69 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
70 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
71 | --do_predict \
72 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/5_scene_model0526_bert.log
73 | else
74 |     echo 'unknown argument 1'
75 | fi
76 |
77 |
--------------------------------------------------------------------------------
/script/scene_fix_3_classifier.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train" ]; then
4 | echo "start to train......"
5 |
6 | CUDA_VISIBLE_DEVICES=4,5,2,3 python ../scene_classifier/fix_3_train.py \
7 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
8 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.train \
11 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/train_data.dev \
12 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.dev \
13 | --train_batch_size 20 \
14 | --eval_batch_size 40 \
15 | --learning_rate 1e-5 \
16 | --num_train_epochs 6 \
17 | --top_n 9 \
18 | --num_labels 2 \
19 | --output_dir ./9_scene_model_b40_lr5_5_eps6_decay \
20 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
21 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
22 | --do_train \
23 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/5_scene_model_b20_lr1_5_eps6.log
24 |
25 |
26 | elif [ "$1" = "eval" ];then
27 | echo "start to eval......"
28 |
29 | CUDA_VISIBLE_DEVICES=0,1 python ../scene_classifier/fix_3_train.py \
30 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
31 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
32 | --do_lower_case \
33 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.train \
34 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.train_data.dev \
35 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/data.dev \
36 | --train_batch_size 20 \
37 | --eval_batch_size 20 \
38 | --learning_rate 1e-5 \
39 | --num_train_epochs 6 \
40 | --top_n 3 \
41 | --num_labels 2 \
42 | --output_dir ./3_scene_model0525_bert \
43 | --result_file ./3_scene_model0525_bert/top5_predict_0525.csv \
44 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
45 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
46 | --do_predict \
47 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/3_scene_model0521_bert.log
48 |
49 | elif [ "$1" = "predict" ];then
50 | echo "start to predict......"
51 |
52 | CUDA_VISIBLE_DEVICES=7,4 python ../scene_classifier/fix_3_train.py \
53 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/vocab.txt \
54 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/bert_config.json \
55 | --do_lower_case \
56 | --train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.train \
57 | --eval_train_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.train_data.dev \
58 | --eval_file /nas/xd/projects/novel_analyzer/scene_cut_datas/0527/small.data.dev \
59 | --predict_file /nas/xd/data/kuaidian/clear_kuaidian_0519/高冷校草缠上我.txt \
60 | --train_batch_size 20 \
61 | --eval_batch_size 20 \
62 | --learning_rate 1e-5 \
63 | --num_train_epochs 6 \
64 | --top_n 5 \
65 | --num_labels 2 \
66 | --output_dir 5_scene_model0527_clear_l \
67 | --result_file 5_scene_model0527_clear_l/predict.csv \
68 | --bert_model /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese \
69 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/bert-base-chinese/pytorch_model.bin \
70 | --do_predict \
71 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/5_scene_model0526_bert.log
72 | else
73 |     echo 'unknown argument 1'
74 | fi
75 |
76 |
--------------------------------------------------------------------------------
/script/sim_classifier.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "train-big" ]; then
4 | echo "start to train big data......"
5 |
6 | CUDA_VISIBLE_DEVICES=1,0 python ../relation_classifier/sim_classify.py \
7 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
8 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
9 | --do_lower_case \
10 | --train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/train/ \
11 | --eval_train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/train/figure_relation_2281 \
12 | --eval_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/dev/data.dev \
13 | --train_batch_size 24 \
14 | --eval_batch_size 12 \
15 | --learning_rate 1e-5 \
16 | --num_train_epochs 6 \
17 | --top_n 10 \
18 | --num_labels 2 \
19 | --output_dir ./cat_b24_lr1e5_t10_d03_relation_1223 \
20 | --reduce_dim 768 \
21 | --gpu0_size 0 \
22 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
23 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
24 | --do_train \
25 | --gradient_accumulation_steps 6 3>&2 2>&1 1>&3 | tee logs/cat_b24_lr1e5_t10_d03_relation_1223.log
26 |
27 |
28 | elif [ "$1" = "train-bq_corpus" ];then
29 |     echo "start to train bq_corpus data......"
30 |
31 | CUDA_VISIBLE_DEVICES=2,3 python ../relation_classifier/sim_classify.py \
32 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
33 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
34 | --do_lower_case \
35 | --train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/train.tsv \
36 | --eval_train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
37 | --eval_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
38 | --train_batch_size 96 \
39 | --eval_batch_size 96 \
40 | --learning_rate 2e-5 \
41 | --num_train_epochs 6 \
42 | --top_n 1 \
43 | --num_labels 2 \
44 | --output_dir ./small_relation_0701 \
45 | --reduce_dim 768 \
46 | --gpu0_size 0 \
47 | --max_seq_length 50 \
48 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
49 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
50 | --do_train \
51 | --gradient_accumulation_steps 3 3>&2 2>&1 1>&3 | tee logs/bq_corpus_1214.log
52 | elif [ "$1" = "train-moe-bq_corpus" ];then
53 |     echo "start to train bq_corpus data with moe......"
54 |
55 | CUDA_VISIBLE_DEVICES=2,3 python ../relation_classifier/sim_classify.py \
56 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
57 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
58 | --do_lower_case \
59 | --train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/train.tsv \
60 | --eval_train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
61 | --eval_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
62 | --train_batch_size 96 \
63 | --eval_batch_size 96 \
64 | --learning_rate 2e-5 \
65 | --num_train_epochs 6 \
66 | --top_n 1 \
67 | --num_labels 2 \
68 | --output_dir ./small_relation_0701 \
69 | --reduce_dim 768 \
70 | --gpu0_size 0 \
71 | --max_seq_length 50 \
72 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
73 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
74 | --do_train \
75 | --moe \
76 | --gradient_accumulation_steps 3 3>&2 2>&1 1>&3 | tee logs/bq_corpus_moe_1214_d512.log
77 |
78 | elif [ "$1" = "train-os_loss-bq_corpus" ];then
79 |     echo "start to train bq_corpus data with os_loss......"
80 |
81 | CUDA_VISIBLE_DEVICES=0,1 python ../relation_classifier/sim_classify.py \
82 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
83 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
84 | --do_lower_case \
85 | --train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/train.tsv \
86 | --eval_train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
87 | --eval_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
88 | --train_batch_size 96 \
89 | --eval_batch_size 96 \
90 | --learning_rate 2e-5 \
91 | --num_train_epochs 6 \
92 | --top_n 1 \
93 | --num_labels 2 \
94 | --output_dir ./small_relation_0701 \
95 | --reduce_dim 768 \
96 | --gpu0_size 0 \
97 | --max_seq_length 50 \
98 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
99 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
100 | --do_train \
101 | --os_loss \
102 | --gradient_accumulation_steps 3 3>&2 2>&1 1>&3 | tee logs/bq_corpus_with_os_loss_1213.alpha.log
103 |
104 | elif [ "$1" = "train-moe-os_loss-bq_corpus" ];then
105 | CUDA_VISIBLE_DEVICES=0,1 python ../relation_classifier/sim_classify.py \
106 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
107 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
108 | --do_lower_case \
109 | --train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/train.tsv \
110 | --eval_train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
111 | --eval_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/bq_corpus/dev.tsv \
112 | --train_batch_size 96 \
113 | --eval_batch_size 96 \
114 | --learning_rate 2e-5 \
115 | --num_train_epochs 6 \
116 | --top_n 1 \
117 | --num_labels 2 \
118 | --output_dir ./moe_bq_corpus0317 \
119 | --reduce_dim 768 \
120 | --gpu0_size 0 \
121 | --max_seq_length 50 \
122 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
123 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
124 | --do_train \
125 | --os_loss \
126 | --moe \
127 | --gradient_accumulation_steps 3 3>&2 2>&1 1>&3 | tee logs/moe_bq_corpus0317.os_loss.log
128 |
129 | elif [ "$1" = "train-moe-LCQMC" ];then
130 |     echo "start to train LCQMC data with moe......"
131 |
132 | CUDA_VISIBLE_DEVICES=2,3 python ../relation_classifier/sim_classify.py \
133 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
134 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
135 | --do_lower_case \
136 | --train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/train.txt \
137 | --eval_train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/dev.txt \
138 | --eval_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/dev.txt \
139 | --train_batch_size 16 \
140 | --eval_batch_size 16 \
141 | --learning_rate 2e-5 \
142 | --num_train_epochs 6 \
143 | --top_n 1 \
144 | --num_labels 2 \
145 | --output_dir ./train-moe-LCQMC_0317 \
146 | --reduce_dim 768 \
147 | --gpu0_size 0 \
148 | --max_seq_length 50 \
149 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
150 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
151 | --do_train \
152 | --moe \
153 | --gradient_accumulation_steps 1 3>&2 2>&1 1>&3 | tee logs/LCQMC_moe_0317_d512.log
154 |
155 |
156 | elif [ "$1" = "train-LCQMC" ];then
157 |     echo "start to train LCQMC data......"
158 |
159 | CUDA_VISIBLE_DEVICES=2,3 python ../relation_classifier/sim_classify.py \
160 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
161 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
162 | --do_lower_case \
163 | --train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/train.txt \
164 | --eval_train_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/dev.txt \
165 | --eval_file /nas2/lishengping/caiyun_projects/sim_for_cls/data/LCQMC/dev.txt \
166 | --train_batch_size 96 \
167 | --eval_batch_size 96 \
168 | --learning_rate 2e-5 \
169 | --num_train_epochs 6 \
170 | --top_n 1 \
171 | --num_labels 2 \
172 | --output_dir ./small_relation_0701 \
173 | --reduce_dim 768 \
174 | --gpu0_size 0 \
175 | --max_seq_length 40 \
176 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
177 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
178 | --do_train \
179 | --gradient_accumulation_steps 3 3>&2 2>&1 1>&3 | tee logs/LCQMC1213.log
180 |
181 | elif [ "$1" = "predict-small" ];then
182 | echo "start to predict......"
183 |
184 | CUDA_VISIBLE_DEVICES=2,3 python ../relation_classifier/sim_classify.py \
185 | --vocab_file cat_b24_lr1e5_t7_d03_relation_1223/vocab.txt \
186 | --bert_config_file cat_b24_lr1e5_t7_d03_relation_1223/bert_config.json \
187 | --do_lower_case \
188 | --train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.data.train \
189 | --eval_train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.figure_relation_2281 \
190 | --eval_file /nas/jiangdanyang/projects/NLP-SubjectExtract-relation/src/data/res/figure_relation/dev.csv \
191 | --train_batch_size 24 \
192 | --eval_batch_size 160 \
193 | --learning_rate 1e-5 \
194 | --num_train_epochs 6 \
195 | --top_n 7 \
196 | --num_labels 2 \
197 | --result_file ./predict1223_7_24_dev_jdy.csv \
198 | --output_dir cat_b24_lr1e5_t7_d03_relation_1223 \
199 | --reduce_dim 768 \
200 | --gpu0_size 0 \
201 | --bert_model cat_b24_lr1e5_t7_d03_relation_1223 \
202 | --init_checkpoint cat_b24_lr1e5_t7_d03_relation_1223/pytorch_model.bin \
203 | --do_predict \
204 | --gradient_accumulation_steps 12 3>&2 2>&1 1>&3 | tee logs/1223predict.log
205 |
206 | elif [ "$1" = "predict-big" ];then
207 | echo "start to predict......"
208 |
209 | CUDA_VISIBLE_DEVICES=0,1,2,3 python ../relation_classifier/sim_classify.py \
210 | --vocab_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/vocab.txt \
211 | --bert_config_file /nas2/lishengping/models/pretrain_models/bert_base_chinese/bert_config.json \
212 | --do_lower_case \
213 | --train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.data.train \
214 | --eval_train_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/small/small.figure_relation_2281 \
215 | --eval_file /nas/lishengping/datas/person2vector/figure_relation_lsp_final/dev/figure_relation_1 \
216 | --train_batch_size 24 \
217 | --eval_batch_size 80 \
218 | --learning_rate 1e-5 \
219 | --num_train_epochs 6 \
220 | --top_n 20 \
221 | --num_labels 2 \
222 | --result_file ./predict0701_7_20_dev.csv \
223 | --output_dir ./cat_b20_lr1e5_t7_d03_relation_0701 \
224 | --reduce_dim 0 \
225 | --gpu0_size 0 \
226 | --bert_model /nas2/lishengping/models/pretrain_models/bert_base_chinese/ \
227 | --init_checkpoint /nas2/lishengping/models/pretrain_models/bert_base_chinese/pytorch_model.bin \
228 | --do_predict \
229 | --gradient_accumulation_steps 12 3>&2 2>&1 1>&3 | tee logs/activate_cls_abs_model0525.log
230 |
231 | else
232 |     echo 'unknown argument 1'
233 | fi
234 |
--------------------------------------------------------------------------------
/script/two_sentences_classifier.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0,1,2 python ../two_sentences_classifier/add_type_train.py \
2 | --vocab_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/vocab.txt \
3 | --bert_config_file /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/bert_config.json \
4 | --do_lower_case \
5 | --train_file /nas/xd/data/novels/speech_labeled20/small.data.train \
6 | --eval_file /nas/xd/data/novels/speech_labeled20/small.data.dev \
7 | --train_batch_size 72 \
8 | --eval_batch_size 12 \
9 | --learning_rate 5e-5 \
10 | --num_train_epochs 6 \
11 | --top_n 15 \
12 | --num_labels 2 \
13 | --output_dir ./two_sentences_classifier_model0513_7 \
14 | --bert_model /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/ \
15 | --init_checkpoint /nas/pretrain-bert/pretrain-pytorch/chinese_wwm_ext_pytorch/pytorch_model.bin \
16 | --do_train \
17 | --gradient_accumulation_steps 9 3>&2 2>&1 1>&3 | tee logs/two_sentences_classifier_7.log
18 |
--------------------------------------------------------------------------------
/server.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | from itertools import chain
4 | import json
5 |
6 | # Path to modify (1): the repo's common_file directory
7 | sys.path.append('/nas2/lishengping/caiyun_projects/two_sentences_classifier/common_file/')
8 |
9 | import torch
10 | from flask import Flask, request, jsonify
11 |
12 | import tokenization
13 | from modeling import BertConfig, TwoSentenceClassifier
14 |
15 |
16 | app = Flask(__name__)
17 |
18 |
19 | def convert_text_to_ids(text_a, text_b):
20 | features = []
21 | max_seq_length = 50
22 | input_ids, input_masks, segment_ids = [], [], []
23 | for i, sent in enumerate(chain(text_a, text_b)):
24 | sent_length = len(sent)
25 | sents_token = tokenizer.tokenize(sent)
26 | sents_token = ['[CLS]'] + sents_token[:max_seq_length - 2] + ['[SEP]']
27 | sent_segment_ids = [0] * len(sents_token)
28 | length = len(sents_token)
29 | sent_input_masks = [1] * length
30 | sent_input_ids = tokenizer.convert_tokens_to_ids(sents_token)
31 |
32 | while length < max_seq_length:
33 | sent_input_ids.append(0)
34 | sent_input_masks.append(0)
35 | sent_segment_ids.append(0)
36 | length += 1
37 |
38 | assert len(sent_segment_ids) == len(sent_input_ids) == len(sent_input_masks)
39 | input_ids.append(sent_input_ids)
40 | input_masks.append(sent_input_masks)
41 | segment_ids.append(sent_segment_ids)
42 |
43 | assert len(input_ids) == 2
44 |
45 | input_ids = torch.tensor(input_ids).unsqueeze(0)
46 | input_masks = torch.tensor(input_masks).unsqueeze(0)
47 | sent_segment_ids = torch.tensor(segment_ids).unsqueeze(0)
48 | return input_ids, input_masks, sent_segment_ids
49 |
50 | moe = True
51 | num_labels = 2
52 | reduce_dim = 768
53 |
54 |
55 | # Path to modify (2): directory of the trained model checkpoint
56 | model_dir = '/nas2/lishengping/caiyun_projects/two_sentences_classifier/script/train-moe-LCQMC_1215_2/'
57 |
58 | bert_config_file = os.path.join(model_dir, 'bert_config.json')
59 | vocab_file = os.path.join(model_dir, 'vocab.txt')
60 | model_path = os.path.join(model_dir, 'pytorch_model.bin.e1.s13499')
61 |
62 | bert_config = BertConfig.from_json_file(bert_config_file)
63 | bert_config.reduce_dim = reduce_dim
64 | print(f'bert_config: {bert_config}')
65 | labels_text = ['不相似', '相似']
66 |
67 | model = TwoSentenceClassifier(bert_config,
68 | num_labels=num_labels,
69 | moe=moe,
70 | os_loss=False)
71 |
72 |
73 | tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
74 | do_lower_case=True)
75 |
76 |
77 | state_dict = torch.load(model_path, map_location='cpu')
78 | remove_prefix_state_dict = {(k[7:] if k.startswith('module.') else k): v for k, v in state_dict.items()}  # strip DataParallel's 'module.' prefix if present
79 | model.load_state_dict(remove_prefix_state_dict)
80 |
81 | for k, v in state_dict.items():
82 | print(k, v.shape)
83 | model.eval()  # inference only: disable dropout
84 | def predict(text_a:str, text_b:str, label=None):
85 | input_ids, input_masks, sent_segment_ids = convert_text_to_ids([text_a], [text_b])
86 | inputs = {'input_ids': input_ids,
87 | 'token_type_ids': sent_segment_ids,
88 | 'attention_mask': input_masks,
89 | 'labels': label}
90 | with torch.no_grad():
91 | outputs = model(**inputs)
92 | loss, logits = outputs
93 | predicted_class = torch.argmax(logits, dim=-1).item()
94 | prob = logits.view(-1).tolist()[predicted_class]
95 | pred_text = labels_text[predicted_class]
96 | prob = round(prob, 3)
97 |     print(f'Predicted label: {pred_text}, confidence: {prob}')
98 | return pred_text, prob
99 |
100 |
101 | @app.route('/predict', methods=['POST'])
102 | def predict_route():
103 | data = request.get_json()
104 | text_a = data.get("text_a")
105 | text_b = data.get("text_b")
106 |
107 | if not text_a:
108 | return jsonify({"error": "No text_a provided"}), 400
109 |
110 | if not text_b:
111 | return jsonify({"error": "No text_b provided"}), 400
112 |
113 | pred_text, prob = predict(text_a, text_b)
114 | # result = json.dumps({"label": pred_text, 'prob': prob}, ensure_ascii=False)
115 | result = json.dumps({"label": pred_text, 'prob': prob}, ensure_ascii=False)
116 | return result
117 |
118 |
119 | if __name__ == '__main__':
120 | port = int(sys.argv[1])
121 | app.run(debug=True, host='0.0.0.0', port=port)
122 |
123 | """
124 | # Start command:
125 | python server.py 5000
126 | # Example request:
127 | curl -X POST http://127.0.0.1:5000/predict -H "Content-Type: application/json" -d '{"text_a": "人和畜生的区别是什么?", "text_b": "人与畜生的区别是什么!"}'
128 | """
129 |
--------------------------------------------------------------------------------
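A minimal client sketch for the `/predict` endpoint above, mirroring the curl example at the end of server.py (it assumes the server is running locally on port 5000 as in that example; the `requests` package may need to be installed separately):

```python
import requests

# Same payload as the curl example; the route returns a JSON string
# such as {"label": "相似", "prob": 0.998}.
resp = requests.post(
    "http://127.0.0.1:5000/predict",
    json={"text_a": "人和畜生的区别是什么?", "text_b": "人与畜生的区别是什么!"},
)
print(resp.json())
```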
/test/eval.py:
--------------------------------------------------------------------------------
1 | """敬请期待"""
2 |
--------------------------------------------------------------------------------
/two_sentences_classifier/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Lisennlp/two_sentences_classifier/ad8a2fba6baccc5571a41c7562b351e79cef3274/two_sentences_classifier/__init__.py
--------------------------------------------------------------------------------
/two_sentences_classifier/train.py:
--------------------------------------------------------------------------------
1 | """BERT finetuning runner."""
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import os
8 | import sys
9 | import logging
10 | import argparse
11 | import random
12 | from itertools import chain
13 | from tqdm import tqdm, trange
14 | import numpy as np
15 | import pandas as pd
16 | import torch
17 | from torch.utils.data import TensorDataset, DataLoader, RandomSampler
18 | from torch.utils.data.distributed import DistributedSampler
19 | from sklearn.metrics import classification_report
20 |
21 | sys.path.append("../common_file")
22 |
23 | import tokenization
24 | from modeling import BertConfig, TwoSentenceClassifier
25 | from optimization import BertAdam
26 |
27 | logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
28 | datefmt='%m/%d/%Y %H:%M:%S',
29 | level=logging.INFO)
30 | logger = logging.getLogger(__name__)
31 |
32 | WEIGHTS_NAME = 'pytorch_model.bin'
33 | CONFIG_NAME = 'bert_config.json'
34 |
35 |
36 | class InputExample(object):
37 | """A single training/test example for simple sequence classification."""
38 |
39 | def __init__(self, id, text_a, text_b=None, label=None, name=None):
40 | self.id = id
41 | self.text_a = text_a
42 | self.text_b = text_b
43 | self.label = label
44 | self.name = name
45 |
46 |
47 | class InputFeatures(object):
48 | """A single set of features of data."""
49 |
50 | def __init__(self,
51 | input_ids,
52 | input_mask,
53 | segment_ids,
54 | label_id,
55 | position_ids=None,
56 | example_id=None):
57 | self.input_ids = input_ids
58 | self.input_mask = input_mask
59 | self.segment_ids = segment_ids
60 | self.label_id = label_id
61 | self.example_id = example_id
62 | self.position_ids = position_ids
63 |
64 |
65 | class DataProcessor(object):
66 | """Processor for the CoLA data set (GLUE version)."""
67 |
68 | def __init__(self, num_labels):
69 | self.num_labels = num_labels
70 | self.map_symbols = {"’": "'", "‘": "'", "“": '"', "”": '"'}
71 |
72 | def read_novel_examples(self, path, top_n=5, task_name='train'):
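# Expected input line format (inferred from the parsing below): tab-separated
# fields, where the first two fields are '||'-joined sentence lists for the two
# paragraphs being compared, the third field is the 0/1 label, and the last
# field is the novel name.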
73 | examples = []
74 | example_map_ids = {}
75 | zero, one = 0, 0
76 | with open(path, 'r', encoding='utf-8') as f:
77 | for index, line in enumerate(f):
78 | line = line.replace('\n', '').split('\t')
79 | paras = [self.clean_text(p) for p in line[:2]]
80 | assert len(paras) == 2
81 | text_a = paras[0].split('||')[:int(top_n)]
82 | text_b = paras[1].split('||')[:int(top_n)]
83 | label = int(line[2])
84 | assert label in [0, 1]
85 | if label == 1:
86 | if one > 4000:
87 | continue
88 | one += 1
89 | else:
90 | zero += 1
91 | example = InputExample(id=index,
92 | text_a=text_a,
93 | text_b=text_b,
94 | label=int(label),
95 | name=line[-1])
96 | examples.append(example)
97 | if task_name != 'train':
98 | example_map_ids[index] = example
99 |
100 | print(f'{os.path.split(path)[-1]} file examples {len(examples)}')
101 | return examples, example_map_ids
102 |
103 | def read_file_dir(self, dir, top_n=7):
104 | all_examples = []
105 | for root, path_dir, file_names in os.walk(dir):
106 | for file_name in file_names:
107 | # if file_name.startswith('speech') and file_name.endswith('000'):
108 | if file_name.endswith('001'):
109 |
110 | file_abs_path = os.path.join(root, file_name)
111 | examples, _ = self.read_novel_examples(file_abs_path, top_n=top_n)
112 | all_examples.extend(examples)
113 | print(f'dir all file examples {len(all_examples)}')
114 | return all_examples
115 |
116 | def clean_text(self, text):
117 | text = [self.map_symbols.get(w) if self.map_symbols.get(w) else w for w in text]
118 | return ''.join(text)
119 |
120 |
121 | def create_fake_data_features(data_size, max_seq_length):
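# Builds random token-id features with the same shape as real examples; useful for
# smoke-testing the training loop without a corpus (see the commented-out
# prepare_fake_data call in main()).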
122 |
123 | # datas = [[[random.randint(1, 2000) for j in range(random.randint(100, max_seq_length))] for i in range(2)] for k in range(data_size)]
124 | features = []
125 | for i in range(data_size):
126 | input_masks, segment_ids = [], []
127 | sentences_ids = [[
128 | random.randint(1, 2000) for j in range(random.randint(100, max_seq_length))
129 | ] for i in range(2)]
130 | for i, sentence_ids in enumerate(sentences_ids):
131 | sentence_length = len(sentence_ids)
132 | input_masks.append([1] * sentence_length)
133 | segment_ids.append([0] * sentence_length)
134 |
135 | while sentence_length < max_seq_length:
136 | sentences_ids[i].append(0)
137 | input_masks[i].append(0)
138 | segment_ids[i].append(0)
139 | sentence_length += 1
140 | label_id = random.randint(0, 1)
141 | features.append(
142 | InputFeatures(input_ids=sentences_ids,
143 | input_mask=input_masks,
144 | segment_ids=segment_ids,
145 | label_id=label_id,
146 | example_id=i))
147 | features = features[:len(features) // 3 * 3]
148 | return features
149 |
150 |
151 | def convert_examples_to_features(examples, max_seq_length, tokenizer):
152 | """Loads a data file into a list of `InputBatch`s."""
153 | features = []
154 | lt_7, len_gt_2, len_lt_1, len_1_2 = 0, 0, 0, 0
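# lt_7 counts examples with fewer than 7 sentences on a side; the other three
# counters bucket tokenized sentence lengths (len_lt_1: <=100 tokens,
# len_1_2: 100-150, len_gt_2: >=150) and are only printed at the end.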
155 | for (ex_index, example) in enumerate(examples):
156 | input_ids, input_masks, segment_ids = [], [], []
157 | min_length = min(len(example.text_b), len(example.text_a))
158 | if min_length < 7:
159 | lt_7 += 1
160 | text_a = example.text_a[:min_length]
161 | text_b = example.text_b[:min_length]
162 |
163 | for i, sent in enumerate(chain(text_a, text_b)):
164 | sent_tokens = ['[CLS]'] + tokenizer.tokenize(sent)[:max_seq_length - 2] + ['[SEP]']
165 | length = len(sent_tokens)
166 | if 150 > length > 100:
167 | len_1_2 += 1
168 | elif length <= 100:
169 | len_lt_1 += 1
170 | else:
171 | len_gt_2 += 1
172 | sent_segment_ids = [0] * length
173 | sent_input_masks = [1] * length
174 | sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens)
175 |
176 | while length < max_seq_length:
177 | sent_input_ids.append(0)
178 | sent_input_masks.append(0)
179 | sent_segment_ids.append(0)
180 | length += 1
181 |
182 | assert len(sent_segment_ids) == len(sent_input_ids) == len(sent_input_masks)
183 | input_ids.append(sent_input_ids)
184 | input_masks.append(sent_input_masks)
185 | segment_ids.append(sent_segment_ids)
186 |
187 | features.append(
188 | InputFeatures(input_ids=input_ids,
189 | input_mask=input_masks,
190 | segment_ids=segment_ids,
191 | label_id=example.label,
192 | example_id=example.id))
193 | print(f'feature example input_ids:{features[-1].input_ids}')
194 | print(f'feature example input_mask:{features[-1].input_mask}')
195 | print(f'feature example segment_ids:{features[-1].segment_ids}')
196 |
197 | print(f'total features {len(features)} lt_7 {lt_7}')
198 | print(len_gt_2, len_lt_1, len_1_2)
199 | return features
200 |
201 |
202 | def main():
203 | parser = argparse.ArgumentParser()
204 |
205 | # Required parameters
206 | parser.add_argument("--train_file",
207 | default=None,
208 | type=str,
209 | required=True,
210 | help="The train file path")
211 | parser.add_argument("--eval_file",
212 | default=None,
213 | type=str,
214 | required=True,
215 | help="The dev file path")
216 | parser.add_argument("--predict_file",
217 | default=None,
218 | type=str,
219 | required=False,
220 | help="The predict file path")
221 | parser.add_argument("--top_n",
222 | default=5,
223 | type=float,
224 | required=True,
225 | help="higher than threshold is classify 1,")
226 | parser.add_argument("--bert_config_file",
227 | default=None,
228 | type=str,
229 | required=True,
230 | help="The config json file corresponding to the pre-trained BERT model. \n"
231 | "This specifies the model architecture.")
232 | parser.add_argument("--bert_model",
233 | default=None,
234 | type=str,
235 | required=True,
236 | help="The config json file corresponding to the pre-trained BERT model. \n"
237 | "This specifies the model architecture.")
238 | parser.add_argument("--result_file",
239 | default=None,
240 | type=str,
241 | required=False,
242 | help="The result file that the BERT model was trained on.")
243 | parser.add_argument("--vocab_file",
244 | default=None,
245 | type=str,
246 | required=True,
247 | help="The vocabulary file that the BERT model was trained on.")
248 | parser.add_argument("--output_dir",
249 | default=None,
250 | type=str,
251 | required=True,
252 | help="The output directory where the model checkpoints will be written.")
253 | # Other parameters
254 | parser.add_argument("--init_checkpoint",
255 | default=None,
256 | type=str,
257 | help="Initial checkpoint (usually from a pre-trained BERT model).")
258 | parser.add_argument("--do_lower_case",
259 | default=False,
260 | action='store_true',
261 | help="Whether to lower case the input text.")
262 | parser.add_argument("--max_seq_length",
263 | default=200,
264 | type=int,
265 | help="maximum total input sequence length after WordPiece tokenization.")
266 | parser.add_argument("--do_train",
267 | default=False,
268 | action='store_true',
269 | help="Whether to run training.")
270 | parser.add_argument("--do_predict",
271 | default=False,
272 | action='store_true',
273 | help="Whether to run eval on the dev set.")
274 | parser.add_argument("--num_labels", default=1, type=int, help="mapping classify nums")
275 | parser.add_argument("--train_batch_size",
276 | default=32,
277 | type=int,
278 | help="Total batch size for training.")
279 | parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.")
280 | parser.add_argument("--learning_rate",
281 | default=5e-5,
282 | type=float,
283 | help="The initial learning rate for Adam.")
284 | parser.add_argument("--num_train_epochs",
285 | default=6.0,
286 | type=float,
287 | help="Total number of training epochs to perform.")
288 | parser.add_argument("--warmup_proportion",
289 | default=0.1,
290 | type=float,
291 | help="Proportion of training to perform linear learning rate warmup for. "
292 | "E.g., 0.1 = 10%% of training.")
293 | parser.add_argument("--save_checkpoints_steps",
294 | default=1000,
295 | type=int,
296 | help="How often to save the model checkpoint.")
297 | parser.add_argument("--no_cuda",
298 | default=False,
299 | action='store_true',
300 | help="Whether not to use CUDA when available")
301 | parser.add_argument("--local_rank",
302 | type=int,
303 | default=-1,
304 | help="local_rank for distributed training on gpus")
305 | parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
306 | parser.add_argument('--gradient_accumulation_steps',
307 | type=int,
308 | default=1,
309 | help="Number of updates steps to accumualte before")
310 | parser.add_argument('--optimize_on_cpu',
311 | default=False,
312 | action='store_true',
313 | help="Whether to perform optimization and averages on CPU")
314 | parser.add_argument('--fp16',
315 | default=False,
316 | action='store_true',
317 | help="Whether to use 16-bit float precision instead of 32-bit")
318 | parser.add_argument('--loss_scale',
319 | type=float,
320 | default=128,
321 | help='Loss scale, positive power of 2 can improve fp16 convergence.')
322 |
323 | args = parser.parse_args()
324 |
325 | def get_fake_features(data_size, max_seq_length):
326 | features = create_fake_data_features(data_size=data_size, max_seq_length=max_seq_length)
327 | input_ids = [f.input_ids for f in features]
328 | input_mask = [f.input_mask for f in features]
329 | segment_ids = [f.segment_ids for f in features]
330 | label_ids = [f.label_id for f in features]
331 | ids = [f.example_id for f in features]
332 | data = (input_ids, input_mask, segment_ids, label_ids, ids)
333 | return data
334 |
335 | data_processor = DataProcessor(args.num_labels)
336 | if args.local_rank == -1 or args.no_cuda:
337 | device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
338 | n_gpu = torch.cuda.device_count()
339 | else:
340 | device = torch.device("cuda", args.local_rank)
341 | n_gpu = 1
342 | # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
343 | torch.distributed.init_process_group(backend='nccl')
344 | if args.fp16:
345 | logger.info("16-bits training currently not supported in distributed training")
346 | args.fp16 = False # (see https://github.com/pytorch/pytorch/pull/13496)
347 | logger.info("device %s n_gpu %d distributed training %r", device, n_gpu,
348 | bool(args.local_rank != -1))
349 |
350 | if args.gradient_accumulation_steps < 1:
351 | raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
352 | args.gradient_accumulation_steps))
353 |
354 | args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
355 |
356 | print(f'args.train_batch_size = {args.train_batch_size}')
357 | random.seed(args.seed)
358 | np.random.seed(args.seed)
359 | torch.manual_seed(args.seed)
360 | if n_gpu > 0:
361 | torch.cuda.manual_seed_all(args.seed)
362 |
363 | if not args.do_train and not args.do_predict:
364 | raise ValueError("At least one of `do_train` or `do_predict` must be True.")
365 |
366 | bert_config = BertConfig.from_json_file(args.bert_config_file)
367 |
368 | if args.max_seq_length > bert_config.max_position_embeddings:
369 | raise ValueError(
370 | "Cannot use sequence length {} because the BERT model was only trained up to sequence length {}"
371 | .format(args.max_seq_length, bert_config.max_position_embeddings))
372 |
373 | if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
374 | raise ValueError("Output directory ({}) already exists and is not empty.".format(
375 | args.output_dir))
376 |
377 | if args.do_train:
378 | os.makedirs(args.output_dir, exist_ok=True)
379 |
380 | tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file,
381 | do_lower_case=args.do_lower_case)
382 |
383 | def prepare_data(args, task_name='train'):
384 | file_path = args.train_file if task_name == 'train' else args.eval_file
385 | if os.path.isdir(file_path):
386 | examples = data_processor.read_file_dir(file_path, top_n=args.top_n)
387 | else:
388 | examples, example_map_ids = data_processor.read_novel_examples(file_path,
389 | top_n=args.top_n,
390 | task_name=task_name)
391 | features = convert_examples_to_features(examples, args.max_seq_length, tokenizer)
392 | all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
393 | all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
394 | all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
395 | all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
396 |
397 | if task_name in ['train', 'eval']:
398 | all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
399 | datas = TensorDataset(all_example_ids, all_input_ids, all_input_mask, all_segment_ids,
400 | all_label_ids)
401 | else:
402 | datas = TensorDataset(all_example_ids, all_input_ids, all_input_mask, all_segment_ids)
403 |
404 | if task_name == 'train':
405 | if args.local_rank == -1:
406 | data_sampler = RandomSampler(datas)
407 | else:
408 | data_sampler = DistributedSampler(datas)
409 | dataloader = DataLoader(datas,
410 | sampler=data_sampler,
411 | batch_size=args.train_batch_size,
412 | drop_last=True)
413 | else:
414 | dataloader = DataLoader(datas, batch_size=args.eval_batch_size, drop_last=True)
415 | return (dataloader, example_map_ids) if task_name != 'train' else dataloader
416 |
417 | def prepare_fake_data(data_size, max_seq_length, task_name='train'):
418 | features = create_fake_data_features(data_size=data_size, max_seq_length=max_seq_length)
419 | all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
420 | all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
421 | all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
422 | if task_name in ['train', 'eval']:
423 | all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
424 | datas = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
425 | else:
426 | datas = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
427 | if task_name == 'train':
428 | if args.local_rank == -1:
429 | data_sampler = RandomSampler(datas)
430 | else:
431 | data_sampler = DistributedSampler(datas)
432 | dataloader = DataLoader(datas,
433 | sampler=data_sampler,
434 | batch_size=args.train_batch_size,
435 | drop_last=True)
436 | else:
437 | dataloader = DataLoader(datas, batch_size=args.eval_batch_size, drop_last=True)
438 | return dataloader
439 |
440 | def accuracy(example_ids, logits, probs=None):
441 | logits = logits.tolist()
442 | example_ids = example_ids.tolist()
443 | assert len(logits) == len(example_ids)
444 | classify_name = ['no_answer', 'yes_answer']
445 | labels, text_a, text_b, novel_names = [], [], [], []
446 | for i in example_ids:
447 | example = example_map_ids[i]
448 | labels.append(example.label)
449 | text_a.append("||".join(example.text_a))
450 | text_b.append("||".join(example.text_b))
451 | novel_names.append(example.name)
452 |
453 | write_data = pd.DataFrame({
454 | "text_a": text_a,
455 | "text_b": text_b,
456 | "labels": labels,
457 | "logits": logits,
458 | "novel_names": novel_names
459 | })
460 | write_data['yes_or_no'] = write_data['labels'] == write_data['logits']
461 | if probs is not None:
462 | write_data['logits'] = probs.tolist()
463 | write_data.to_csv(args.result_file, index=False)
464 | assert len(labels) == len(logits)
465 | result = classification_report(labels, logits, target_names=classify_name)
466 | return result
467 |
468 | def eval_model(model, eval_dataloader, device):
469 | model.eval()
470 | eval_loss = 0
471 | all_logits = []
472 | all_example_ids = []
473 | all_probs = []
474 | accuracy_result = None
475 | batch_count = 0
476 | for step, batch in enumerate(tqdm(eval_dataloader, desc="evaluating")):
477 | example_ids, input_ids, input_mask, segment_ids, label_ids = batch
478 | if not args.do_train:
479 | label_ids = None
480 | with torch.no_grad():
481 | tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, labels=label_ids)
482 | argmax_logits = torch.argmax(logits, dim=1)
483 | first_indices = torch.arange(argmax_logits.size()[0])
484 | logits_probs = logits[first_indices, argmax_logits]
485 | if args.do_train:
486 | eval_loss += tmp_eval_loss.mean().item()
487 | all_logits.append(argmax_logits)
488 | all_example_ids.append(example_ids)
489 | else:
490 | all_logits.append(argmax_logits)
491 | all_example_ids.append(example_ids)
492 | all_probs.append(logits_probs)
493 | batch_count += 1
494 | if all_logits:
495 | all_logits = torch.cat(all_logits, dim=0)
496 | all_example_ids = torch.cat(all_example_ids, dim=0)
497 | all_probs = torch.cat(all_probs, dim=0) if len(all_probs) else None
498 | accuracy_result = accuracy(all_example_ids, all_logits, probs=all_probs)
499 | eval_loss /= batch_count
500 | return eval_loss, accuracy_result, all_logits
501 |
502 | train_dataloader = None
503 | num_train_steps = None
504 | if args.do_train:
505 | # train_dataloader = prepare_fake_data(400, args.max_seq_length, 'train')
506 | train_dataloader = prepare_data(args, task_name='train')
507 | num_train_steps = int(
508 | len(train_dataloader) / args.gradient_accumulation_steps * args.num_train_epochs)
509 |
510 | model = TwoSentenceClassifier.from_pretrained(args.bert_model,
511 | num_labels=data_processor.num_labels)
512 | if args.fp16:
513 | model.half()
514 |
515 | if args.do_predict:
516 | model_path = os.path.join(args.output_dir, WEIGHTS_NAME)
517 | new_state_dict = torch.load(model_path)
518 | new_state_dict = dict([
519 | (k[7:], v) if k.startswith('module') else (k, v) for k, v in new_state_dict.items()
520 | ])
521 | model.load_state_dict(new_state_dict)
522 |
523 | model.to(device)
524 | if args.local_rank != -1:
525 | model = torch.nn.parallel.DistributedDataParallel(model,
526 | device_ids=[args.local_rank],
527 | output_device=args.local_rank)
528 | elif n_gpu > 1:
529 | model = torch.nn.DataParallel(model)
530 |
531 | if args.fp16:
532 | param_optimizer = [(n, param.clone().detach().to('cpu').float().requires_grad_())
533 | for n, param in model.named_parameters()]
534 | elif args.optimize_on_cpu:
535 | param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_())
536 | for n, param in model.named_parameters()]
537 | else:
538 | param_optimizer = list(model.named_parameters())
539 | no_decay = ['bias', 'gamma', 'beta']
540 | optimizer_grouped_parameters = [{
541 | 'params': [p for n, p in param_optimizer if n not in no_decay],
542 | 'weight_decay': 0.01
543 | }, {
544 | 'params': [p for n, p in param_optimizer if n in no_decay],
545 | 'weight_decay': 0.0
546 | }]
547 |
548 | eval_dataloader, example_map_ids = prepare_data(args, task_name='eval')
549 |
550 | global_step = 0
551 | if args.do_train:
552 | optimizer = BertAdam(optimizer_grouped_parameters,
553 | lr=args.learning_rate,
554 | warmup=args.warmup_proportion,
555 | t_total=num_train_steps)
556 |
557 | output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
558 | eval_loss, acc, _ = eval_model(model, eval_dataloader, device)
559 | logger.info(f'initial dev loss: {eval_loss}')
560 | print(f'{acc}')
561 | model.train()
562 | for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
563 | torch.cuda.empty_cache()
564 | model_save_path = os.path.join(args.output_dir, f"{WEIGHTS_NAME}.{epoch}")
565 | tr_loss = 0
566 | train_batch_count = 0
567 | for step, batch in enumerate(tqdm(train_dataloader, desc="training")):
568 | _, input_ids, input_mask, segment_ids, label_ids = batch
569 | loss, _ = model(input_ids, segment_ids, input_mask, labels=label_ids)
570 | if n_gpu > 1:
571 | loss = loss.mean()
572 | if args.fp16 and args.loss_scale != 1.0:
573 | loss = loss * args.loss_scale
574 | if args.gradient_accumulation_steps > 1:
575 | loss = loss / args.gradient_accumulation_steps
576 |
577 | loss.backward()
578 | tr_loss += loss.item()
579 | if (step + 1) % args.gradient_accumulation_steps == 0:
580 | optimizer.step()
581 | model.zero_grad()
582 | global_step += 1
583 | train_batch_count += 1
584 | tr_loss /= train_batch_count
585 | eval_loss, acc, _ = eval_model(model, eval_dataloader, device)
586 |
587 | logger.info(
588 | f'train loss: {tr_loss}, dev loss: {eval_loss}, epoch: {epoch + 1}/{int(args.num_train_epochs)}'
589 | )
590 | logger.info(f'acc={acc}')
591 | model_to_save = model.module if hasattr(model, 'module') else model
592 | torch.save(model_to_save.state_dict(), model_save_path)
593 | if epoch == 0:
594 | model_to_save.config.to_json_file(output_config_file)
595 | tokenizer.save_vocabulary(args.output_dir)
596 |
597 | if args.do_predict:
598 | _, result, _ = eval_model(model, eval_dataloader, device)
599 | print(result)
600 |
601 |
602 | if __name__ == "__main__":
603 | main()
604 |
--------------------------------------------------------------------------------
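Note: the training loop above writes one checkpoint per epoch as pytorch_model.bin.{epoch} under --output_dir, while the --do_predict branch loads a plain pytorch_model.bin from the same directory, so the chosen epoch's file has to be renamed or loaded explicitly. Below is a minimal sketch of loading one of those per-epoch checkpoints outside the script; the BERT model name, output directory and epoch index are illustrative assumptions, not values taken from the repo.

import os
import sys
import torch

sys.path.append('common_file')                   # assumption: run from the repo root
from modeling import TwoSentenceClassifier

output_dir = 'models/two_sentences_classifier'   # assumed --output_dir
epoch = 2                                        # assumed: the epoch that scored best on the dev set

# assumed bert_model and num_labels; the training script takes these from args / data_processor
model = TwoSentenceClassifier.from_pretrained('bert-base-chinese', num_labels=2)
state_dict = torch.load(os.path.join(output_dir, f'pytorch_model.bin.{epoch}'), map_location='cpu')
# strip the 'module.' prefix left by DataParallel, as the --do_predict branch does
state_dict = {(k[7:] if k.startswith('module') else k): v for k, v in state_dict.items()}
model.load_state_dict(state_dict)
model.eval()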
/two_sentences_classifier/word_embeddings.py:
--------------------------------------------------------------------------------
1 | """Embeddings"""
2 |
3 | import os
4 | import sys
5 | import re
6 | from itertools import chain
7 | import random
8 |
9 | import torch
10 |
11 | sys.path.append("../common_file")
12 |
13 | from modeling import TwoSentenceClassifier, BertConfig, RelationClassifier
14 | import tokenization
15 |
16 |
17 | key_word = {"…": "...", "—": "-", "“": "\"", "”": "\"", "‘": "'", "’": "'"}
18 |
19 |
20 | class EmbeddingsModel(object):
21 |
22 | def __init__(self, model_path):
23 | """ to obtain a sentence embeddings model
24 | model_path: path to the trained model weights
25 | """
26 | self.model_path = model_path
27 | self.init_model(TwoSentenceClassifier)
28 |
29 | def replace_text(self, text):
30 | for key, value in key_word.items():
31 | text = re.sub(key, value, text)
32 | return text
33 |
34 | def init_model(self, model):
35 | print(f'starting to init model')
36 | vocab_path = os.path.join(self.model_path, 'vocab.txt')
37 | bert_config_file = os.path.join(self.model_path, 'bert_config.json')
38 | self.bert_config = BertConfig.from_json_file(bert_config_file)
39 | self.model = model(self.bert_config, 2, moe=True)
40 | weight_path = os.path.join(self.model_path, 'pytorch_model.bin')
41 | new_state_dict = torch.load(weight_path, map_location='cuda:1')
42 | new_state_dict = dict([
43 | (k[7:], v) if k.startswith('module') else (k, v) for k, v in new_state_dict.items()
44 | ])
45 | self.model.load_state_dict(new_state_dict)
46 | self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_path)
47 | self.device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
48 | self.model.to(self.device)
49 | self.model.eval()
50 | print(f'init {model} model finished')
51 |
52 | def convert_examples_to_features(self, sentences: list, max_seq_length=150, **kwargs):
53 | """convert sentences to padded input features"""
54 | all_input_ids, all_input_masks, all_segment_ids = [], [], []
55 | for (ex_index, sent) in enumerate(sentences):
56 | sent = self.replace_text(sent)
57 | sent_tokens = ['[CLS]'] + self.tokenizer.tokenize(sent)[:max_seq_length - 2] + ['[SEP]']
58 | length = len(sent_tokens)
59 | sent_segment_ids = [0] * length
60 | sent_input_masks = [1] * length
61 | sent_input_ids = self.tokenizer.convert_tokens_to_ids(sent_tokens)
62 | while length < max_seq_length:
63 | sent_input_ids.append(0)
64 | sent_input_masks.append(0)
65 | sent_segment_ids.append(0)
66 | length += 1
67 | assert len(sent_segment_ids) == len(sent_input_ids) == len(sent_input_masks)
68 | all_input_ids.append(torch.tensor(sent_input_ids).view(1, -1))
69 | all_input_masks.append(torch.tensor(sent_input_masks).view(1, -1))
70 | all_segment_ids.append(torch.tensor(sent_segment_ids).view(1, -1))
71 | return all_input_ids, all_input_masks, all_segment_ids
72 |
73 | def embeddings(self, sentences: list, batch_size=30, **kwargs):
74 | """
75 | **kwargs:
76 | batch_size: number of sentences encoded per batch
77 | max_seq_length: maximum sentence length
78 | split: separator symbol; if given, features are built for the relation model
79 | """
80 | all_input_ids, all_input_mask, all_segment_ids = self.convert_examples_to_features(
81 | sentences, **kwargs)
82 | output_vectors = []
83 | # print(f'all_input_ids = {len(all_input_ids)}')
84 | with torch.no_grad():
85 | for i in range(0, len(all_input_ids), batch_size):
86 | if i % batch_size == 0:
87 | input_ids = torch.cat(all_input_ids[i:i + batch_size],
88 | dim=0).to(self.device).unsqueeze(0)
89 | segment_ids = torch.cat(all_segment_ids[i:i + batch_size],
90 | dim=0).to(self.device).unsqueeze(0)
91 | input_mask = torch.cat(all_input_mask[i:i + batch_size],
92 | dim=0).to(self.device).unsqueeze(0)
93 | # 1 * 1 * 768
94 | output_vector = self.model(input_ids, segment_ids, input_mask, embedding=True)
95 | # print(f'output_vector: {output_vector.shape}')
96 | output_vectors.append(output_vector)
97 | # b * 768
98 | output_vectors = torch.cat(output_vectors, dim=1).squeeze()
99 | # vector_mode: bsz
100 | # sentences_vector_modes = torch.sqrt((output_vectors * output_vectors).sum(-1)).squeeze()
101 | # sentences_mean_vector = output_vectors.mean(0).squeeze()
102 | # assert len(sentences_mean_vector) == self.bert_config.hidden_size
103 | return output_vectors, None
104 |
105 | def attention(self, sentences: list, batch_size=30, **kwargs):
106 | """
107 | **kwargs:
108 | batch_size: number of sentences encoded per batch
109 | max_seq_length: maximum sentence length
110 | split: separator symbol; if given, features are built for the relation model
111 | """
112 | all_input_ids, all_input_mask, all_segment_ids, tokens = self.convert_examples_to_features(
113 | sentences, return_token=True, **kwargs)
114 | output_token_modes, outputs_token_mean = [], []
115 | with torch.no_grad():
116 | for i in range(0, len(all_input_ids), batch_size):
117 | if i % batch_size == 0:
118 | input_ids = torch.cat(all_input_ids[i:i + batch_size],
119 | dim=0).to(self.device).unsqueeze(0)
120 | segment_ids = torch.cat(all_segment_ids[i:i + batch_size],
121 | dim=0).to(self.device).unsqueeze(0)
122 | input_mask = torch.cat(all_input_mask[i:i + batch_size],
123 | dim=0).to(self.device).unsqueeze(0)
124 | mask_sequence_output, output_vectors = self.model(
125 | input_ids,
126 | segment_ids,
127 | input_mask,
128 | attention=True,
129 | token_mean=kwargs.get('token_mean'))
130 | output_token_mode = torch.norm(mask_sequence_output, dim=-1)
131 | output_token_mean = torch.norm(output_vectors, dim=-1)
132 |
133 | output_token_modes.append(output_token_mode)
134 | outputs_token_mean.append(output_token_mean)
135 | output_token_modes = torch.cat(output_token_modes, dim=0).squeeze()
136 | outputs_token_mean = torch.cat(outputs_token_mean, dim=0).squeeze()
137 |
138 | tokens_modes_list = []
139 | output_token_modes = output_token_modes.cpu()
140 | outputs_token_mean = outputs_token_mean.cpu()
141 | person_pairs = kwargs.get('person_pairs')
142 | for index, (mode, mean_mode) in enumerate(zip(output_token_modes, outputs_token_mean)):
143 | sent_tokens = tokens[index]
144 | sent_modes = mode[mode > 0].tolist()
145 | assert len(sent_tokens) == len(sent_modes)
146 | sent_token_modes = [(token, mode) for token, mode in zip(sent_tokens, sent_modes)]
147 | sent_token_modes_dict = {}
148 | if isinstance(person_pairs, list):
149 | sent_token_modes_dict['persons'] = person_pairs[index]
150 | sent_token_modes_dict['token_modes_mean'] = mean_mode.tolist()
151 | sent_token_modes_dict['token_modes'] = sent_token_modes
152 | tokens_modes_list.append(sent_token_modes_dict)
153 | return tokens_modes_list
154 |
155 |
156 | class RelationModelEmbeddings(EmbeddingsModel):
157 | """
158 | embeddings from the relation classifier
159 | """
160 |
161 | def __init__(self, model_path):
162 | """ to obtain a sentence embeddings model
163 | model_path: path to the trained model weights
164 | """
165 | self.model_path = model_path
166 | self.init_model(RelationClassifier)
167 |
168 | def convert_examples_to_features(self,
169 | sentences: list,
170 | max_seq_length=150,
171 | split='||',
172 | **kwargs):
173 | """convert sentence pairs to padded input features"""
174 | input_ids, input_masks, segment_ids = [], [], []
175 | tokens = []
176 | for (ex_index, sent) in enumerate(sentences):
177 | sent = self.replace_text(sent)
178 | sents = sent.split(split)
179 | if len(sents) != 2:
180 | continue
181 | sents[0] = sents[0][:120].replace('"', '')
182 | sents[1] = sents[1][:120].replace('"', '')
183 | sents_token = [self.tokenizer.tokenize(s) for s in sents]
184 | sent_segment_ids = [0] * (len(sents_token[0]) + 2) + [1] * (len(sents_token[1]) + 1)
185 | sents_token = sents_token[0] + ['[SEP]'] + sents_token[1]
186 | sents_token = sents_token[:max_seq_length - 2]
187 | sent_segment_ids = sent_segment_ids[:max_seq_length]
188 | sents_token = ['[CLS]'] + sents_token + ['[SEP]']
189 | length = len(sents_token)
190 | sent_input_masks = [1] * length
191 | sent_input_ids = self.tokenizer.convert_tokens_to_ids(sents_token)
192 | tokens.append(sents_token)
193 | while length < max_seq_length:
194 | sent_input_ids.append(0)
195 | sent_input_masks.append(0)
196 | sent_segment_ids.append(0)
197 | length += 1
198 | assert len(sent_segment_ids) == len(sent_input_ids) == len(sent_input_masks)
199 | input_ids.append(torch.tensor(sent_input_ids).view(1, -1))
200 | input_masks.append(torch.tensor(sent_input_masks).view(1, -1))
201 | segment_ids.append(torch.tensor(sent_segment_ids).view(1, -1))
202 | if kwargs.get("return_token"):
203 | return input_ids, input_masks, segment_ids, tokens
204 | return input_ids, input_masks, segment_ids
205 |
206 | def classifiy(self, sentences: list, chunk_nums=7, split='||', **kwargs):
207 | """
208 | sentences: [[a||b, a||b, .....], [a||c, a||c, .....]] or [a||b, ......, a||c]
209 | chunk_nums: number of sentence pairs per group
210 | """
211 | if isinstance(sentences[0], list):
212 | sentences = chain(*sentences)
213 |
214 | # assert len(sentences) == sent_nums, 'sentence list length must equal to sent_nums'
215 | input_ids, input_mask, segment_ids = [
216 | torch.cat(i).unsqueeze(0).to(self.device)
217 | for i in self.convert_examples_to_features(sentences, split=split, **kwargs)
218 | ]
219 | with torch.no_grad():
220 | output_vectors, logits = self.model(input_ids,
221 | segment_ids,
222 | input_mask,
223 | embedding=False,
224 | chunk_nums=chunk_nums)
225 | pred_label = torch.argmax(logits)
226 | sentences_vector_modes = torch.sqrt((output_vectors * output_vectors).sum(-1)).squeeze()
227 | return sentences_vector_modes, pred_label
228 |
--------------------------------------------------------------------------------