├── BERT_analysis.ipynb
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── __init__.py
├── bert-mgpu-env.yml
├── create_pretraining_data.py
├── data
│   └── metacritic
│       └── train.tsv
├── extract_features.py
├── get_embedding.py
├── join_result.py
├── modeling.py
├── modeling_test.py
├── multilingual.md
├── optimization.py
├── optimization_hvd.py
├── optimization_test.py
├── pretrain_data
│   └── sample_text.txt
├── read_data.py
├── requirements.txt
├── run_classifier.py
├── run_classifier_hvd.py
├── run_meta.sh
├── run_predict.sh
├── run_pretraining.py
├── run_pretraining.sh
├── run_pretraining_hvd.py
├── run_squad.py
├── run_squad_hvd.py
├── test.tsv
├── tokenization.py
└── tokenization_test.py
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | BERT needs to maintain permanent compatibility with the pre-trained model files,
4 | so we do not plan to make any major changes to this library (other than what was
5 | promised in the README). However, we can accept small patches related to
6 | re-factoring and documentation. To submit contributions, there are just a few
7 | small guidelines you need to follow.
8 |
9 | ## Contributor License Agreement
10 |
11 | Contributions to this project must be accompanied by a Contributor License
12 | Agreement. You (or your employer) retain the copyright to your contribution;
13 | this simply gives us permission to use and redistribute your contributions as
14 | part of the project. Head over to <https://cla.developers.google.com/> to see
15 | your current agreements on file or to sign a new one.
16 |
17 | You generally only need to submit a CLA once, so if you've already submitted one
18 | (even if it was for a different project), you probably don't need to do it
19 | again.
20 |
21 | ## Code reviews
22 |
23 | All submissions, including submissions by project members, require review. We
24 | use GitHub pull requests for this purpose. Consult
25 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
26 | information on using pull requests.
27 |
28 | ## Community Guidelines
29 |
30 | This project follows
31 | [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 |
--------------------------------------------------------------------------------
/bert-mgpu-env.yml:
--------------------------------------------------------------------------------
1 | name: bert-mgpu-env
2 | channels:
3 | - defaults
4 | dependencies:
5 | - ca-certificates=2019.5.15=0
6 | - certifi=2019.3.9=py37_0
7 | - libedit=3.1.20181209=hc058e9b_0
8 | - libffi=3.2.1=hd88cf55_4
9 | - libgcc-ng=8.2.0=hdf63c60_1
10 | - libstdcxx-ng=8.2.0=hdf63c60_1
11 | - ncurses=6.1=he6710b0_1
12 | - openssl=1.1.1c=h7b6447c_1
13 | - pip=19.1.1=py37_0
14 | - python=3.7.3=h0371630_0
15 | - readline=7.0=h7b6447c_5
16 | - setuptools=41.0.1=py37_0
17 | - sqlite=3.28.0=h7b6447c_0
18 | - tk=8.6.8=hbc83047_0
19 | - wheel=0.33.4=py37_0
20 | - xz=5.2.4=h14c3975_4
21 | - zlib=1.2.11=h7b6447c_3
22 | - pip:
23 | - absl-py==0.7.1
24 | - astor==0.8.0
25 | - attrs==19.1.0
26 | - bert-serving-client==1.9.6
27 | - bert-serving-server==1.9.6
28 | - blis==0.2.4
29 | - cffi==1.12.3
30 | - chardet==3.0.4
31 | - cloudpickle==1.2.0
32 | - cycler==0.10.0
33 | - cymem==2.0.2
34 | - en-core-web-sm==2.1.0
35 | - gast==0.2.2
36 | - gputil==1.4.0
37 | - grpcio==1.21.1
38 | - h5py==2.9.0
39 | - horovod==0.16.3
40 | - idna==2.8
41 | - joblib==0.13.2
42 | - jsonschema==3.0.1
43 | - keras-applications==1.0.8
44 | - keras-preprocessing==1.1.0
45 | - kiwisolver==1.1.0
46 | - llvmlite==0.29.0
47 | - markdown==3.1.1
48 | - matplotlib==3.1.0
49 | - mock==3.0.5
50 | - murmurhash==1.0.2
51 | - numba==0.44.1
52 | - numpy==1.16.4
53 | - pandas==0.24.2
54 | - plac==0.9.6
55 | - preshed==2.0.1
56 | - protobuf==3.8.0
57 | - psutil==5.6.2
58 | - pycparser==2.19
59 | - pyparsing==2.4.0
60 | - pyrsistent==0.15.2
61 | - python-dateutil==2.8.0
62 | - pytz==2019.1
63 | - pyzmq==18.0.1
64 | - requests==2.22.0
65 | - scikit-learn==0.21.2
66 | - scipy==1.3.0
67 | - sentencepiece==0.1.82
68 | - six==1.12.0
69 | - spacy==2.1.4
70 | - srsly==0.0.7
71 | - tensorboard==1.13.1
72 | - tensorflow-estimator==1.13.0
73 | - tensorflow-gpu==1.13.1
74 | - termcolor==1.1.0
75 | - thinc==7.0.4
76 | - tqdm==4.32.2
77 | - umap-learn==0.3.9
78 | - urllib3==1.25.3
79 | - wasabi==0.2.2
80 | - werkzeug==0.15.4
81 | prefix: /home/ydu/miniconda3/envs/bert-mgpu-env
82 |
83 |
--------------------------------------------------------------------------------
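Note: to reproduce this environment, `conda env create -f bert-mgpu-env.yml` followed by `conda activate bert-mgpu-env` should work on Linux; the machine-specific `prefix:` line can stay as-is, since conda keys off the `name:` field when creating the environment on another machine.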
/create_pretraining_data.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Create masked LM/next sentence masked_lm TF examples for BERT."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import collections
22 | import random
23 | import tensorflow as tf
24 | import tokenization
25 |
26 | flags = tf.flags
27 |
28 | FLAGS = flags.FLAGS
29 |
30 | flags.DEFINE_string("input_file", None,
31 | "Input raw text file (or comma-separated list of files).")
32 |
33 | flags.DEFINE_string(
34 | "output_file", None,
35 | "Output TF example file (or comma-separated list of files).")
36 |
37 | flags.DEFINE_string("vocab_file", None,
38 | "The vocabulary file that the BERT model was trained on.")
39 |
40 | flags.DEFINE_bool(
41 | "do_lower_case", True,
42 | "Whether to lower case the input text. Should be True for uncased "
43 | "models and False for cased models.")
44 |
45 | flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
46 |
47 | flags.DEFINE_integer("max_predictions_per_seq", 20,
48 | "Maximum number of masked LM predictions per sequence.")
49 |
50 | flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
51 |
52 | flags.DEFINE_integer(
53 | "dupe_factor", 10,
54 | "Number of times to duplicate the input data (with different masks).")
55 |
56 | flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
57 |
58 | flags.DEFINE_float(
59 | "short_seq_prob", 0.1,
60 | "Probability of creating sequences which are shorter than the "
61 | "maximum length.")
62 |
63 |
64 | class TrainingInstance(object):
65 | """A single training instance (sentence pair)."""
66 |
67 | def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
68 | is_random_next):
69 | self.tokens = tokens
70 | self.segment_ids = segment_ids
71 | self.is_random_next = is_random_next
72 | self.masked_lm_positions = masked_lm_positions
73 | self.masked_lm_labels = masked_lm_labels
74 |
75 | def __str__(self):
76 | s = ""
77 | s += "tokens: %s\n" % (" ".join(
78 | [tokenization.printable_text(x) for x in self.tokens]))
79 | s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
80 | s += "is_random_next: %s\n" % self.is_random_next
81 | s += "masked_lm_positions: %s\n" % (" ".join(
82 | [str(x) for x in self.masked_lm_positions]))
83 | s += "masked_lm_labels: %s\n" % (" ".join(
84 | [tokenization.printable_text(x) for x in self.masked_lm_labels]))
85 | s += "\n"
86 | return s
87 |
88 | def __repr__(self):
89 | return self.__str__()
90 |
91 |
92 | def write_instance_to_example_files(instances, tokenizer, max_seq_length,
93 | max_predictions_per_seq, output_files):
94 | """Create TF example files from `TrainingInstance`s."""
95 | writers = []
96 | for output_file in output_files:
97 | writers.append(tf.python_io.TFRecordWriter(output_file))
98 |
99 | writer_index = 0
100 |
101 | total_written = 0
102 | for (inst_index, instance) in enumerate(instances):
103 | input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
104 | input_mask = [1] * len(input_ids)
105 | segment_ids = list(instance.segment_ids)
106 | assert len(input_ids) <= max_seq_length
107 |
108 | while len(input_ids) < max_seq_length:
109 | input_ids.append(0)
110 | input_mask.append(0)
111 | segment_ids.append(0)
112 |
113 | assert len(input_ids) == max_seq_length
114 | assert len(input_mask) == max_seq_length
115 | assert len(segment_ids) == max_seq_length
116 |
117 | masked_lm_positions = list(instance.masked_lm_positions)
118 | masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
119 | masked_lm_weights = [1.0] * len(masked_lm_ids)
120 |
121 | while len(masked_lm_positions) < max_predictions_per_seq:
122 | masked_lm_positions.append(0)
123 | masked_lm_ids.append(0)
124 | masked_lm_weights.append(0.0)
125 |
126 | next_sentence_label = 1 if instance.is_random_next else 0
127 |
128 | features = collections.OrderedDict()
129 | features["input_ids"] = create_int_feature(input_ids)
130 | features["input_mask"] = create_int_feature(input_mask)
131 | features["segment_ids"] = create_int_feature(segment_ids)
132 | features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
133 | features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
134 | features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
135 | features["next_sentence_labels"] = create_int_feature([next_sentence_label])
136 |
137 | tf_example = tf.train.Example(features=tf.train.Features(feature=features))
138 |
139 | writers[writer_index].write(tf_example.SerializeToString())
140 | writer_index = (writer_index + 1) % len(writers)
141 |
142 | total_written += 1
143 |
144 | if inst_index < 20:
145 | tf.logging.info("*** Example ***")
146 | tf.logging.info("tokens: %s" % " ".join(
147 | [tokenization.printable_text(x) for x in instance.tokens]))
148 |
149 | for feature_name in features.keys():
150 | feature = features[feature_name]
151 | values = []
152 | if feature.int64_list.value:
153 | values = feature.int64_list.value
154 | elif feature.float_list.value:
155 | values = feature.float_list.value
156 | tf.logging.info(
157 | "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
158 |
159 | for writer in writers:
160 | writer.close()
161 |
162 | tf.logging.info("Wrote %d total instances", total_written)
163 |
164 |
165 | def create_int_feature(values):
166 | feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
167 | return feature
168 |
169 |
170 | def create_float_feature(values):
171 | feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
172 | return feature
173 |
174 |
175 | def create_training_instances(input_files, tokenizer, max_seq_length,
176 | dupe_factor, short_seq_prob, masked_lm_prob,
177 | max_predictions_per_seq, rng):
178 | """Create `TrainingInstance`s from raw text."""
179 | all_documents = [[]]
180 |
181 | # Input file format:
182 | # (1) One sentence per line. These should ideally be actual sentences, not
183 | # entire paragraphs or arbitrary spans of text. (Because we use the
184 | # sentence boundaries for the "next sentence prediction" task).
185 | # (2) Blank lines between documents. Document boundaries are needed so
186 | # that the "next sentence prediction" task doesn't span between documents.
187 | for input_file in input_files:
188 | with tf.gfile.GFile(input_file, "r") as reader:
189 | while True:
190 | line = tokenization.convert_to_unicode(reader.readline())
191 | if not line:
192 | break
193 | line = line.strip()
194 |
195 | # Empty lines are used as document delimiters
196 | if not line:
197 | all_documents.append([])
198 | tokens = tokenizer.tokenize(line)
199 | if tokens:
200 | all_documents[-1].append(tokens)
201 |
202 | # Remove empty documents
203 | all_documents = [x for x in all_documents if x]
204 | rng.shuffle(all_documents)
205 |
206 | vocab_words = list(tokenizer.vocab.keys())
207 | instances = []
208 | for _ in range(dupe_factor):
209 | for document_index in range(len(all_documents)):
210 | instances.extend(
211 | create_instances_from_document(
212 | all_documents, document_index, max_seq_length, short_seq_prob,
213 | masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
214 |
215 | rng.shuffle(instances)
216 | return instances
217 |
218 |
219 | def create_instances_from_document(
220 | all_documents, document_index, max_seq_length, short_seq_prob,
221 | masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
222 | """Creates `TrainingInstance`s for a single document."""
223 | document = all_documents[document_index]
224 |
225 | # Account for [CLS], [SEP], [SEP]
226 | max_num_tokens = max_seq_length - 3
227 |
228 | # We *usually* want to fill up the entire sequence since we are padding
229 | # to `max_seq_length` anyways, so short sequences are generally wasted
230 | # computation. However, we *sometimes*
231 | # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
232 | # sequences to minimize the mismatch between pre-training and fine-tuning.
233 | # The `target_seq_length` is just a rough target however, whereas
234 | # `max_seq_length` is a hard limit.
235 | target_seq_length = max_num_tokens
236 | if rng.random() < short_seq_prob:
237 | target_seq_length = rng.randint(2, max_num_tokens)
238 |
239 | # We DON'T just concatenate all of the tokens from a document into a long
240 | # sequence and choose an arbitrary split point because this would make the
241 | # next sentence prediction task too easy. Instead, we split the input into
242 | # segments "A" and "B" based on the actual "sentences" provided by the user
243 | # input.
244 | instances = []
245 | current_chunk = []
246 | current_length = 0
247 | i = 0
248 | while i < len(document):
249 | segment = document[i]
250 | current_chunk.append(segment)
251 | current_length += len(segment)
252 | if i == len(document) - 1 or current_length >= target_seq_length:
253 | if current_chunk:
254 | # `a_end` is how many segments from `current_chunk` go into the `A`
255 | # (first) sentence.
256 | a_end = 1
257 | if len(current_chunk) >= 2:
258 | a_end = rng.randint(1, len(current_chunk) - 1)
259 |
260 | tokens_a = []
261 | for j in range(a_end):
262 | tokens_a.extend(current_chunk[j])
263 |
264 | tokens_b = []
265 | # Random next
266 | is_random_next = False
267 | if len(current_chunk) == 1 or rng.random() < 0.5:
268 | is_random_next = True
269 | target_b_length = target_seq_length - len(tokens_a)
270 |
271 | # This should rarely go for more than one iteration for large
272 | # corpora. However, just to be careful, we try to make sure that
273 | # the random document is not the same as the document
274 | # we're processing.
275 | for _ in range(10):
276 | random_document_index = rng.randint(0, len(all_documents) - 1)
277 | if random_document_index != document_index:
278 | break
279 |
280 | random_document = all_documents[random_document_index]
281 | random_start = rng.randint(0, len(random_document) - 1)
282 | for j in range(random_start, len(random_document)):
283 | tokens_b.extend(random_document[j])
284 | if len(tokens_b) >= target_b_length:
285 | break
286 | # We didn't actually use these segments so we "put them back" so
287 | # they don't go to waste.
288 | num_unused_segments = len(current_chunk) - a_end
289 | i -= num_unused_segments
290 | # Actual next
291 | else:
292 | is_random_next = False
293 | for j in range(a_end, len(current_chunk)):
294 | tokens_b.extend(current_chunk[j])
295 | truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
296 |
297 | assert len(tokens_a) >= 1
298 | assert len(tokens_b) >= 1
299 |
300 | tokens = []
301 | segment_ids = []
302 | tokens.append("[CLS]")
303 | segment_ids.append(0)
304 | for token in tokens_a:
305 | tokens.append(token)
306 | segment_ids.append(0)
307 |
308 | tokens.append("[SEP]")
309 | segment_ids.append(0)
310 |
311 | for token in tokens_b:
312 | tokens.append(token)
313 | segment_ids.append(1)
314 | tokens.append("[SEP]")
315 | segment_ids.append(1)
316 |
317 | (tokens, masked_lm_positions,
318 | masked_lm_labels) = create_masked_lm_predictions(
319 | tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
320 | instance = TrainingInstance(
321 | tokens=tokens,
322 | segment_ids=segment_ids,
323 | is_random_next=is_random_next,
324 | masked_lm_positions=masked_lm_positions,
325 | masked_lm_labels=masked_lm_labels)
326 | instances.append(instance)
327 | current_chunk = []
328 | current_length = 0
329 | i += 1
330 |
331 | return instances
332 |
333 |
334 | MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
335 | ["index", "label"])
336 |
337 |
338 | def create_masked_lm_predictions(tokens, masked_lm_prob,
339 | max_predictions_per_seq, vocab_words, rng):
340 | """Creates the predictions for the masked LM objective."""
341 |
342 | cand_indexes = []
343 | for (i, token) in enumerate(tokens):
344 | if token == "[CLS]" or token == "[SEP]":
345 | continue
346 | cand_indexes.append(i)
347 |
348 | rng.shuffle(cand_indexes)
349 |
350 | output_tokens = list(tokens)
351 |
352 | num_to_predict = min(max_predictions_per_seq,
353 | max(1, int(round(len(tokens) * masked_lm_prob))))
354 |
355 | masked_lms = []
356 | covered_indexes = set()
357 | for index in cand_indexes:
358 | if len(masked_lms) >= num_to_predict:
359 | break
360 | if index in covered_indexes:
361 | continue
362 | covered_indexes.add(index)
363 |
364 | masked_token = None
365 | # 80% of the time, replace with [MASK]
366 | if rng.random() < 0.8:
367 | masked_token = "[MASK]"
368 | else:
369 | # 10% of the time, keep original
370 | if rng.random() < 0.5:
371 | masked_token = tokens[index]
372 | # 10% of the time, replace with random word
373 | else:
374 | masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
375 |
376 | output_tokens[index] = masked_token
377 |
378 | masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
379 |
380 | masked_lms = sorted(masked_lms, key=lambda x: x.index)
381 |
382 | masked_lm_positions = []
383 | masked_lm_labels = []
384 | for p in masked_lms:
385 | masked_lm_positions.append(p.index)
386 | masked_lm_labels.append(p.label)
387 |
388 | return (output_tokens, masked_lm_positions, masked_lm_labels)
389 |
390 |
391 | def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
392 | """Truncates a pair of sequences to a maximum sequence length."""
393 | while True:
394 | total_length = len(tokens_a) + len(tokens_b)
395 | if total_length <= max_num_tokens:
396 | break
397 |
398 | trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
399 | assert len(trunc_tokens) >= 1
400 |
401 | # We want to sometimes truncate from the front and sometimes from the
402 | # back to add more randomness and avoid biases.
403 | if rng.random() < 0.5:
404 | del trunc_tokens[0]
405 | else:
406 | trunc_tokens.pop()
407 |
408 |
409 | def main(_):
410 | tf.logging.set_verbosity(tf.logging.INFO)
411 |
412 | tokenizer = tokenization.FullTokenizer(
413 | vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
414 |
415 | input_files = []
416 | for input_pattern in FLAGS.input_file.split(","):
417 | input_files.extend(tf.gfile.Glob(input_pattern))
418 |
419 | tf.logging.info("*** Reading from input files ***")
420 | for input_file in input_files:
421 | tf.logging.info(" %s", input_file)
422 |
423 | rng = random.Random(FLAGS.random_seed)
424 | instances = create_training_instances(
425 | input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
426 | FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
427 | rng)
428 |
429 | output_files = FLAGS.output_file.split(",")
430 | tf.logging.info("*** Writing to output files ***")
431 | for output_file in output_files:
432 | tf.logging.info(" %s", output_file)
433 |
434 | write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
435 | FLAGS.max_predictions_per_seq, output_files)
436 |
437 |
438 | if __name__ == "__main__":
439 | flags.mark_flag_as_required("input_file")
440 | flags.mark_flag_as_required("output_file")
441 | flags.mark_flag_as_required("vocab_file")
442 | tf.app.run()
443 |
--------------------------------------------------------------------------------
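A minimal sketch (an illustration added here, not part of the repo) of the 80%/10%/10% masking rule implemented by `create_masked_lm_predictions` above, assuming TensorFlow 1.x is installed and this directory is on `PYTHONPATH`:

    # With 7 tokens and masked_lm_prob=0.15, num_to_predict = max(1, round(1.05)) = 1,
    # so exactly one token other than [CLS]/[SEP] is masked, kept, or replaced.
    import random

    import create_pretraining_data as cpd

    rng = random.Random(12345)
    tokens = ["[CLS]", "the", "dog", "is", "hairy", ".", "[SEP]"]
    vocab_words = ["the", "dog", "is", "hairy", ".", "cat"]  # toy vocab

    output_tokens, positions, labels = cpd.create_masked_lm_predictions(
        tokens, masked_lm_prob=0.15, max_predictions_per_seq=20,
        vocab_words=vocab_words, rng=rng)

    print(output_tokens)      # e.g. ['[CLS]', 'the', '[MASK]', 'is', 'hairy', '.', '[SEP]']
    print(positions, labels)  # e.g. [2] ['dog']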
/extract_features.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Extract pre-computed feature vectors from BERT."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import codecs
22 | import collections
23 | import json
24 | import re
25 |
26 | import modeling
27 | import tokenization
28 | import tensorflow as tf
29 |
30 | flags = tf.flags
31 |
32 | FLAGS = flags.FLAGS
33 |
34 | flags.DEFINE_string("input_file", None, "")
35 |
36 | flags.DEFINE_string("output_file", None, "")
37 |
38 | flags.DEFINE_string("layers", "-1,-2,-3,-4", "")
39 |
40 | flags.DEFINE_string(
41 | "bert_config_file", None,
42 | "The config json file corresponding to the pre-trained BERT model. "
43 | "This specifies the model architecture.")
44 |
45 | flags.DEFINE_integer(
46 | "max_seq_length", 128,
47 | "The maximum total input sequence length after WordPiece tokenization. "
48 | "Sequences longer than this will be truncated, and sequences shorter "
49 | "than this will be padded.")
50 |
51 | flags.DEFINE_string(
52 | "init_checkpoint", None,
53 | "Initial checkpoint (usually from a pre-trained BERT model).")
54 |
55 | flags.DEFINE_string("vocab_file", None,
56 | "The vocabulary file that the BERT model was trained on.")
57 |
58 | flags.DEFINE_bool(
59 | "do_lower_case", True,
60 | "Whether to lower case the input text. Should be True for uncased "
61 | "models and False for cased models.")
62 |
63 | flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")
64 |
65 | flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
66 |
67 | flags.DEFINE_string("master", None,
68 | "If using a TPU, the address of the master.")
69 |
70 | flags.DEFINE_integer(
71 | "num_tpu_cores", 8,
72 | "Only used if `use_tpu` is True. Total number of TPU cores to use.")
73 |
74 | flags.DEFINE_bool(
75 | "use_one_hot_embeddings", False,
76 | "If True, tf.one_hot will be used for embedding lookups, otherwise "
77 | "tf.nn.embedding_lookup will be used. On TPUs, this should be True "
78 | "since it is much faster.")
79 |
80 |
81 | class InputExample(object):
82 |
83 | def __init__(self, unique_id, text_a, text_b):
84 | self.unique_id = unique_id
85 | self.text_a = text_a
86 | self.text_b = text_b
87 |
88 |
89 | class InputFeatures(object):
90 | """A single set of features of data."""
91 |
92 | def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
93 | self.unique_id = unique_id
94 | self.tokens = tokens
95 | self.input_ids = input_ids
96 | self.input_mask = input_mask
97 | self.input_type_ids = input_type_ids
98 |
99 |
100 | def input_fn_builder(features, seq_length):
101 | """Creates an `input_fn` closure to be passed to TPUEstimator."""
102 |
103 | all_unique_ids = []
104 | all_input_ids = []
105 | all_input_mask = []
106 | all_input_type_ids = []
107 |
108 | for feature in features:
109 | all_unique_ids.append(feature.unique_id)
110 | all_input_ids.append(feature.input_ids)
111 | all_input_mask.append(feature.input_mask)
112 | all_input_type_ids.append(feature.input_type_ids)
113 |
114 | def input_fn(params):
115 | """The actual input function."""
116 | batch_size = params["batch_size"]
117 |
118 | num_examples = len(features)
119 |
120 | # This is for demo purposes and does NOT scale to large data sets. We do
121 | # not use Dataset.from_generator() because that uses tf.py_func which is
122 | # not TPU compatible. The right way to load data is with TFRecordReader.
123 | d = tf.data.Dataset.from_tensor_slices({
124 | "unique_ids":
125 | tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
126 | "input_ids":
127 | tf.constant(
128 | all_input_ids, shape=[num_examples, seq_length],
129 | dtype=tf.int32),
130 | "input_mask":
131 | tf.constant(
132 | all_input_mask,
133 | shape=[num_examples, seq_length],
134 | dtype=tf.int32),
135 | "input_type_ids":
136 | tf.constant(
137 | all_input_type_ids,
138 | shape=[num_examples, seq_length],
139 | dtype=tf.int32),
140 | })
141 |
142 | d = d.batch(batch_size=batch_size, drop_remainder=False)
143 | return d
144 |
145 | return input_fn
146 |
147 |
148 | def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
149 | use_one_hot_embeddings):
150 | """Returns `model_fn` closure for TPUEstimator."""
151 |
152 | def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
153 | """The `model_fn` for TPUEstimator."""
154 |
155 | unique_ids = features["unique_ids"]
156 | input_ids = features["input_ids"]
157 | input_mask = features["input_mask"]
158 | input_type_ids = features["input_type_ids"]
159 |
160 | model = modeling.BertModel(
161 | config=bert_config,
162 | is_training=False,
163 | input_ids=input_ids,
164 | input_mask=input_mask,
165 | token_type_ids=input_type_ids,
166 | use_one_hot_embeddings=use_one_hot_embeddings)
167 |
168 | if mode != tf.estimator.ModeKeys.PREDICT:
169 | raise ValueError("Only PREDICT modes are supported: %s" % (mode))
170 |
171 | tvars = tf.trainable_variables()
172 | scaffold_fn = None
173 | (assignment_map,
174 | initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
175 | tvars, init_checkpoint)
176 | if use_tpu:
177 |
178 | def tpu_scaffold():
179 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
180 | return tf.train.Scaffold()
181 |
182 | scaffold_fn = tpu_scaffold
183 | else:
184 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
185 |
186 | tf.logging.info("**** Trainable Variables ****")
187 | for var in tvars:
188 | init_string = ""
189 | if var.name in initialized_variable_names:
190 | init_string = ", *INIT_FROM_CKPT*"
191 | tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
192 | init_string)
193 |
194 | all_layers = model.get_all_encoder_layers()
195 |
196 | predictions = {
197 | "unique_id": unique_ids,
198 | }
199 |
200 | for (i, layer_index) in enumerate(layer_indexes):
201 | predictions["layer_output_%d" % i] = all_layers[layer_index]
202 |
203 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
204 | mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
205 | return output_spec
206 |
207 | return model_fn
208 |
209 |
210 | def convert_examples_to_features(examples, seq_length, tokenizer):
211 | """Loads a data file into a list of `InputBatch`s."""
212 |
213 | features = []
214 | for (ex_index, example) in enumerate(examples):
215 | tokens_a = tokenizer.tokenize(example.text_a)
216 |
217 | tokens_b = None
218 | if example.text_b:
219 | tokens_b = tokenizer.tokenize(example.text_b)
220 |
221 | if tokens_b:
222 | # Modifies `tokens_a` and `tokens_b` in place so that the total
223 | # length is less than the specified length.
224 | # Account for [CLS], [SEP], [SEP] with "- 3"
225 | _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
226 | else:
227 | # Account for [CLS] and [SEP] with "- 2"
228 | if len(tokens_a) > seq_length - 2:
229 | tokens_a = tokens_a[0:(seq_length - 2)]
230 |
231 | # The convention in BERT is:
232 | # (a) For sequence pairs:
233 | # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
234 | # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
235 | # (b) For single sequences:
236 | # tokens: [CLS] the dog is hairy . [SEP]
237 | # type_ids: 0 0 0 0 0 0 0
238 | #
239 | # Where "type_ids" are used to indicate whether this is the first
240 | # sequence or the second sequence. The embedding vectors for `type=0` and
241 | # `type=1` were learned during pre-training and are added to the wordpiece
242 | # embedding vector (and position vector). This is not *strictly* necessary
243 | # since the [SEP] token unambiguously separates the sequences, but it makes
244 | # it easier for the model to learn the concept of sequences.
245 | #
246 | # For classification tasks, the first vector (corresponding to [CLS]) is
247 |     # used as the "sentence vector". Note that this only makes sense because
248 | # the entire model is fine-tuned.
249 | tokens = []
250 | input_type_ids = []
251 | tokens.append("[CLS]")
252 | input_type_ids.append(0)
253 | for token in tokens_a:
254 | tokens.append(token)
255 | input_type_ids.append(0)
256 | tokens.append("[SEP]")
257 | input_type_ids.append(0)
258 |
259 | if tokens_b:
260 | for token in tokens_b:
261 | tokens.append(token)
262 | input_type_ids.append(1)
263 | tokens.append("[SEP]")
264 | input_type_ids.append(1)
265 |
266 | input_ids = tokenizer.convert_tokens_to_ids(tokens)
267 |
268 | # The mask has 1 for real tokens and 0 for padding tokens. Only real
269 | # tokens are attended to.
270 | input_mask = [1] * len(input_ids)
271 |
272 | # Zero-pad up to the sequence length.
273 | while len(input_ids) < seq_length:
274 | input_ids.append(0)
275 | input_mask.append(0)
276 | input_type_ids.append(0)
277 |
278 | assert len(input_ids) == seq_length
279 | assert len(input_mask) == seq_length
280 | assert len(input_type_ids) == seq_length
281 |
282 | if ex_index < 5:
283 | tf.logging.info("*** Example ***")
284 | tf.logging.info("unique_id: %s" % (example.unique_id))
285 | tf.logging.info("tokens: %s" % " ".join(
286 | [tokenization.printable_text(x) for x in tokens]))
287 | tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
288 | tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
289 | tf.logging.info(
290 | "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
291 |
292 | features.append(
293 | InputFeatures(
294 | unique_id=example.unique_id,
295 | tokens=tokens,
296 | input_ids=input_ids,
297 | input_mask=input_mask,
298 | input_type_ids=input_type_ids))
299 | return features
300 |
301 |
302 | def _truncate_seq_pair(tokens_a, tokens_b, max_length):
303 | """Truncates a sequence pair in place to the maximum length."""
304 |
305 | # This is a simple heuristic which will always truncate the longer sequence
306 | # one token at a time. This makes more sense than truncating an equal percent
307 | # of tokens from each, since if one sequence is very short then each token
308 | # that's truncated likely contains more information than a longer sequence.
309 | while True:
310 | total_length = len(tokens_a) + len(tokens_b)
311 | if total_length <= max_length:
312 | break
313 | if len(tokens_a) > len(tokens_b):
314 | tokens_a.pop()
315 | else:
316 | tokens_b.pop()
317 |
318 |
319 | def read_examples(input_file):
320 | """Read a list of `InputExample`s from an input file."""
321 | examples = []
322 | unique_id = 0
323 | with tf.gfile.GFile(input_file, "r") as reader:
324 | while True:
325 | line = tokenization.convert_to_unicode(reader.readline())
326 | if not line:
327 | break
328 | line = line.strip()
329 | text_a = None
330 | text_b = None
331 | m = re.match(r"^(.*) \|\|\| (.*)$", line)
332 | if m is None:
333 | text_a = line
334 | else:
335 | text_a = m.group(1)
336 | text_b = m.group(2)
337 | examples.append(
338 | InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
339 | unique_id += 1
340 | return examples
341 |
342 |
343 | def main(_):
344 | tf.logging.set_verbosity(tf.logging.INFO)
345 |
346 | layer_indexes = [int(x) for x in FLAGS.layers.split(",")]
347 |
348 | bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
349 |
350 | tokenizer = tokenization.FullTokenizer(
351 | vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
352 |
353 | is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
354 | run_config = tf.contrib.tpu.RunConfig(
355 | master=FLAGS.master,
356 | tpu_config=tf.contrib.tpu.TPUConfig(
357 | num_shards=FLAGS.num_tpu_cores,
358 | per_host_input_for_training=is_per_host))
359 |
360 | examples = read_examples(FLAGS.input_file)
361 |
362 | features = convert_examples_to_features(
363 | examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)
364 |
365 | unique_id_to_feature = {}
366 | for feature in features:
367 | unique_id_to_feature[feature.unique_id] = feature
368 |
369 | model_fn = model_fn_builder(
370 | bert_config=bert_config,
371 | init_checkpoint=FLAGS.init_checkpoint,
372 | layer_indexes=layer_indexes,
373 | use_tpu=FLAGS.use_tpu,
374 | use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)
375 |
376 | # If TPU is not available, this will fall back to normal Estimator on CPU
377 | # or GPU.
378 | estimator = tf.contrib.tpu.TPUEstimator(
379 | use_tpu=FLAGS.use_tpu,
380 | model_fn=model_fn,
381 | config=run_config,
382 | predict_batch_size=FLAGS.batch_size)
383 |
384 | input_fn = input_fn_builder(
385 | features=features, seq_length=FLAGS.max_seq_length)
386 |
387 | with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file,
388 | "w")) as writer:
389 | for result in estimator.predict(input_fn, yield_single_examples=True):
390 | unique_id = int(result["unique_id"])
391 | feature = unique_id_to_feature[unique_id]
392 | output_json = collections.OrderedDict()
393 | output_json["linex_index"] = unique_id
394 | all_features = []
395 | for (i, token) in enumerate(feature.tokens):
396 | all_layers = []
397 | for (j, layer_index) in enumerate(layer_indexes):
398 | layer_output = result["layer_output_%d" % j]
399 | layers = collections.OrderedDict()
400 | layers["index"] = layer_index
401 | layers["values"] = [
402 | round(float(x), 6) for x in layer_output[i:(i + 1)].flat
403 | ]
404 | all_layers.append(layers)
405 | features = collections.OrderedDict()
406 | features["token"] = token
407 | features["layers"] = all_layers
408 | all_features.append(features)
409 | output_json["features"] = all_features
410 | writer.write(json.dumps(output_json) + "\n")
411 |
412 |
413 | if __name__ == "__main__":
414 | flags.mark_flag_as_required("input_file")
415 | flags.mark_flag_as_required("vocab_file")
416 | flags.mark_flag_as_required("bert_config_file")
417 | flags.mark_flag_as_required("init_checkpoint")
418 | flags.mark_flag_as_required("output_file")
419 | tf.app.run()
420 |
--------------------------------------------------------------------------------
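A quick sketch (illustration only, not part of the repo) of the input convention parsed by `read_examples` above: one example per line, with an optional ` ||| ` separating `text_a` from `text_b`. Assumes TensorFlow 1.x and this directory on `PYTHONPATH`:

    import tensorflow as tf

    from extract_features import read_examples

    # Hypothetical demo file with one sentence pair and one single sentence.
    with tf.gfile.Open("/tmp/bert_input.txt", "w") as f:
        f.write("is this jacksonville ? ||| no it is not .\n")
        f.write("the dog is hairy .\n")

    for ex in read_examples("/tmp/bert_input.txt"):
        # text_b is None for single-sentence lines.
        print(ex.unique_id, ex.text_a, "|", ex.text_b)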
/get_embedding.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import csv
3 | from bert_serving.client import BertClient
4 | from sklearn.svm import LinearSVC
5 | from sklearn.model_selection import train_test_split
6 | from sklearn.decomposition import PCA
7 | import numpy as np
8 | from bert_serving.server.helper import get_args_parser
9 | from bert_serving.server import BertServer
10 | import umap
11 | from collections import defaultdict
12 | import pickle
13 | import time
14 | from warnings import filterwarnings
15 | filterwarnings('ignore')
16 | import pandas as pd
17 | import glob, os
18 |
19 |
20 |
21 | def pause():
22 | int(input("enter a num to cont..."))
23 |
24 |
25 | def read_tsv(input_file):
26 | lines = []
27 | with tf.gfile.Open(input_file, "r") as f:
28 | reader = csv.reader(f, delimiter="\t")
29 | for line in reader:
30 | text = line[0]
31 | new = str(" ".join(text.split()))
32 | new = new.rstrip()
33 | if new:  # keep only lines that are non-empty after whitespace cleanup
34 | lines.append(new)
35 | # lines.append(text)
36 | return lines[1:]  # drop the first row (TSV header)
37 |
38 |
39 | def train_svm(X, Y, key, results):
40 | c = np.logspace(0.00001, 1.0, num=10)  # 10 C values, log-spaced from 10**1e-5 (~1.0) to 10**1 (10.0)
41 |
42 | # Randomly sample 1000 data points from each
43 | # repeat for 100 times
44 | for i in range(100):
45 | np.random.seed(int(round(time.time() * 1000)) % (2**32 - 1))
46 | index = np.random.randint(0, len(X), 1000)
47 |
48 | x = [X[j] for j in index]
49 | y = [Y[j] for j in index]
50 |
51 | X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
52 |
53 | for k in range(len(c)):
54 | clf = LinearSVC(random_state=42, C=c[k], max_iter=100)
55 | clf.fit(X_train, y_train)
56 | acc = clf.score(X_test, y_test)
57 | err_rate = 1-acc
58 | a_dist = 2 * (1 - 2 * err_rate)
59 | results[key].append(a_dist)
60 |
61 | return results
62 |
63 |
64 | def save_emb():
65 |
66 | common = [
67 | '-model_dir', '/home/ydu/BERT/uncased_L-12_H-768_A-12/',
68 | '-num_worker', '2',
69 | '-port', '5555',
70 | '-port_out', '5556',
71 | '-max_seq_len', '128',
72 | '-max_batch_size', '256',
73 | # '-tuned_model_dir', '/home/ydu/BERT/bert_mgpu/pretrain_output/10k-32b-all4data/',
74 | # '-ckpt_name', 'model.ckpt-2500',
75 | ]
76 |
77 | args = get_args_parser().parse_args(common)
78 |
79 | # folder = ['books', 'dvd', 'electronics', 'kitchen']
80 | data_path = '/home/ydu/BERT/DATA/'
81 | data_folder = ['metacritic', 'imdb', 'amazon', 'reddit']
82 |
83 | # model_path = 'home/ydu/BERT/bert_mgpu/results/'
84 | # model_folder = 'amazon-balanced/'
85 | # model_type = 'bert-tune'
86 | data = {}
87 |
88 | # setattr(args, 'tuned_model_dir', '/home/ydu/BERT/bert_mgpu/pretrain_output/reddit-pretrain')
89 | # setattr(args, 'ckpt_name', 'model.ckpt-2500')
90 | setattr(args, 'tuned_model_dir', '/home/ydu/BERT/bert_mgpu/pretrain_output/10k-32b-all4data')
91 | setattr(args, 'ckpt_name', 'model.ckpt-2500')
92 |
93 | for d in data_folder:
94 | fn = data_path + d + '/all.tsv'
95 | print("===========",fn,"================")
96 | text = read_tsv(fn)
97 | server = BertServer(args)
98 | server.start()
99 | print('wait until server is ready...')
100 | time.sleep(20)
101 | print('encoding...')
102 | bc = BertClient()
103 | data[d] = bc.encode(text)
104 | bc.close()
105 | server.close()
106 |
107 | pickle_name = data_path+'EMB/allpre_emb.pickle'
108 | with open(pickle_name, 'wb') as handle:
109 | pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
110 |
111 | return pickle_name
112 |
113 |
114 | def load_emb(pickle_name):
115 | with open(pickle_name, 'rb') as handle:
116 | emb = pickle.load(handle)
117 |
118 | data = ['metacritic', 'imdb', 'amazon', 'reddit']
119 | # model = ['meta', 'amazon','imdb', 'bert']
120 | results = defaultdict(list)
121 |
122 | for i in range(len(data)):
123 | emb1 = emb[data[i]]
124 | for j in range(len(data)):
125 | if j == i:
126 | continue
127 | emb2 = emb[data[j]]
128 |
129 | key = str(data[i]) + '_' + str(data[j])
130 | print(key)
131 |
132 | # dummy label
133 | label1 = np.array([0 for _ in emb1])
134 | label2 = np.array([1 for _ in emb2])
135 |
136 | X = np.concatenate((emb1, emb2), axis=0)
137 | Y = np.concatenate((label1, label2), axis=0)
138 |
139 | # Randomly shuffle data
140 | np.random.seed(int(round(time.time() * 1000)) % (2**32 - 1))
141 | shuffle_indices = np.random.permutation(np.arange(len(Y)))
142 | x_shuffled = X[shuffle_indices]
143 | y_shuffled = Y[shuffle_indices]
144 |
145 | results = train_svm(x_shuffled, y_shuffled, key, results)
146 |
147 | result2 = defaultdict(list)
148 |
149 | for k, i in results.items():
150 | avg = np.mean(i)
151 | std = np.std(i)
152 | result2[k] = [avg, std]
153 | df = pd.DataFrame.from_dict(result2, orient='index', columns=['A-distance', 'Stdev'])
154 | print(df)
155 | tsv_name = pickle_name.replace('_emb.pickle', '_adist.tsv')
156 | print(tsv_name)
157 | df.to_csv(tsv_name, index=True, sep='\t')
158 |
159 | # print("model: {}\ndata_pair: {}, avg: {}, stdev: {}".format(str(pickle_name), str(k), np.mean(i), np.std(i)))
160 |
161 |
162 | def load_baseline_emb(pickle_list):
163 | emb_dict = defaultdict(dict)
164 | keys = []
165 |
166 | for p in pickle_list:
167 | with open(p, 'rb') as handle:
168 | key = p.replace('_emb.pickle','')
169 | emb_dict[key] = pickle.load(handle)
170 | keys.append(key)
171 |
172 | data = ['metacritic', 'amazon', 'imdb']
173 | # model = ['meta', 'amazon','imdb', 'bert']
174 | results = defaultdict(list)
175 |
176 | base_model = emb_dict['metatune']
177 | trans_model = emb_dict['imdbtune']
178 |
179 | emb1 = base_model['metacritic']
180 | emb2 = trans_model['imdb']
181 |
182 | key = str(data[0]) + '-base_' + str(data[2]+'-trans')
183 | print(key)
184 |
185 | # dummy label
186 | label1 = np.array([0 for _ in emb1])
187 | label2 = np.array([1 for _ in emb2])
188 |
189 | X = np.concatenate((emb1, emb2), axis=0)
190 | Y = np.concatenate((label1, label2), axis=0)
191 |
192 | # Randomly shuffle data
193 | np.random.seed(int(round(time.time() * 1000)) % (2**32 - 1))
194 | shuffle_indices = np.random.permutation(np.arange(len(Y)))
195 | x_shuffled = X[shuffle_indices]
196 | y_shuffled = Y[shuffle_indices]
197 |
198 | results = train_svm(x_shuffled, y_shuffled, key, results)
199 |
200 | result2 = defaultdict(list)
201 |
202 | for k, i in results.items():
203 | avg = np.mean(i)
204 | std = np.std(i)
205 | result2[k] = [avg, std]
206 | df = pd.DataFrame.from_dict(result2, orient='index', columns=['A-distance', 'Stdev'])
207 | print(df)
208 | # tsv_name = pickle_base.replace('_emb.pickle', '-base_adist.tsv')
209 | # print(tsv_name)
210 | # df.to_csv(tsv_name, index=True, sep='\t')
211 |
212 |
213 | if __name__ == '__main__':
214 |
215 | pickle_name = save_emb()
216 | load_emb(pickle_name)
217 |
218 | # os.chdir("/home/ydu/BERT/DATA/EMB/")
219 | # pickle_list = []
220 | # for f in glob.glob("*tune*.pickle"):
221 | # pickle_list.append(f)
222 | # print(pickle_list)
223 | # load_baseline_emb(pickle_list)
224 |
--------------------------------------------------------------------------------
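For context: `train_svm` above scores each domain pair with the proxy A-distance, d_A = 2 * (1 - 2 * err), where err is the held-out error of a linear SVM trained to tell the two domains' embeddings apart. A toy sketch of that arithmetic (illustration only):

    def proxy_a_distance(err_rate):
        # Mirrors `a_dist = 2 * (1 - 2 * err_rate)` in train_svm().
        return 2.0 * (1.0 - 2.0 * err_rate)

    print(proxy_a_distance(0.5))  # 0.0: classifier at chance, domains indistinguishable
    print(proxy_a_distance(0.0))  # 2.0: perfectly separable, maximally distant domains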
/join_result.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import tensorflow as tf
3 | import pandas as pd
4 | import tokenization
5 |
6 | TEST_TSV = '/home/ydu/BERT/DATA/reddit/test.tsv'
7 | RESULTS_TSV = '/home/ydu/BERT/bert_mgpu/predict/081419/no-pretrain-imdbtune/test_results.tsv'
8 | DIR = '/home/ydu/BERT/bert_mgpu/predict/081419/no-pretrain-imdbtune/'
9 |
10 | def _read_tsv(input_file, quotechar=None):
11 | """Reads a tab separated value file."""
12 | with tf.gfile.Open(input_file, "r") as f:
13 | reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
14 | lines = []
15 | for line in reader:
16 | lines.append(line)
17 | return lines
18 |
19 | lines = _read_tsv(TEST_TSV)
20 | lines = [tokenization.convert_to_unicode(line[0]) for line in lines]
21 |
22 | results = open(RESULTS_TSV).readlines()
23 | results = [result.strip().split('\t') for result in results]
24 |
25 | df = pd.DataFrame()
26 | df['text'] = lines
27 | df['0'] = [result[0] for result in results]
28 | df['1'] = [result[1] for result in results]
29 |
30 | df.to_csv(DIR+'imdbtune.tsv', index=False, sep='\t')
--------------------------------------------------------------------------------
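A toy illustration (hypothetical values, not repo output) of the join performed above: `test_results.tsv` is expected to align row-for-row with `test.tsv` and carry one tab-separated score per class, which in stock BERT classification runs are softmax probabilities:

    import pandas as pd

    lines = ["great movie", "terrible plot"]        # column 0 of test.tsv
    results = [["0.91", "0.09"], ["0.12", "0.88"]]  # rows of test_results.tsv

    df = pd.DataFrame({"text": lines,
                       "0": [r[0] for r in results],
                       "1": [r[1] for r in results]})
    print(df.to_csv(index=False, sep="\t"))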
/modeling_test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 |
19 | import collections
20 | import json
21 | import random
22 | import re
23 |
24 | import modeling
25 | import six
26 | import tensorflow as tf
27 |
28 |
29 | class BertModelTest(tf.test.TestCase):
30 |
31 | class BertModelTester(object):
32 |
33 | def __init__(self,
34 | parent,
35 | batch_size=13,
36 | seq_length=7,
37 | is_training=True,
38 | use_input_mask=True,
39 | use_token_type_ids=True,
40 | vocab_size=99,
41 | hidden_size=32,
42 | num_hidden_layers=5,
43 | num_attention_heads=4,
44 | intermediate_size=37,
45 | hidden_act="gelu",
46 | hidden_dropout_prob=0.1,
47 | attention_probs_dropout_prob=0.1,
48 | max_position_embeddings=512,
49 | type_vocab_size=16,
50 | initializer_range=0.02,
51 | scope=None):
52 | self.parent = parent
53 | self.batch_size = batch_size
54 | self.seq_length = seq_length
55 | self.is_training = is_training
56 | self.use_input_mask = use_input_mask
57 | self.use_token_type_ids = use_token_type_ids
58 | self.vocab_size = vocab_size
59 | self.hidden_size = hidden_size
60 | self.num_hidden_layers = num_hidden_layers
61 | self.num_attention_heads = num_attention_heads
62 | self.intermediate_size = intermediate_size
63 | self.hidden_act = hidden_act
64 | self.hidden_dropout_prob = hidden_dropout_prob
65 | self.attention_probs_dropout_prob = attention_probs_dropout_prob
66 | self.max_position_embeddings = max_position_embeddings
67 | self.type_vocab_size = type_vocab_size
68 | self.initializer_range = initializer_range
69 | self.scope = scope
70 |
71 | def create_model(self):
72 | input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length],
73 | self.vocab_size)
74 |
75 | input_mask = None
76 | if self.use_input_mask:
77 | input_mask = BertModelTest.ids_tensor(
78 | [self.batch_size, self.seq_length], vocab_size=2)
79 |
80 | token_type_ids = None
81 | if self.use_token_type_ids:
82 | token_type_ids = BertModelTest.ids_tensor(
83 | [self.batch_size, self.seq_length], self.type_vocab_size)
84 |
85 | config = modeling.BertConfig(
86 | vocab_size=self.vocab_size,
87 | hidden_size=self.hidden_size,
88 | num_hidden_layers=self.num_hidden_layers,
89 | num_attention_heads=self.num_attention_heads,
90 | intermediate_size=self.intermediate_size,
91 | hidden_act=self.hidden_act,
92 | hidden_dropout_prob=self.hidden_dropout_prob,
93 | attention_probs_dropout_prob=self.attention_probs_dropout_prob,
94 | max_position_embeddings=self.max_position_embeddings,
95 | type_vocab_size=self.type_vocab_size,
96 | initializer_range=self.initializer_range)
97 |
98 | model = modeling.BertModel(
99 | config=config,
100 | is_training=self.is_training,
101 | input_ids=input_ids,
102 | input_mask=input_mask,
103 | token_type_ids=token_type_ids,
104 | scope=self.scope)
105 |
106 | outputs = {
107 | "embedding_output": model.get_embedding_output(),
108 | "sequence_output": model.get_sequence_output(),
109 | "pooled_output": model.get_pooled_output(),
110 | "all_encoder_layers": model.get_all_encoder_layers(),
111 | }
112 | return outputs
113 |
114 | def check_output(self, result):
115 | self.parent.assertAllEqual(
116 | result["embedding_output"].shape,
117 | [self.batch_size, self.seq_length, self.hidden_size])
118 |
119 | self.parent.assertAllEqual(
120 | result["sequence_output"].shape,
121 | [self.batch_size, self.seq_length, self.hidden_size])
122 |
123 | self.parent.assertAllEqual(result["pooled_output"].shape,
124 | [self.batch_size, self.hidden_size])
125 |
126 | def test_default(self):
127 | self.run_tester(BertModelTest.BertModelTester(self))
128 |
129 | def test_config_to_json_string(self):
130 | config = modeling.BertConfig(vocab_size=99, hidden_size=37)
131 | obj = json.loads(config.to_json_string())
132 | self.assertEqual(obj["vocab_size"], 99)
133 | self.assertEqual(obj["hidden_size"], 37)
134 |
135 | def run_tester(self, tester):
136 | with self.test_session() as sess:
137 | ops = tester.create_model()
138 | init_op = tf.group(tf.global_variables_initializer(),
139 | tf.local_variables_initializer())
140 | sess.run(init_op)
141 | output_result = sess.run(ops)
142 | tester.check_output(output_result)
143 |
144 | self.assert_all_tensors_reachable(sess, [init_op, ops])
145 |
146 | @classmethod
147 | def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
148 | """Creates a random int32 tensor of the shape within the vocab size."""
149 | if rng is None:
150 | rng = random.Random()
151 |
152 | total_dims = 1
153 | for dim in shape:
154 | total_dims *= dim
155 |
156 | values = []
157 | for _ in range(total_dims):
158 | values.append(rng.randint(0, vocab_size - 1))
159 |
160 | return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)
161 |
162 | def assert_all_tensors_reachable(self, sess, outputs):
163 | """Checks that all the tensors in the graph are reachable from outputs."""
164 | graph = sess.graph
165 |
166 | ignore_strings = [
167 | "^.*/assert_less_equal/.*$",
168 | "^.*/dilation_rate$",
169 | "^.*/Tensordot/concat$",
170 | "^.*/Tensordot/concat/axis$",
171 | "^testing/.*$",
172 | ]
173 |
174 | ignore_regexes = [re.compile(x) for x in ignore_strings]
175 |
176 | unreachable = self.get_unreachable_ops(graph, outputs)
177 | filtered_unreachable = []
178 | for x in unreachable:
179 | do_ignore = False
180 | for r in ignore_regexes:
181 | m = r.match(x.name)
182 | if m is not None:
183 | do_ignore = True
184 | if do_ignore:
185 | continue
186 | filtered_unreachable.append(x)
187 | unreachable = filtered_unreachable
188 |
189 | self.assertEqual(
190 | len(unreachable), 0, "The following ops are unreachable: %s" %
191 | (" ".join([x.name for x in unreachable])))
192 |
193 | @classmethod
194 | def get_unreachable_ops(cls, graph, outputs):
195 | """Finds all of the tensors in graph that are unreachable from outputs."""
196 | outputs = cls.flatten_recursive(outputs)
197 | output_to_op = collections.defaultdict(list)
198 | op_to_all = collections.defaultdict(list)
199 | assign_out_to_in = collections.defaultdict(list)
200 |
201 | for op in graph.get_operations():
202 | for x in op.inputs:
203 | op_to_all[op.name].append(x.name)
204 | for y in op.outputs:
205 | output_to_op[y.name].append(op.name)
206 | op_to_all[op.name].append(y.name)
207 | if str(op.type) == "Assign":
208 | for y in op.outputs:
209 | for x in op.inputs:
210 | assign_out_to_in[y.name].append(x.name)
211 |
212 | assign_groups = collections.defaultdict(list)
213 | for out_name in assign_out_to_in.keys():
214 | name_group = assign_out_to_in[out_name]
215 | for n1 in name_group:
216 | assign_groups[n1].append(out_name)
217 | for n2 in name_group:
218 | if n1 != n2:
219 | assign_groups[n1].append(n2)
220 |
221 | seen_tensors = {}
222 | stack = [x.name for x in outputs]
223 | while stack:
224 | name = stack.pop()
225 | if name in seen_tensors:
226 | continue
227 | seen_tensors[name] = True
228 |
229 | if name in output_to_op:
230 | for op_name in output_to_op[name]:
231 | if op_name in op_to_all:
232 | for input_name in op_to_all[op_name]:
233 | if input_name not in stack:
234 | stack.append(input_name)
235 |
236 | expanded_names = []
237 | if name in assign_groups:
238 | for assign_name in assign_groups[name]:
239 | expanded_names.append(assign_name)
240 |
241 | for expanded_name in expanded_names:
242 | if expanded_name not in stack:
243 | stack.append(expanded_name)
244 |
245 | unreachable_ops = []
246 | for op in graph.get_operations():
247 | is_unreachable = False
248 | all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs]
249 | for name in all_names:
250 | if name not in seen_tensors:
251 | is_unreachable = True
252 | if is_unreachable:
253 | unreachable_ops.append(op)
254 | return unreachable_ops
255 |
256 | @classmethod
257 | def flatten_recursive(cls, item):
258 | """Flattens (potentially nested) a tuple/dictionary/list to a list."""
259 | output = []
260 | if isinstance(item, list):
261 | output.extend(item)
262 | elif isinstance(item, tuple):
263 | output.extend(list(item))
264 | elif isinstance(item, dict):
265 | for (_, v) in six.iteritems(item):
266 | output.append(v)
267 | else:
268 | return [item]
269 |
270 | flat_output = []
271 | for x in output:
272 | flat_output.extend(cls.flatten_recursive(x))
273 | return flat_output
274 |
275 |
276 | if __name__ == "__main__":
277 | tf.test.main()
278 |
--------------------------------------------------------------------------------
/multilingual.md:
--------------------------------------------------------------------------------
1 | ## Models
2 |
3 | There are two multilingual models currently available. We do not plan to release
4 | more single-language models, but we may release `BERT-Large` versions of these
5 | two in the future:
6 |
7 | * **[`BERT-Base, Multilingual Cased (New, recommended)`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**:
8 | 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
9 | * **[`BERT-Base, Multilingual Uncased (Orig, not recommended)`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip)**:
10 | 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
11 | * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**:
12 | Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M
13 | parameters
14 |
15 | **The `Multilingual Cased (New)` model also fixes normalization issues in many
16 | languages, so it is recommended for languages with non-Latin alphabets (and is
17 | often better for most languages with Latin alphabets). When using this model,
18 | make sure to pass `--do_lower_case=false` to `run_pretraining.py` and other
19 | scripts.**
20 |
21 | See the [list of languages](#list-of-languages) that the Multilingual model
22 | supports. The Multilingual model does include Chinese (and English), but if your
23 | fine-tuning data is Chinese-only, then the Chinese model will likely produce
24 | better results.
25 |
26 | ## Results
27 |
28 | To evaluate these systems, we use the
29 | [XNLI dataset](https://github.com/facebookresearch/XNLI), which is a
30 | version of [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) where the
31 | dev and test sets have been translated (by humans) into 15 languages. Note that
32 | the training set was *machine* translated (we used the translations provided by
33 | XNLI, not Google NMT). For clarity, we only report on 6 languages below:
34 |
35 |
36 |
37 | | System | English | Chinese | Spanish | German | Arabic | Urdu |
38 | | --------------------------------- | -------- | -------- | -------- | -------- | -------- | -------- |
39 | | XNLI Baseline - Translate Train | 73.7 | 67.0 | 68.8 | 66.5 | 65.8 | 56.6 |
40 | | XNLI Baseline - Translate Test | 73.7 | 68.3 | 70.7 | 68.7 | 66.8 | 59.3 |
41 | | BERT - Translate Train Cased | **81.9** | **76.6** | **77.8** | **75.9** | **70.7** | 61.6 |
42 | | BERT - Translate Train Uncased | 81.4 | 74.2 | 77.3 | 75.2 | 70.5 | 61.7 |
43 | | BERT - Translate Test Uncased | 81.4 | 70.1 | 74.9 | 74.4 | 70.4 | **62.1** |
44 | | BERT - Zero Shot Uncased | 81.4 | 63.8 | 74.3 | 70.5 | 62.1 | 58.3 |
45 |
46 |
47 |
48 | The first two rows are baselines from the XNLI paper and the last four rows are
49 | our results with BERT.
50 |
51 | **Translate Train** means that the MultiNLI training set was machine translated
52 | from English into the foreign language. So training and evaluation were both
53 | done in the foreign language. Unfortunately, training was done on
54 | machine-translated data, so it is impossible to quantify how much of the lower
55 | accuracy (compared to English) is due to the quality of the machine translation
56 | vs. the quality of the pre-trained model.
57 |
58 | **Translate Test** means that the XNLI test set was machine translated from the
59 | foreign language into English. So training and evaluation were both done on
60 | English. However, test evaluation was done on machine-translated English, so the
61 | accuracy depends on the quality of the machine translation system.
62 |
63 | **Zero Shot** means that the Multilingual BERT system was fine-tuned on English
64 | MultiNLI, and then evaluated on the foreign language XNLI test. In this case,
65 | machine translation was not involved at all in either the pre-training or
66 | fine-tuning.
67 |
68 | Note that the English result is worse than the 84.2 MultiNLI baseline because
69 | this training used Multilingual BERT rather than English-only BERT. This implies
70 | that for high-resource languages, the Multilingual model is somewhat worse than
71 | a single-language model. However, it is not feasible for us to train and
72 | maintain dozens of single-language models. Therefore, if your goal is to maximize
73 | performance with a language other than English or Chinese, you might find it
74 | beneficial to run pre-training for additional steps starting from our
75 | Multilingual model on data from your language of interest.
76 |
77 | Here is a comparison of training Chinese models with the Multilingual
78 | `BERT-Base` and Chinese-only `BERT-Base`:
79 |
80 | System | Chinese
81 | ----------------------- | -------
82 | XNLI Baseline | 67.0
83 | BERT Multilingual Model | 74.2
84 | BERT Chinese-only Model | 77.2
85 |
86 | As with English, the single-language model does 3% better than the
87 | Multilingual model.
88 |
89 | ## Fine-tuning Example
90 |
91 | The multilingual model does **not** require any special consideration or API
92 | changes. We did update the implementation of `BasicTokenizer` in
93 | `tokenization.py` to support Chinese character tokenization, so please update if
94 | you forked it. However, we did not change the tokenization API.
95 |
96 | To test the new models, we did modify `run_classifier.py` to add support for the
97 | [XNLI dataset](https://github.com/facebookresearch/XNLI). This is a 15-language
98 | version of MultiNLI where the dev/test sets have been human-translated, and the
99 | training set has been machine-translated.
100 |
101 | To run the fine-tuning code, please download the
102 | [XNLI dev/test set](https://s3.amazonaws.com/xnli/XNLI-1.0.zip) and the
103 | [XNLI machine-translated training set](https://s3.amazonaws.com/xnli/XNLI-MT-1.0.zip)
104 | and then unpack both .zip files into some directory `$XNLI_DIR`.
105 |
106 | To run fine-tuning on XNLI, note that the language is hard-coded into
107 | `run_classifier.py` (Chinese by default), so please modify `XnliProcessor` if
108 | you want to run on another language.
109 |
110 | This is a large dataset, so training will take a few hours on a GPU
111 | (or about 30 minutes on a Cloud TPU). To run an experiment quickly for
112 | debugging, just set `num_train_epochs` to a small value like `0.1`.
113 |
114 | ```shell
115 | export BERT_BASE_DIR=/path/to/bert/chinese_L-12_H-768_A-12 # or multilingual_L-12_H-768_A-12
116 | export XNLI_DIR=/path/to/xnli
117 |
118 | python run_classifier.py \
119 | --task_name=XNLI \
120 | --do_train=true \
121 | --do_eval=true \
122 | --data_dir=$XNLI_DIR \
123 | --vocab_file=$BERT_BASE_DIR/vocab.txt \
124 | --bert_config_file=$BERT_BASE_DIR/bert_config.json \
125 | --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
126 | --max_seq_length=128 \
127 | --train_batch_size=32 \
128 | --learning_rate=5e-5 \
129 | --num_train_epochs=2.0 \
130 | --output_dir=/tmp/xnli_output/
131 | ```
132 |
133 | With the Chinese-only model, the results should look something like this:
134 |
135 | ```
136 | ***** Eval results *****
137 | eval_accuracy = 0.774116
138 | eval_loss = 0.83554
139 | global_step = 24543
140 | loss = 0.74603
141 | ```
142 |
143 | ## Details
144 |
145 | ### Data Source and Sampling
146 |
147 | The languages chosen were the
148 | [top 100 languages with the largest Wikipedias](https://meta.wikimedia.org/wiki/List_of_Wikipedias).
149 | The entire Wikipedia dump for each language (excluding user and talk pages) was
150 | taken as the training data.
151 |
152 | However, the size of the Wikipedia for a given language varies greatly, and
153 | therefore low-resource languages may be "under-represented" in terms of the
154 | neural network model (under the assumption that languages are "competing" for
155 | limited model capacity to some extent).
156 |
157 | However, the size of a Wikipedia also correlates with the number of speakers of
158 | a language, and we also don't want to overfit the model by performing thousands
159 | of epochs over a tiny Wikipedia for a particular language.
160 |
161 | To balance these two factors, we performed exponentially smoothed weighting of
162 | the data during pre-training data creation (and WordPiece vocab creation). In
163 | other words, let's say that the probability of a language is *P(L)*, e.g.,
164 | *P(English) = 0.21* means that after concatenating all of the Wikipedias
165 | together, 21% of our data is English. We exponentiate each probability by some
166 | factor *S* and then re-normalize, and sample from that distribution. In our case
167 | we use *S=0.7*. So, high-resource languages like English will be under-sampled,
168 | and low-resource languages like Icelandic will be over-sampled. E.g., in the
169 | original distribution English would be sampled 1000x more than Icelandic, but
170 | after smoothing it's only sampled 100x more.
171 |
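Below is a minimal sketch of this smoothing step, assuming made-up corpus
shares; only the exponent *S=0.7* and the ~1000x English-to-Icelandic ratio
come from the description above.

```python
import numpy as np

def smooth_sampling_probs(probs, s=0.7):
    """Exponentiate each language share by s, then re-normalize."""
    smoothed = np.asarray(probs, dtype=np.float64) ** s
    return smoothed / smoothed.sum()

# Hypothetical shares: "English" at 0.21, an "Icelandic"-sized share 1000x
# smaller, and the remaining mass lumped into one bucket.
p = [0.21, 0.00021, 0.78979]
q = smooth_sampling_probs(p)
print(q[0] / q[1])  # ratio drops from 1000x to 1000**0.7 ~= 126x
```
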
172 | ### Tokenization
173 |
174 | For tokenization, we use a 110k shared WordPiece vocabulary. The word counts are
175 | weighted the same way as the data, so low-resource languages are upweighted by
176 | some factor. We intentionally do *not* use any marker to denote the input
177 | language (so that zero-shot training can work).
178 |
179 | Because Chinese (and Japanese Kanji and Korean Hanja) does not have whitespace
180 | characters, we add spaces around every character in the
181 | [CJK Unicode range](https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_\(Unicode_block\))
182 | before applying WordPiece. This means that Chinese is effectively
183 | character-tokenized. Note that the CJK Unicode block only includes
184 | Chinese-origin characters and does *not* include Hangul Korean or
185 | Katakana/Hiragana Japanese, which are tokenized with whitespace+WordPiece like
186 | all other languages.
187 |
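The sketch below illustrates the idea; for brevity it checks only the base CJK
Unified Ideographs block, whereas the actual `BasicTokenizer` in
`tokenization.py` covers several additional CJK ranges.

```python
def pad_cjk_chars(text):
    """Surround CJK ideographs with spaces so each becomes its own token."""
    out = []
    for ch in text:
        if 0x4E00 <= ord(ch) <= 0x9FFF:  # base CJK Unified Ideographs block
            out.append(" " + ch + " ")
        else:
            out.append(ch)
    return "".join(out)

# Whitespace tokenization afterwards yields one token per Chinese character:
print(pad_cjk_chars("BERT处理中文").split())  # ['BERT', '处', '理', '中', '文']
```
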
188 | For all other languages, we apply the
189 | [same recipe as English](https://github.com/google-research/bert#tokenization):
190 | (a) lower casing+accent removal, (b) punctuation splitting, (c) whitespace
191 | tokenization. We understand that accent markers have substantial meaning in some
192 | languages, but felt that the benefits of reducing the effective vocabulary make
193 | up for this. Generally the strong contextual models of BERT should make up for
194 | any ambiguity introduced by stripping accent markers.
195 |
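Here is a small sketch of step (a), assuming the usual Unicode approach of NFD
normalization followed by dropping combining marks (see `tokenization.py` for
the repository's actual implementation):

```python
import unicodedata

def lower_and_strip_accents(text):
    """Lowercase, then remove combining accent marks via NFD normalization."""
    text = unicodedata.normalize("NFD", text.lower())
    return "".join(ch for ch in text if unicodedata.category(ch) != "Mn")

print(lower_and_strip_accents("Crème Brûlée"))  # -> "creme brulee"
```
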
196 | ### List of Languages
197 |
198 | The multilingual model supports the following languages. These languages were
199 | chosen because they are the top 100 languages with the largest Wikipedias:
200 |
201 | * Afrikaans
202 | * Albanian
203 | * Arabic
204 | * Aragonese
205 | * Armenian
206 | * Asturian
207 | * Azerbaijani
208 | * Bashkir
209 | * Basque
210 | * Bavarian
211 | * Belarusian
212 | * Bengali
213 | * Bishnupriya Manipuri
214 | * Bosnian
215 | * Breton
216 | * Bulgarian
217 | * Burmese
218 | * Catalan
219 | * Cebuano
220 | * Chechen
221 | * Chinese (Simplified)
222 | * Chinese (Traditional)
223 | * Chuvash
224 | * Croatian
225 | * Czech
226 | * Danish
227 | * Dutch
228 | * English
229 | * Estonian
230 | * Finnish
231 | * French
232 | * Galician
233 | * Georgian
234 | * German
235 | * Greek
236 | * Gujarati
237 | * Haitian
238 | * Hebrew
239 | * Hindi
240 | * Hungarian
241 | * Icelandic
242 | * Ido
243 | * Indonesian
244 | * Irish
245 | * Italian
246 | * Japanese
247 | * Javanese
248 | * Kannada
249 | * Kazakh
250 | * Kirghiz
251 | * Korean
252 | * Latin
253 | * Latvian
254 | * Lithuanian
255 | * Lombard
256 | * Low Saxon
257 | * Luxembourgish
258 | * Macedonian
259 | * Malagasy
260 | * Malay
261 | * Malayalam
262 | * Marathi
263 | * Minangkabau
264 | * Nepali
265 | * Newar
266 | * Norwegian (Bokmal)
267 | * Norwegian (Nynorsk)
268 | * Occitan
269 | * Persian (Farsi)
270 | * Piedmontese
271 | * Polish
272 | * Portuguese
273 | * Punjabi
274 | * Romanian
275 | * Russian
276 | * Scots
277 | * Serbian
278 | * Serbo-Croatian
279 | * Sicilian
280 | * Slovak
281 | * Slovenian
282 | * South Azerbaijani
283 | * Spanish
284 | * Sundanese
285 | * Swahili
286 | * Swedish
287 | * Tagalog
288 | * Tajik
289 | * Tamil
290 | * Tatar
291 | * Telugu
292 | * Turkish
293 | * Ukrainian
294 | * Urdu
295 | * Uzbek
296 | * Vietnamese
297 | * Volapük
298 | * Waray-Waray
299 | * Welsh
300 | * West Frisian
301 | * Western Punjabi
302 | * Yoruba
303 |
304 | The **Multilingual Cased (New)** release additionally contains **Thai** and
305 | **Mongolian**, which were not included in the original release.
306 |
--------------------------------------------------------------------------------
/optimization.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Functions and classes related to optimization (weight updates)."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import re
22 | import tensorflow as tf
23 |
24 |
25 | def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
26 | """Creates an optimizer training op."""
27 | global_step = tf.train.get_or_create_global_step()
28 |
29 | learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
30 |
31 | # Implements linear decay of the learning rate.
32 | learning_rate = tf.train.polynomial_decay(
33 | learning_rate,
34 | global_step,
35 | num_train_steps,
36 | end_learning_rate=0.0,
37 | power=1.0,
38 | cycle=False)
39 |
40 | # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
41 | # learning rate will be `global_step/num_warmup_steps * init_lr`.
42 | if num_warmup_steps:
43 | global_steps_int = tf.cast(global_step, tf.int32)
44 | warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
45 |
46 | global_steps_float = tf.cast(global_steps_int, tf.float32)
47 | warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
48 |
49 | warmup_percent_done = global_steps_float / warmup_steps_float
50 | warmup_learning_rate = init_lr * warmup_percent_done
51 |
52 | is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
53 | learning_rate = (
54 | (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
55 |
56 | # It is recommended that you use this optimizer for fine tuning, since this
57 | # is how the model was trained (note that the Adam m/v variables are NOT
58 | # loaded from init_checkpoint.)
59 | optimizer = AdamWeightDecayOptimizer(
60 | learning_rate=learning_rate,
61 | weight_decay_rate=0.01,
62 | beta_1=0.9,
63 | beta_2=0.999,
64 | epsilon=1e-6,
65 | exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
66 |
67 | if use_tpu:
68 | optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
69 |
70 | tvars = tf.trainable_variables()
71 | grads = tf.gradients(loss, tvars)
72 |
73 | # This is how the model was pre-trained.
74 | (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
75 |
76 | train_op = optimizer.apply_gradients(
77 | zip(grads, tvars), global_step=global_step)
78 |
79 | # Normally the global step update is done inside of `apply_gradients`.
80 | # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
81 | # a different optimizer, you should probably take this line out.
82 | new_global_step = global_step + 1
83 | train_op = tf.group(train_op, [global_step.assign(new_global_step)])
84 | return train_op
85 |
86 |
87 | class AdamWeightDecayOptimizer(tf.train.Optimizer):
88 | """A basic Adam optimizer that includes "correct" L2 weight decay."""
89 |
90 | def __init__(self,
91 | learning_rate,
92 | weight_decay_rate=0.0,
93 | beta_1=0.9,
94 | beta_2=0.999,
95 | epsilon=1e-6,
96 | exclude_from_weight_decay=None,
97 | name="AdamWeightDecayOptimizer"):
98 | """Constructs a AdamWeightDecayOptimizer."""
99 | super(AdamWeightDecayOptimizer, self).__init__(False, name)
100 |
101 | self.learning_rate = learning_rate
102 | self.weight_decay_rate = weight_decay_rate
103 | self.beta_1 = beta_1
104 | self.beta_2 = beta_2
105 | self.epsilon = epsilon
106 | self.exclude_from_weight_decay = exclude_from_weight_decay
107 |
108 | def apply_gradients(self, grads_and_vars, global_step=None, name=None):
109 | """See base class."""
110 | assignments = []
111 | for (grad, param) in grads_and_vars:
112 | if grad is None or param is None:
113 | continue
114 |
115 | param_name = self._get_variable_name(param.name)
116 |
117 | m = tf.get_variable(
118 | name=param_name + "/adam_m",
119 | shape=param.shape.as_list(),
120 | dtype=tf.float32,
121 | trainable=False,
122 | initializer=tf.zeros_initializer())
123 | v = tf.get_variable(
124 | name=param_name + "/adam_v",
125 | shape=param.shape.as_list(),
126 | dtype=tf.float32,
127 | trainable=False,
128 | initializer=tf.zeros_initializer())
129 |
130 | # Standard Adam update.
131 | next_m = (
132 | tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
133 | next_v = (
134 | tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
135 | tf.square(grad)))
136 |
137 | update = next_m / (tf.sqrt(next_v) + self.epsilon)
138 |
139 | # Just adding the square of the weights to the loss function is *not*
140 | # the correct way of using L2 regularization/weight decay with Adam,
141 | # since that will interact with the m and v parameters in strange ways.
142 | #
143 |       # Instead we want to decay the weights in a manner that doesn't interact
144 | # with the m/v parameters. This is equivalent to adding the square
145 | # of the weights to the loss with plain (non-momentum) SGD.
146 | if self._do_use_weight_decay(param_name):
147 | update += self.weight_decay_rate * param
148 |
149 | update_with_lr = self.learning_rate * update
150 |
151 | next_param = param - update_with_lr
152 |
153 | assignments.extend(
154 | [param.assign(next_param),
155 | m.assign(next_m),
156 | v.assign(next_v)])
157 | return tf.group(*assignments, name=name)
158 |
159 | def _do_use_weight_decay(self, param_name):
160 | """Whether to use L2 weight decay for `param_name`."""
161 | if not self.weight_decay_rate:
162 | return False
163 | if self.exclude_from_weight_decay:
164 | for r in self.exclude_from_weight_decay:
165 | if re.search(r, param_name) is not None:
166 | return False
167 | return True
168 |
169 | def _get_variable_name(self, param_name):
170 | """Get the variable name from the tensor name."""
171 | m = re.match("^(.*):\\d+$", param_name)
172 | if m is not None:
173 | param_name = m.group(1)
174 | return param_name
175 |
--------------------------------------------------------------------------------
/optimization_hvd.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Functions and classes related to optimization (weight updates)."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import re
22 | import tensorflow as tf
23 |
24 | import horovod.tensorflow as hvd
25 |
26 | def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu, freeze=False):
27 | """Creates an optimizer training op."""
28 | global_step = tf.train.get_or_create_global_step()
29 |
30 | learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
31 |
32 | # Implements linear decay of the learning rate.
33 | learning_rate = tf.train.polynomial_decay(
34 | learning_rate,
35 | global_step,
36 | num_train_steps,
37 | end_learning_rate=0.0,
38 | power=1.0,
39 | cycle=False)
40 |
41 | # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
42 | # learning rate will be `global_step/num_warmup_steps * init_lr`.
43 | if num_warmup_steps:
44 | global_steps_int = tf.cast(global_step, tf.int32)
45 | warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
46 |
47 | global_steps_float = tf.cast(global_steps_int, tf.float32)
48 | warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
49 |
50 | warmup_percent_done = global_steps_float / warmup_steps_float
51 | warmup_learning_rate = init_lr * warmup_percent_done
52 |
53 | is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
54 | learning_rate = (
55 | (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
56 |
57 | # It is recommended that you use this optimizer for fine tuning, since this
58 | # is how the model was trained (note that the Adam m/v variables are NOT
59 | # loaded from init_checkpoint.)
60 | optimizer = AdamWeightDecayOptimizer(
61 | # learning_rate=learning_rate * hvd.size(),
62 | learning_rate=learning_rate,
63 | weight_decay_rate=0.01,
64 | beta_1=0.9,
65 | beta_2=0.999,
66 | epsilon=1e-6,
67 | exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
68 |
69 | optimizer = hvd.DistributedOptimizer(optimizer)
70 |
71 | if use_tpu:
72 | optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
73 |
74 | tvars = tf.trainable_variables()
75 |   # freeze: exclude BERT variables so only the non-BERT (classifier) layers are trained
76 | if freeze:
77 | tvars = [tvar for tvar in tvars if not tvar.name.startswith('bert')]
78 | grads_and_vars=optimizer.compute_gradients(loss, tvars)
79 |
80 | # This is how the model was pre-trained.
81 | # train_op = optimizer.apply_gradients(
82 | # grads_and_vars=grads_and_vars, global_step=global_step)
83 |
84 | grads = [grad for grad,var in grads_and_vars]
85 | tvars = [var for grad,var in grads_and_vars]
86 | (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
87 | train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)
88 |
89 | # Normally the global step update is done inside of `apply_gradients`.
90 | # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
91 | # a different optimizer, you should probably take this line out.
92 | new_global_step = global_step + 1
93 | train_op = tf.group(train_op, [global_step.assign(new_global_step)])
94 | return train_op
95 |
96 |
97 | class AdamWeightDecayOptimizer(tf.train.Optimizer):
98 | """A basic Adam optimizer that includes "correct" L2 weight decay."""
99 |
100 | def __init__(self,
101 | learning_rate,
102 | weight_decay_rate=0.0,
103 | beta_1=0.9,
104 | beta_2=0.999,
105 | epsilon=1e-6,
106 | exclude_from_weight_decay=None,
107 | name="AdamWeightDecayOptimizer"):
108 | """Constructs a AdamWeightDecayOptimizer."""
109 | super(AdamWeightDecayOptimizer, self).__init__(False, name)
110 |
111 | self.learning_rate = learning_rate
112 | self.weight_decay_rate = weight_decay_rate
113 | self.beta_1 = beta_1
114 | self.beta_2 = beta_2
115 | self.epsilon = epsilon
116 | self.exclude_from_weight_decay = exclude_from_weight_decay
117 |
118 | def apply_gradients(self, grads_and_vars, global_step=None, name=None):
119 | """See base class."""
120 | assignments = []
121 | for (grad, param) in grads_and_vars:
122 | if grad is None or param is None:
123 | continue
124 |
125 | param_name = self._get_variable_name(param.name)
126 |
127 | m = tf.get_variable(
128 | name=param_name + "/adam_m",
129 | shape=param.shape.as_list(),
130 | dtype=tf.float32,
131 | trainable=False,
132 | initializer=tf.zeros_initializer())
133 | v = tf.get_variable(
134 | name=param_name + "/adam_v",
135 | shape=param.shape.as_list(),
136 | dtype=tf.float32,
137 | trainable=False,
138 | initializer=tf.zeros_initializer())
139 |
140 | # Standard Adam update.
141 | next_m = (
142 | tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
143 | next_v = (
144 | tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
145 | tf.square(grad)))
146 |
147 | update = next_m / (tf.sqrt(next_v) + self.epsilon)
148 |
149 | # Just adding the square of the weights to the loss function is *not*
150 | # the correct way of using L2 regularization/weight decay with Adam,
151 | # since that will interact with the m and v parameters in strange ways.
152 | #
153 |       # Instead we want to decay the weights in a manner that doesn't interact
154 | # with the m/v parameters. This is equivalent to adding the square
155 | # of the weights to the loss with plain (non-momentum) SGD.
156 | if self._do_use_weight_decay(param_name):
157 | update += self.weight_decay_rate * param
158 |
159 | update_with_lr = self.learning_rate * update
160 |
161 | next_param = param - update_with_lr
162 |
163 | assignments.extend(
164 | [param.assign(next_param),
165 | m.assign(next_m),
166 | v.assign(next_v)])
167 | return tf.group(*assignments, name=name)
168 |
169 | def _do_use_weight_decay(self, param_name):
170 | """Whether to use L2 weight decay for `param_name`."""
171 | if not self.weight_decay_rate:
172 | return False
173 | if self.exclude_from_weight_decay:
174 | for r in self.exclude_from_weight_decay:
175 | if re.search(r, param_name) is not None:
176 | return False
177 | return True
178 |
179 | def _get_variable_name(self, param_name):
180 | """Get the variable name from the tensor name."""
181 | m = re.match("^(.*):\\d+$", param_name)
182 | if m is not None:
183 | param_name = m.group(1)
184 | return param_name
185 |
--------------------------------------------------------------------------------
/optimization_test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 |
19 | import optimization
20 | import tensorflow as tf
21 |
22 |
23 | class OptimizationTest(tf.test.TestCase):
24 |
25 | def test_adam(self):
26 | with self.test_session() as sess:
27 | w = tf.get_variable(
28 | "w",
29 | shape=[3],
30 | initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
31 | x = tf.constant([0.4, 0.2, -0.5])
32 | loss = tf.reduce_mean(tf.square(x - w))
33 | tvars = tf.trainable_variables()
34 | grads = tf.gradients(loss, tvars)
35 | global_step = tf.train.get_or_create_global_step()
36 | optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
37 | train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
38 | init_op = tf.group(tf.global_variables_initializer(),
39 | tf.local_variables_initializer())
40 | sess.run(init_op)
41 | for _ in range(100):
42 | sess.run(train_op)
43 | w_np = sess.run(w)
44 | self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
45 |
46 |
47 | if __name__ == "__main__":
48 | tf.test.main()
49 |
--------------------------------------------------------------------------------
/pretrain_data/sample_text.txt:
--------------------------------------------------------------------------------
1 | This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত
2 | Text should be one-sentence-per-line, with empty lines between documents.
3 | This sample text is public domain and was randomly selected from Project Gutenberg.
4 |
5 | The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors.
6 | Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity.
7 | Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them.
8 | "Cass" Beard had risen early that morning, but not with a view to discovery.
9 | A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets.
10 | The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency.
11 | This was nearly opposite.
12 | Mr. Cassius crossed the highway, and stopped suddenly.
13 | Something glittered in the nearest red pool before him.
14 | Gold, surely!
15 | But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring.
16 | Looking at it more attentively, he saw that it bore the inscription, "May to Cass."
17 | Like most of his fellow gold-seekers, Cass was superstitious.
18 |
19 | The fountain of classic wisdom, Hypatia herself.
20 | As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge.
21 | From my youth I felt in me a soul above the matter-entangled herd.
22 | She revealed to me the glorious fact, that I am a spark of Divinity itself.
23 | A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's.
24 | There is a philosophic pleasure in opening one's treasures to the modest young.
25 | Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street.
26 | Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide;
27 | but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind.
28 | Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now.
29 | His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert;
30 | while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts.
31 | At last they reached the quay at the opposite end of the street;
32 | and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers.
33 | He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
34 |
--------------------------------------------------------------------------------
/read_data.py:
--------------------------------------------------------------------------------
1 | import gzip
2 | import pandas as pd
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import json
6 | # from sklearn.model_selection import train_test_split
7 | from collections import Counter
8 | import csv
9 | import tensorflow as tf
10 | import os.path
11 | # from os import listdir
12 | from tensorflow import keras
13 | import os
14 | import re
15 | import spacy
16 | import time
17 | import math
18 | import xml.etree.ElementTree as ET
19 | import codecs
20 | from collections import defaultdict
21 |
22 |
23 | def pause():
24 | int(input("enter a num to cont..."))
25 |
26 |
27 | def clean(path, dataset=None):
28 | filename = 'reviews_clean.json'
29 | with open(filename, 'w') as f:
30 | for line in open(path):
31 | if dataset == 'meta':
32 | line = line.replace('\\r', ' ')
33 | elif dataset == 'amazon':
34 | line = line.replace('\000','')
35 | f.write(line)
36 | return filename
37 |
38 |
39 | def parse(path):
40 | # g = gzip.open(path, 'rb')
41 | g = open(path, 'r')
42 | for l in g:
43 | # yield eval(l)
44 | yield json.loads(l) # deal with null
45 |
46 |
47 | def getDF(path):
48 | i = 0
49 | df = {}
50 | for d in parse(path):
51 | df[i] = d
52 | i += 1
53 | return pd.DataFrame.from_dict(df, orient='index')
54 |
55 |
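# Recursively follow children_ids through the comment tree, writing one sentence per line.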
56 | def get_child(child, df, nlp, f):
57 | if isinstance(child, float):
58 | f.write('\n')
59 | return
60 |
61 | for c in child.split():
62 | if c in df.keys():
63 | line = str(df[c]['clean_text'])
64 | doc = nlp(line)
65 | for sent in doc.sents:
66 | f.write(str(sent).rstrip())
67 | f.write('\n')
68 | df[c]['visited'] = True
69 |
70 | return get_child(df[c]['children_ids'],
71 | df, nlp, f)
72 |
73 |
74 | def read_tsv(input_file):
75 | with tf.gfile.Open(input_file, "r") as f:
76 | reader = csv.reader(f, delimiter="\t")
77 | lines = []
78 | for line in reader:
79 | lines.append(line)
80 | return lines
81 |
82 |
83 | def train_test_split(counts, pct_train=0.7, pct_dev=0.2, pct_test=0.1):
84 | total = sum([v for _, v in counts.items()])
85 | n_train = int(total * pct_train)
86 | n_dev = int(total * pct_dev)
87 | # n_test = int(total * pct_test)
88 |
89 | train = []
90 | dev = []
91 | test = []
92 |
93 | current_train = 0
94 | current_dev = 0
95 |
96 | for k, v in counts.items():
97 | if current_train + v <= n_train:
98 | train.append(k)
99 | current_train += v
100 | elif current_dev + v <= n_dev:
101 | dev.append(k)
102 | current_dev += v
103 | else:
104 | test.append(k)
105 |
106 | return train, dev, test
107 |
108 |
109 | def over_sample(df, col_name):
110 | labels, values = zip(*Counter(df[col_name].values).items())
111 | seed = int(np.max(values) / np.min(values))
112 | oversample = df.loc[df[col_name] == labels[values.index(np.min(values))]]
113 | for i in range(seed - 1):
114 | df = df.append(oversample)
115 | # shuffle
116 | df = df.sample(frac=1)
117 | return df
118 |
119 |
120 | def read_meta(argv=None):
121 | review_clean = clean('/home/ydu/BERT/DATA/metacritic/reviews.json', 'meta')
122 |
123 | df = getDF(review_clean)
124 |
125 | df = df[['title','text','score']]
126 | df.score = df.score.astype(int)
127 |
128 | df['senti'] = -1
129 |     df.loc[df.score >= 7, 'senti'] = 1
130 |     df.loc[df.score <= 4, 'senti'] = 0
131 | df = df.loc[df['senti'] != -1]
132 | df = df.drop(columns=['score'])
133 |
134 | counts = dict(Counter(df.title.values))
135 | train_labels, dev_labels, test_labels = train_test_split(counts)
136 |
137 | # oversample in training
138 | train = df.loc[df['title'].isin(train_labels)]
139 | train = over_sample(train, 'senti')
140 |
141 | # oversample in dev
142 | dev = df.loc[df['title'].isin(dev_labels)]
143 | dev = over_sample(dev, 'senti')
144 |
145 | test = df.loc[df['title'].isin(test_labels)]
146 |
147 | train = train.drop(columns=['title'])
148 | dev = dev.drop(columns=['title'])
149 | test = test.drop(columns=['title'])
150 | df = df.drop(columns=['title'])
151 |
152 |
153 | train.to_csv('/home/ydu/BERT/DATA/metacritic/train.tsv', index=False, sep='\t')
154 | dev.to_csv('/home/ydu/BERT/DATA/metacritic/dev.tsv', index=False, sep='\t')
155 | test.to_csv('/home/ydu/BERT/DATA/metacritic/test.tsv', index=False, sep='\t')
156 | df.to_csv('/home/ydu/BERT/DATA/metacritic/all.tsv', index=False, sep='\t')
157 |
158 |
159 | def read_reddit(argv=None):
160 | df = pd.read_csv('/home/ydu/BERT/DATA/reddit/posts_with_ids.csv')
161 | df = df.dropna(subset=['text'])
162 | df['clean_text'] = df['text'].apply(lambda x: ' '.join(x.split()))
163 | df['visited'] = False
164 | df.set_index('post_id', inplace=True)
165 | df = df.drop(columns=['text'])
166 |
167 | df = df.to_dict(orient='index')
168 |
169 | nlp = spacy.load('en_core_web_sm')
170 |
171 | start_time = time.time()
172 |
173 | f = open('pretrain_data/txt/pretrain_texttree.txt', 'w')
174 |
175 | for k, _ in df.items():
176 | if not df[k]['visited']:
177 | doc = nlp(str(df[k]['clean_text']))
178 | for sent in doc.sents:
179 | f.write(str(sent).rstrip())
180 | f.write('\n')
181 | df[k]['visited'] = True
182 | get_child(df[k]['children_ids'], df, nlp, f)
183 |
184 | f.close()
185 |
186 | print("--- %s sec ---" % (time.time() - start_time))
187 |
188 |
189 | def read_amazon(argv=None):
190 | review_clean = clean('/home/ydu/BERT/DATA/amazon/aggressive_dedup_video_games.json', 'amazon')
191 | df = getDF(review_clean)
192 | df = df[['asin','reviewText','overall']]
193 | df.overall = df.overall.astype(int)
194 | df['senti'] = -1
195 |     df.loc[df.overall <= 2, 'senti'] = 0
196 |     df.loc[df.overall >= 4, 'senti'] = 1
197 | df = df.loc[df['senti'] != -1]
198 | df = df.drop(columns=['overall'])
199 |
200 | df = df.rename(columns={"reviewText": "text"})
201 |
202 | counts = dict(Counter(df.asin.values))
203 | train_labels, dev_labels, test_labels = train_test_split(counts)
204 |
205 | # oversample in training
206 | train = df.loc[df['asin'].isin(train_labels)]
207 | train = over_sample(train, 'senti')
208 | # oversample in dev
209 | dev = df.loc[df['asin'].isin(dev_labels)]
210 | dev = over_sample(dev, 'senti')
211 |
212 | test = df.loc[df['asin'].isin(test_labels)]
213 |
214 | train = train.drop(columns=['asin'])
215 | dev = dev.drop(columns=['asin'])
216 | test = test.drop(columns=['asin'])
217 | df = df.drop(columns=['asin'])
218 |
219 | train.to_csv('/home/ydu/BERT/DATA/amazon/train.tsv', index=False, sep='\t')
220 | dev.to_csv('/home/ydu/BERT/DATA/amazon/dev.tsv', index=False, sep='\t')
221 | test.to_csv('/home/ydu/BERT/DATA/amazon/test.tsv', index=False, sep='\t')
222 | df.to_csv('/home/ydu/BERT/DATA/amazon/all.tsv', index=False, sep='\t')
223 |
224 |
225 | # Load all files from a directory in a DataFrame.
226 | def load_directory_data(directory):
227 | data = {}
228 | data["text"] = []
229 | for file_path in os.listdir(directory):
230 | with tf.gfile.GFile(os.path.join(directory, file_path), "r") as f:
231 | data["text"].append(f.read())
232 | return pd.DataFrame.from_dict(data)
233 |
234 | # Merge positive and negative examples, add a polarity column and shuffle.
235 | def load_dataset(directory):
236 | pos_df = load_directory_data(os.path.join(directory, "pos"))
237 | neg_df = load_directory_data(os.path.join(directory, "neg"))
238 | pos_df["senti"] = 1
239 | neg_df["senti"] = 0
240 | return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)
241 |
242 | # Download and process the dataset files.
243 | def download_and_load_datasets(force_download=False):
244 | dataset = tf.keras.utils.get_file(
245 | fname="aclImdb.tar.gz",
246 | origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
247 | extract=True)
248 |
249 | train_df = load_dataset(os.path.join(os.path.dirname(dataset),
250 | "aclImdb", "train"))
251 | test_df = load_dataset(os.path.join(os.path.dirname(dataset),
252 | "aclImdb", "test"))
253 | df = pd.concat([train_df,test_df]).sample(frac=1).reset_index(drop=True)
254 |
255 | return train_df, test_df, df
256 |
257 |
258 | def read_imdb():
259 | train, dev, df = download_and_load_datasets()
260 | train.to_csv('/home/ydu/BERT/DATA/imdb/train.tsv', index=False, sep='\t')
261 | dev.to_csv('/home/ydu/BERT/DATA/imdb/dev.tsv', index=False, sep='\t')
262 | df.to_csv('/home/ydu/BERT/DATA/imdb/all.tsv', index=False, sep='\t')
263 |
264 |
265 | def read_amazon_xml(argv=None):
266 | filepath = '/home/ydu/BERT/bengio/sorted_data/'
267 | save_to = '/home/ydu/BERT/bengio/data_all/'
268 |
269 | f = open(save_to+'all_text/amazon_pretrain_text.txt', 'w')
270 |
271 | # get all review text for pre-training
272 | # num_l = 0
273 | # start_time = time.time()
274 |
275 | for folder in os.listdir(filepath):
276 | path = filepath + folder
277 | fn = path+'/all.review'
278 | print(fn)
279 |
280 | if os.path.exists(fn):
281 | with codecs.open(fn,'r+',encoding='utf-8', errors='ignore') as ff:
282 | test_data = ff.readlines()
283 |
284 | test_data = [line.rstrip() for line in test_data] # All lines including the blank ones
285 | test_data = [line for line in test_data if line] # Non-blank lines
286 |
287 | count = 0
288 | i=0
289 | while i < len(test_data):
290 | # start_time = time.time()
291 | # num_l += 1
292 | line = test_data[i]
293 | j = i+1
294 | i+=1
295 | if line == '':
296 | nextline = test_data[j]
297 | while nextline != '':
298 | f.write(nextline)
299 | f.write('\n')
300 | j+=1
301 | nextline = test_data[j]
302 | count+=1
303 | f.write('\n')
304 | i += count
305 | # if num_l % 10000 == 0:
306 | # print("--- %s sec ---" % (time.time() - start_time))
307 | # start_time = time.time()
308 |
309 | f.close()
310 |
311 | # raise SystemExit
312 |
313 | # get training data for classification using benchmark dataset
314 | category = ['books/','kitchen/', 'electronics/','dvd/']
315 | filename = ['negative.review','positive.review']
316 | senti = [0, 1]
317 |
318 | for c in category:
319 | path = filepath + c
320 | count = 0
321 | train = pd.DataFrame()
322 | # dev = pd.DataFrame()
323 | # test = pd.DataFrame()
324 |
325 | for i in range(len(filename)):
326 | fn = path + filename[i]
327 | print(fn)
328 | text = defaultdict(list)
329 | asin = []
330 | label = []
331 |
332 | with codecs.open(fn,'r+',encoding='utf-8', errors='ignore') as f:
333 | test_data = f.readlines()
334 |
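            # Blocks are blank-line delimited: after a blank line, the first
            # block is collected as asin entries and the following block as
            # the review text for that record.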
335 | while test_data:
336 | line = test_data.pop(0).strip()
337 |
338 | if line == '':
339 | nextline = test_data.pop(0).strip()
340 | while nextline != '':
341 | asin.append(nextline)
342 | nextline = test_data.pop(0).strip()
343 | if line == '':
344 | nextline = test_data.pop(0).strip()
345 | while nextline != '':
346 | text[count].append(nextline)
347 | nextline = test_data.pop(0).strip()
348 | label.append(senti[i])
349 | count+=1
350 |
351 | for k, _ in text.items():
352 | text[k] = ''.join(text[k])
353 | df = pd.DataFrame.from_dict(text, orient='index')
354 | df = df.rename(columns={0: "text"})
355 | df['asin'] = asin
356 | df['senti'] = label
357 | df = df[['asin','text','senti']]
358 |
359 | # counts = dict(Counter(df.asin.values))
360 | # train_labels, dev_labels, test_labels = train_test_split(counts)
361 |
362 | # train = pd.concat([train, df.loc[df['asin'].isin(train_labels)]], ignore_index=True)
363 | # dev = pd.concat([dev, df.loc[df['asin'].isin(dev_labels)]], ignore_index=True)
364 | # test = pd.concat([test, df.loc[df['asin'].isin(test_labels)]], ignore_index=True)
365 |
366 | train = pd.concat([train, df], ignore_index=True)
367 |
368 | if not os.path.exists(save_to+c):
369 | os.makedirs(save_to+c)
370 |
371 | train.to_csv(save_to+c+'train.tsv', index=False, sep='\t')
372 |
373 | # train.to_csv(save_to+c+'train.tsv', index=False, sep='\t')
374 | # dev.to_csv(save_to+c+'dev.tsv', index=False, sep='\t')
375 | # test.to_csv(save_to+c+'test.tsv', index=False, sep='\t')
376 |
377 |
378 | def read_all_pretrain():
379 | dataset = ['amazon/', 'metacritic/','imdb/']
380 | filename = ['train.tsv','dev.tsv','test.tsv']
381 | path = '/home/ydu/BERT/DATA/'
382 | text = []
383 |
384 | for d in dataset:
385 | folder = path + d
386 | for f in filename:
387 | fn = folder + f
388 | if os.path.exists(fn):
389 | print(fn)
390 | df = pd.read_csv(fn, sep='\t')
391 | text.append(df[df.columns[0]].tolist())
392 | text = [t for sublist in text for t in sublist]
393 |
394 | nlp = spacy.load('en_core_web_sm')
395 |
396 | f = open('/home/ydu/BERT/DATA/all4data/all4data.txt', 'w')
397 |
398 | count=0
399 | start_time = time.time()
400 | for t in text:
401 | count+=1
402 | doc = nlp(str(t))
403 | for sent in doc.sents:
404 | f.write(str(sent).rstrip())
405 | f.write('\n')
406 | f.write('\n')
407 | if count%10000==0:
408 | print("--- %s sec ---" % (time.time() - start_time))
409 | start_time = time.time()
410 |
411 | f.close()
412 |
413 | # reddit data has tree structure,
414 | # append from pretrain_data/txt/pretrain_texttree.txt
415 | f = open('/home/ydu/BERT/DATA/all4data/all4data.txt', 'a')
416 | ff = open('/home/ydu/BERT/bert_mgpu/pretrain_data/txt/pretrain_texttree.txt', 'r')
417 |
418 | for line in ff:
419 | f.write(line)
420 |
421 | f.close()
422 | ff.close()
423 |
424 |
425 | def read_ami_train():
426 | dataset = ['amazon/', 'metacritic/','imdb/']
427 | filename = ['train.tsv','dev.tsv','test.tsv']
428 | path = '/home/ydu/BERT/DATA/'
429 |
430 | for f in filename:
431 | df = pd.DataFrame()
432 | for d in dataset:
433 | fn = path + d + f
434 | if os.path.exists(fn):
435 | print(fn)
436 | df = pd.concat([df, pd.read_csv(fn, sep='\t')]).sample(frac=1).reset_index(drop=True)
437 | df.to_csv(path+'all4data/'+f, index=False, sep='\t')
438 |
439 |
440 | def split_txt():
441 | lines_per_file = 3000000
442 | smallfile = None
443 |
444 | count=0
445 | start_time = time.time()
446 |
447 | with open('/home/ydu/BERT/DATA/all4data/all4data.txt') as bigfile:
448 | for lineno, line in enumerate(bigfile):
449 | if lineno % lines_per_file == 0:
450 | if smallfile:
451 | smallfile.close()
452 | small_filename = '/home/ydu/BERT/DATA/all4data/all4data_{}.txt'.format(lineno + lines_per_file)
453 | smallfile = open(small_filename, "w")
454 | smallfile.write(line)
455 |
456 | count+=1
457 | if count % 1000000 == 0:
458 | print("--- %s sec ---" % (time.time() - start_time))
459 | start_time = time.time()
460 |
461 | if smallfile:
462 | smallfile.close()
463 |
464 |
465 | if __name__ == '__main__':
466 | read_meta()
467 | # read_amazon()
468 | # read_imdb()
469 | # read_reddit()
470 |
471 | # read_all_pretrain() # read text from all 4 dataset as pre-train (no senti label)
472 | # split_txt() # naive split all4data.txt into small chunks
473 |
474 | # read_ami_train()
475 |
476 | # read_amazon_xml() # bengio's experiments
477 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow >= 1.11.0 # CPU Version of TensorFlow.
2 | # tensorflow-gpu >= 1.11.0 # GPU version of TensorFlow.
3 |
--------------------------------------------------------------------------------
/run_classifier.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """BERT finetuning runner."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import collections
22 | import csv
23 | import os
24 | import modeling
25 | import optimization
26 | import tokenization
27 | import tensorflow as tf
28 |
29 | flags = tf.flags
30 |
31 | FLAGS = flags.FLAGS
32 |
33 | ## Required parameters
34 | flags.DEFINE_string(
35 | "data_dir", None,
36 | "The input data dir. Should contain the .tsv files (or other data files) "
37 | "for the task.")
38 |
39 | flags.DEFINE_string(
40 | "bert_config_file", None,
41 | "The config json file corresponding to the pre-trained BERT model. "
42 | "This specifies the model architecture.")
43 |
44 | flags.DEFINE_string("task_name", None, "The name of the task to train.")
45 |
46 | flags.DEFINE_string("vocab_file", None,
47 | "The vocabulary file that the BERT model was trained on.")
48 |
49 | flags.DEFINE_string(
50 | "output_dir", None,
51 | "The output directory where the model checkpoints will be written.")
52 |
53 | ## Other parameters
54 |
55 | flags.DEFINE_string(
56 | "init_checkpoint", None,
57 | "Initial checkpoint (usually from a pre-trained BERT model).")
58 |
59 | flags.DEFINE_bool(
60 | "do_lower_case", True,
61 | "Whether to lower case the input text. Should be True for uncased "
62 | "models and False for cased models.")
63 |
64 | flags.DEFINE_integer(
65 | "max_seq_length", 128,
66 | "The maximum total input sequence length after WordPiece tokenization. "
67 | "Sequences longer than this will be truncated, and sequences shorter "
68 | "than this will be padded.")
69 |
70 | flags.DEFINE_bool("do_train", False, "Whether to run training.")
71 |
72 | flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
73 |
74 | flags.DEFINE_bool(
75 | "do_predict", False,
76 | "Whether to run the model in inference mode on the test set.")
77 |
78 | flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
79 |
80 | flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
81 |
82 | flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
83 |
84 | flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
85 |
86 | flags.DEFINE_float("num_train_epochs", 3.0,
87 | "Total number of training epochs to perform.")
88 |
89 | flags.DEFINE_float(
90 | "warmup_proportion", 0.1,
91 | "Proportion of training to perform linear learning rate warmup for. "
92 | "E.g., 0.1 = 10% of training.")
93 |
94 | flags.DEFINE_integer("save_checkpoints_steps", 1000,
95 | "How often to save the model checkpoint.")
96 |
97 | flags.DEFINE_integer("iterations_per_loop", 1000,
98 | "How many steps to make in each estimator call.")
99 |
100 | flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
101 |
102 | tf.flags.DEFINE_string(
103 | "tpu_name", None,
104 | "The Cloud TPU to use for training. This should be either the name "
105 | "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
106 | "url.")
107 |
108 | tf.flags.DEFINE_string(
109 | "tpu_zone", None,
110 | "[Optional] GCE zone where the Cloud TPU is located in. If not "
111 | "specified, we will attempt to automatically detect the GCE project from "
112 | "metadata.")
113 |
114 | tf.flags.DEFINE_string(
115 | "gcp_project", None,
116 | "[Optional] Project name for the Cloud TPU-enabled project. If not "
117 | "specified, we will attempt to automatically detect the GCE project from "
118 | "metadata.")
119 |
120 | tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
121 |
122 | flags.DEFINE_integer(
123 | "num_tpu_cores", 8,
124 | "Only used if `use_tpu` is True. Total number of TPU cores to use.")
125 |
126 |
127 | class InputExample(object):
128 | """A single training/test example for simple sequence classification."""
129 |
130 | def __init__(self, guid, text_a, text_b=None, label=None):
131 | """Constructs a InputExample.
132 |
133 | Args:
134 | guid: Unique id for the example.
135 | text_a: string. The untokenized text of the first sequence. For single
136 | sequence tasks, only this sequence must be specified.
137 | text_b: (Optional) string. The untokenized text of the second sequence.
138 | Must only be specified for sequence pair tasks.
139 | label: (Optional) string. The label of the example. This should be
140 | specified for train and dev examples, but not for test examples.
141 | """
142 | self.guid = guid
143 | self.text_a = text_a
144 | self.text_b = text_b
145 | self.label = label
146 |
147 |
148 | class PaddingInputExample(object):
149 | """Fake example so the num input examples is a multiple of the batch size.
150 |
151 | When running eval/predict on the TPU, we need to pad the number of examples
152 | to be a multiple of the batch size, because the TPU requires a fixed batch
153 | size. The alternative is to drop the last batch, which is bad because it means
154 | the entire output data won't be generated.
155 |
156 | We use this class instead of `None` because treating `None` as padding
157 | batches could cause silent errors.
158 | """
159 |
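# Example: with eval_batch_size=8 and 21 real examples, main() below appends
# three PaddingInputExample instances so that 24 examples form exactly three
# batches; padded rows get is_real_example=False and a metric weight of 0.0.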
160 |
161 | class InputFeatures(object):
162 | """A single set of features of data."""
163 |
164 | def __init__(self,
165 | input_ids,
166 | input_mask,
167 | segment_ids,
168 | label_id,
169 | is_real_example=True):
170 | self.input_ids = input_ids
171 | self.input_mask = input_mask
172 | self.segment_ids = segment_ids
173 | self.label_id = label_id
174 | self.is_real_example = is_real_example
175 |
176 |
177 | class DataProcessor(object):
178 | """Base class for data converters for sequence classification data sets."""
179 |
180 | def get_train_examples(self, data_dir):
181 | """Gets a collection of `InputExample`s for the train set."""
182 | raise NotImplementedError()
183 |
184 | def get_dev_examples(self, data_dir):
185 | """Gets a collection of `InputExample`s for the dev set."""
186 | raise NotImplementedError()
187 |
188 | def get_test_examples(self, data_dir):
189 | """Gets a collection of `InputExample`s for prediction."""
190 | raise NotImplementedError()
191 |
192 | def get_labels(self):
193 | """Gets the list of labels for this data set."""
194 | raise NotImplementedError()
195 |
196 | @classmethod
197 | def _read_tsv(cls, input_file, quotechar=None):
198 | """Reads a tab separated value file."""
199 | with tf.gfile.Open(input_file, "r") as f:
200 | reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
201 | lines = []
202 | for line in reader:
203 | lines.append(line)
204 | return lines
205 |
206 |
207 | class XnliProcessor(DataProcessor):
208 | """Processor for the XNLI data set."""
209 |
210 | def __init__(self):
211 | self.language = "zh"
212 |
213 | def get_train_examples(self, data_dir):
214 | """See base class."""
215 | lines = self._read_tsv(
216 | os.path.join(data_dir, "multinli",
217 | "multinli.train.%s.tsv" % self.language))
218 | examples = []
219 | for (i, line) in enumerate(lines):
220 | if i == 0:
221 | continue
222 | guid = "train-%d" % (i)
223 | text_a = tokenization.convert_to_unicode(line[0])
224 | text_b = tokenization.convert_to_unicode(line[1])
225 | label = tokenization.convert_to_unicode(line[2])
226 | if label == tokenization.convert_to_unicode("contradictory"):
227 | label = tokenization.convert_to_unicode("contradiction")
228 | examples.append(
229 | InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
230 | return examples
231 |
232 | def get_dev_examples(self, data_dir):
233 | """See base class."""
234 | lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
235 | examples = []
236 | for (i, line) in enumerate(lines):
237 | if i == 0:
238 | continue
239 | guid = "dev-%d" % (i)
240 | language = tokenization.convert_to_unicode(line[0])
241 | if language != tokenization.convert_to_unicode(self.language):
242 | continue
243 | text_a = tokenization.convert_to_unicode(line[6])
244 | text_b = tokenization.convert_to_unicode(line[7])
245 | label = tokenization.convert_to_unicode(line[1])
246 | examples.append(
247 | InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
248 | return examples
249 |
250 | def get_labels(self):
251 | """See base class."""
252 | return ["contradiction", "entailment", "neutral"]
253 |
254 |
255 | class MnliProcessor(DataProcessor):
256 | """Processor for the MultiNLI data set (GLUE version)."""
257 |
258 | def get_train_examples(self, data_dir):
259 | """See base class."""
260 | return self._create_examples(
261 | self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
262 |
263 | def get_dev_examples(self, data_dir):
264 | """See base class."""
265 | return self._create_examples(
266 | self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
267 | "dev_matched")
268 |
269 | def get_test_examples(self, data_dir):
270 | """See base class."""
271 | return self._create_examples(
272 | self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
273 |
274 | def get_labels(self):
275 | """See base class."""
276 | return ["contradiction", "entailment", "neutral"]
277 |
278 | def _create_examples(self, lines, set_type):
279 | """Creates examples for the training and dev sets."""
280 | examples = []
281 | for (i, line) in enumerate(lines):
282 | if i == 0:
283 | continue
284 | guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
285 | text_a = tokenization.convert_to_unicode(line[8])
286 | text_b = tokenization.convert_to_unicode(line[9])
287 | if set_type == "test":
288 | label = "contradiction"
289 | else:
290 | label = tokenization.convert_to_unicode(line[-1])
291 | examples.append(
292 | InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
293 | return examples
294 |
295 |
296 | class MrpcProcessor(DataProcessor):
297 | """Processor for the MRPC data set (GLUE version)."""
298 |
299 | def get_train_examples(self, data_dir):
300 | """See base class."""
301 | return self._create_examples(
302 | self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
303 |
304 | def get_dev_examples(self, data_dir):
305 | """See base class."""
306 | return self._create_examples(
307 | self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
308 |
309 | def get_test_examples(self, data_dir):
310 | """See base class."""
311 | return self._create_examples(
312 | self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
313 |
314 | def get_labels(self):
315 | """See base class."""
316 | return ["0", "1"]
317 |
318 | def _create_examples(self, lines, set_type):
319 | """Creates examples for the training and dev sets."""
320 | examples = []
321 | for (i, line) in enumerate(lines):
322 | if i == 0:
323 | continue
324 | guid = "%s-%s" % (set_type, i)
325 | text_a = tokenization.convert_to_unicode(line[3])
326 | text_b = tokenization.convert_to_unicode(line[4])
327 | if set_type == "test":
328 | label = "0"
329 | else:
330 | label = tokenization.convert_to_unicode(line[0])
331 | examples.append(
332 | InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
333 | return examples
334 |
335 |
336 | class ColaProcessor(DataProcessor):
337 | """Processor for the CoLA data set (GLUE version)."""
338 |
339 | def get_train_examples(self, data_dir):
340 | """See base class."""
341 | return self._create_examples(
342 | self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
343 |
344 | def get_dev_examples(self, data_dir):
345 | """See base class."""
346 | return self._create_examples(
347 | self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
348 |
349 | def get_test_examples(self, data_dir):
350 | """See base class."""
351 | return self._create_examples(
352 | self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
353 |
354 | def get_labels(self):
355 | """See base class."""
356 | return ["0", "1"]
357 |
358 | def _create_examples(self, lines, set_type):
359 | """Creates examples for the training and dev sets."""
360 | examples = []
361 | for (i, line) in enumerate(lines):
362 | # Only the test set has a header
363 | if set_type == "test" and i == 0:
364 | continue
365 | guid = "%s-%s" % (set_type, i)
366 | if set_type == "test":
367 | text_a = tokenization.convert_to_unicode(line[1])
368 | label = "0"
369 | else:
370 | text_a = tokenization.convert_to_unicode(line[3])
371 | label = tokenization.convert_to_unicode(line[1])
372 | examples.append(
373 | InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
374 | return examples
375 |
376 |
377 | def convert_single_example(ex_index, example, label_list, max_seq_length,
378 | tokenizer):
379 | """Converts a single `InputExample` into a single `InputFeatures`."""
380 |
381 | if isinstance(example, PaddingInputExample):
382 | return InputFeatures(
383 | input_ids=[0] * max_seq_length,
384 | input_mask=[0] * max_seq_length,
385 | segment_ids=[0] * max_seq_length,
386 | label_id=0,
387 | is_real_example=False)
388 |
389 | label_map = {}
390 | for (i, label) in enumerate(label_list):
391 | label_map[label] = i
392 |
393 | tokens_a = tokenizer.tokenize(example.text_a)
394 | tokens_b = None
395 | if example.text_b:
396 | tokens_b = tokenizer.tokenize(example.text_b)
397 |
398 | if tokens_b:
399 | # Modifies `tokens_a` and `tokens_b` in place so that the total
400 | # length is less than the specified length.
401 | # Account for [CLS], [SEP], [SEP] with "- 3"
402 | _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
403 | else:
404 | # Account for [CLS] and [SEP] with "- 2"
405 | if len(tokens_a) > max_seq_length - 2:
406 | tokens_a = tokens_a[0:(max_seq_length - 2)]
407 |
408 | # The convention in BERT is:
409 | # (a) For sequence pairs:
410 | # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
411 | # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
412 | # (b) For single sequences:
413 | # tokens: [CLS] the dog is hairy . [SEP]
414 | # type_ids: 0 0 0 0 0 0 0
415 | #
416 | # Where "type_ids" are used to indicate whether this is the first
417 | # sequence or the second sequence. The embedding vectors for `type=0` and
418 | # `type=1` were learned during pre-training and are added to the wordpiece
419 | # embedding vector (and position vector). This is not *strictly* necessary
420 | # since the [SEP] token unambiguously separates the sequences, but it makes
421 | # it easier for the model to learn the concept of sequences.
422 | #
423 | # For classification tasks, the first vector (corresponding to [CLS]) is
424 | # used as the "sentence vector". Note that this only makes sense because
425 | # the entire model is fine-tuned.
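  # Illustrative example: with text_a = "hello world" (no text_b) and
  # max_seq_length = 6, the code below produces
  #   tokens:      [CLS] hello world [SEP]
  #   segment_ids: 0 0 0 0 0 0
  #   input_mask:  1 1 1 1 0 0   (two zero-padded positions)
  # input_ids holds the vocab ids of the four tokens followed by two 0s.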
426 | tokens = []
427 | segment_ids = []
428 | tokens.append("[CLS]")
429 | segment_ids.append(0)
430 | for token in tokens_a:
431 | tokens.append(token)
432 | segment_ids.append(0)
433 | tokens.append("[SEP]")
434 | segment_ids.append(0)
435 |
436 | if tokens_b:
437 | for token in tokens_b:
438 | tokens.append(token)
439 | segment_ids.append(1)
440 | tokens.append("[SEP]")
441 | segment_ids.append(1)
442 |
443 | input_ids = tokenizer.convert_tokens_to_ids(tokens)
444 |
445 | # The mask has 1 for real tokens and 0 for padding tokens. Only real
446 | # tokens are attended to.
447 | input_mask = [1] * len(input_ids)
448 |
449 | # Zero-pad up to the sequence length.
450 | while len(input_ids) < max_seq_length:
451 | input_ids.append(0)
452 | input_mask.append(0)
453 | segment_ids.append(0)
454 |
455 | assert len(input_ids) == max_seq_length
456 | assert len(input_mask) == max_seq_length
457 | assert len(segment_ids) == max_seq_length
458 |
459 | label_id = label_map[example.label]
460 | if ex_index < 5:
461 | tf.logging.info("*** Example ***")
462 | tf.logging.info("guid: %s" % (example.guid))
463 | tf.logging.info("tokens: %s" % " ".join(
464 | [tokenization.printable_text(x) for x in tokens]))
465 | tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
466 | tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
467 | tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
468 | tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
469 |
470 | feature = InputFeatures(
471 | input_ids=input_ids,
472 | input_mask=input_mask,
473 | segment_ids=segment_ids,
474 | label_id=label_id,
475 | is_real_example=True)
476 | return feature
477 |
478 |
479 | def file_based_convert_examples_to_features(
480 | examples, label_list, max_seq_length, tokenizer, output_file):
481 | """Convert a set of `InputExample`s to a TFRecord file."""
482 |
483 | writer = tf.python_io.TFRecordWriter(output_file)
484 |
485 | for (ex_index, example) in enumerate(examples):
486 | if ex_index % 10000 == 0:
487 | tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
488 |
489 | feature = convert_single_example(ex_index, example, label_list,
490 | max_seq_length, tokenizer)
491 |
492 | def create_int_feature(values):
493 | f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
494 | return f
495 |
496 | features = collections.OrderedDict()
497 | features["input_ids"] = create_int_feature(feature.input_ids)
498 | features["input_mask"] = create_int_feature(feature.input_mask)
499 | features["segment_ids"] = create_int_feature(feature.segment_ids)
500 | features["label_ids"] = create_int_feature([feature.label_id])
501 | features["is_real_example"] = create_int_feature(
502 | [int(feature.is_real_example)])
503 |
504 | tf_example = tf.train.Example(features=tf.train.Features(feature=features))
505 | writer.write(tf_example.SerializeToString())
506 | writer.close()
507 |
508 |
509 | def file_based_input_fn_builder(input_file, seq_length, is_training,
510 | drop_remainder):
511 | """Creates an `input_fn` closure to be passed to TPUEstimator."""
512 |
513 | name_to_features = {
514 | "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
515 | "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
516 | "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
517 | "label_ids": tf.FixedLenFeature([], tf.int64),
518 | "is_real_example": tf.FixedLenFeature([], tf.int64),
519 | }
520 |
521 | def _decode_record(record, name_to_features):
522 | """Decodes a record to a TensorFlow example."""
523 | example = tf.parse_single_example(record, name_to_features)
524 |
525 | # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
526 | # So cast all int64 to int32.
527 | for name in list(example.keys()):
528 | t = example[name]
529 | if t.dtype == tf.int64:
530 | t = tf.to_int32(t)
531 | example[name] = t
532 |
533 | return example
534 |
535 | def input_fn(params):
536 | """The actual input function."""
537 | batch_size = params["batch_size"]
538 |
539 | # For training, we want a lot of parallel reading and shuffling.
540 | # For eval, we want no shuffling and parallel reading doesn't matter.
541 | d = tf.data.TFRecordDataset(input_file)
542 | if is_training:
543 | d = d.repeat()
544 | d = d.shuffle(buffer_size=100)
545 |
546 | d = d.apply(
547 | tf.contrib.data.map_and_batch(
548 | lambda record: _decode_record(record, name_to_features),
549 | batch_size=batch_size,
550 | drop_remainder=drop_remainder))
551 |
552 | return d
553 |
554 | return input_fn
555 |
556 |
557 | def _truncate_seq_pair(tokens_a, tokens_b, max_length):
558 | """Truncates a sequence pair in place to the maximum length."""
559 |
560 | # This is a simple heuristic which will always truncate the longer sequence
561 | # one token at a time. This makes more sense than truncating an equal percent
562 | # of tokens from each, since if one sequence is very short then each token
563 | # that's truncated likely contains more information than a longer sequence.
564 | while True:
565 | total_length = len(tokens_a) + len(tokens_b)
566 | if total_length <= max_length:
567 | break
568 | if len(tokens_a) > len(tokens_b):
569 | tokens_a.pop()
570 | else:
571 | tokens_b.pop()
572 |
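# Illustrative example: with len(tokens_a) = 10, len(tokens_b) = 5 and
# max_length = 9, the loop pops tokens_a down to 5, then (lengths now equal)
# pops tokens_b once, ending at lengths 5 and 4.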
573 |
574 | def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
575 | labels, num_labels, use_one_hot_embeddings):
576 | """Creates a classification model."""
577 | model = modeling.BertModel(
578 | config=bert_config,
579 | is_training=is_training,
580 | input_ids=input_ids,
581 | input_mask=input_mask,
582 | token_type_ids=segment_ids,
583 | use_one_hot_embeddings=use_one_hot_embeddings)
584 |
585 | # In the demo, we are doing a simple classification task on the entire
586 | # segment.
587 | #
588 | # If you want to use the token-level output, use model.get_sequence_output()
589 | # instead.
590 | output_layer = model.get_pooled_output()
591 |
592 | hidden_size = output_layer.shape[-1].value
593 |
594 | output_weights = tf.get_variable(
595 | "output_weights", [num_labels, hidden_size],
596 | initializer=tf.truncated_normal_initializer(stddev=0.02))
597 |
598 | output_bias = tf.get_variable(
599 | "output_bias", [num_labels], initializer=tf.zeros_initializer())
600 |
601 | with tf.variable_scope("loss"):
602 | if is_training:
603 | # I.e., 0.1 dropout
604 | output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
605 |
606 | logits = tf.matmul(output_layer, output_weights, transpose_b=True)
607 | logits = tf.nn.bias_add(logits, output_bias)
608 | probabilities = tf.nn.softmax(logits, axis=-1)
609 | log_probs = tf.nn.log_softmax(logits, axis=-1)
610 |
611 | one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
612 |
613 | per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
614 | loss = tf.reduce_mean(per_example_loss)
615 |
616 | return (loss, per_example_loss, logits, probabilities)
617 |
618 |
619 | def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
620 | num_train_steps, num_warmup_steps, use_tpu,
621 | use_one_hot_embeddings):
622 | """Returns `model_fn` closure for TPUEstimator."""
623 |
624 | def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
625 | """The `model_fn` for TPUEstimator."""
626 |
627 | tf.logging.info("*** Features ***")
628 | for name in sorted(features.keys()):
629 | tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
630 |
631 | input_ids = features["input_ids"]
632 | input_mask = features["input_mask"]
633 | segment_ids = features["segment_ids"]
634 | label_ids = features["label_ids"]
635 | is_real_example = None
636 | if "is_real_example" in features:
637 | is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
638 | else:
639 | is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
640 |
641 | is_training = (mode == tf.estimator.ModeKeys.TRAIN)
642 |
643 | (total_loss, per_example_loss, logits, probabilities) = create_model(
644 | bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
645 | num_labels, use_one_hot_embeddings)
646 |
647 | tvars = tf.trainable_variables()
648 | initialized_variable_names = {}
649 | scaffold_fn = None
650 | if init_checkpoint:
651 | (assignment_map, initialized_variable_names
652 | ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
653 | if use_tpu:
654 |
655 | def tpu_scaffold():
656 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
657 | return tf.train.Scaffold()
658 |
659 | scaffold_fn = tpu_scaffold
660 | else:
661 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
662 |
663 | tf.logging.info("**** Trainable Variables ****")
664 | for var in tvars:
665 | init_string = ""
666 | if var.name in initialized_variable_names:
667 | init_string = ", *INIT_FROM_CKPT*"
668 | tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
669 | init_string)
670 |
671 | output_spec = None
672 | if mode == tf.estimator.ModeKeys.TRAIN:
673 |
674 | train_op = optimization.create_optimizer(
675 | total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
676 |
677 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
678 | mode=mode,
679 | loss=total_loss,
680 | train_op=train_op,
681 | scaffold_fn=scaffold_fn)
682 | elif mode == tf.estimator.ModeKeys.EVAL:
683 |
684 | def metric_fn(per_example_loss, label_ids, logits, is_real_example):
685 | predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
686 | accuracy = tf.metrics.accuracy(
687 | labels=label_ids, predictions=predictions, weights=is_real_example)
688 | loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
689 | return {
690 | "eval_accuracy": accuracy,
691 | "eval_loss": loss,
692 | }
693 |
694 | eval_metrics = (metric_fn,
695 | [per_example_loss, label_ids, logits, is_real_example])
696 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
697 | mode=mode,
698 | loss=total_loss,
699 | eval_metrics=eval_metrics,
700 | scaffold_fn=scaffold_fn)
701 | else:
702 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
703 | mode=mode,
704 | predictions={"probabilities": probabilities},
705 | scaffold_fn=scaffold_fn)
706 | return output_spec
707 |
708 | return model_fn
709 |
710 |
711 | # This function is not used by this file but is still used by the Colab and
712 | # people who depend on it.
713 | def input_fn_builder(features, seq_length, is_training, drop_remainder):
714 | """Creates an `input_fn` closure to be passed to TPUEstimator."""
715 |
716 | all_input_ids = []
717 | all_input_mask = []
718 | all_segment_ids = []
719 | all_label_ids = []
720 |
721 | for feature in features:
722 | all_input_ids.append(feature.input_ids)
723 | all_input_mask.append(feature.input_mask)
724 | all_segment_ids.append(feature.segment_ids)
725 | all_label_ids.append(feature.label_id)
726 |
727 | def input_fn(params):
728 | """The actual input function."""
729 | batch_size = params["batch_size"]
730 |
731 | num_examples = len(features)
732 |
733 | # This is for demo purposes and does NOT scale to large data sets. We do
734 | # not use Dataset.from_generator() because that uses tf.py_func which is
735 | # not TPU compatible. The right way to load data is with TFRecordReader.
736 | d = tf.data.Dataset.from_tensor_slices({
737 | "input_ids":
738 | tf.constant(
739 | all_input_ids, shape=[num_examples, seq_length],
740 | dtype=tf.int32),
741 | "input_mask":
742 | tf.constant(
743 | all_input_mask,
744 | shape=[num_examples, seq_length],
745 | dtype=tf.int32),
746 | "segment_ids":
747 | tf.constant(
748 | all_segment_ids,
749 | shape=[num_examples, seq_length],
750 | dtype=tf.int32),
751 | "label_ids":
752 | tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
753 | })
754 |
755 | if is_training:
756 | d = d.repeat()
757 | d = d.shuffle(buffer_size=100)
758 |
759 | d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
760 | return d
761 |
762 | return input_fn
763 |
764 |
765 | # This function is not used by this file but is still used by the Colab and
766 | # people who depend on it.
767 | def convert_examples_to_features(examples, label_list, max_seq_length,
768 | tokenizer):
769 | """Convert a set of `InputExample`s to a list of `InputFeatures`."""
770 |
771 | features = []
772 | for (ex_index, example) in enumerate(examples):
773 | if ex_index % 10000 == 0:
774 | tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
775 |
776 | feature = convert_single_example(ex_index, example, label_list,
777 | max_seq_length, tokenizer)
778 |
779 | features.append(feature)
780 | return features
781 |
782 |
783 | def main(_):
784 | tf.logging.set_verbosity(tf.logging.INFO)
785 |
786 | processors = {
787 | "cola": ColaProcessor,
788 | "mnli": MnliProcessor,
789 | "mrpc": MrpcProcessor,
790 | "xnli": XnliProcessor,
791 | }
792 |
793 | tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
794 | FLAGS.init_checkpoint)
795 |
796 | if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
797 | raise ValueError(
798 | "At least one of `do_train`, `do_eval` or `do_predict' must be True.")
799 |
800 | bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
801 |
802 | if FLAGS.max_seq_length > bert_config.max_position_embeddings:
803 | raise ValueError(
804 | "Cannot use sequence length %d because the BERT model "
805 | "was only trained up to sequence length %d" %
806 | (FLAGS.max_seq_length, bert_config.max_position_embeddings))
807 |
808 | tf.gfile.MakeDirs(FLAGS.output_dir)
809 |
810 | task_name = FLAGS.task_name.lower()
811 |
812 | if task_name not in processors:
813 | raise ValueError("Task not found: %s" % (task_name))
814 |
815 | processor = processors[task_name]()
816 |
817 | label_list = processor.get_labels()
818 |
819 | tokenizer = tokenization.FullTokenizer(
820 | vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
821 |
822 | tpu_cluster_resolver = None
823 | if FLAGS.use_tpu and FLAGS.tpu_name:
824 | tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
825 | FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
826 |
827 | is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
828 | run_config = tf.contrib.tpu.RunConfig(
829 | cluster=tpu_cluster_resolver,
830 | master=FLAGS.master,
831 | model_dir=FLAGS.output_dir,
832 | save_checkpoints_steps=FLAGS.save_checkpoints_steps,
833 | tpu_config=tf.contrib.tpu.TPUConfig(
834 | iterations_per_loop=FLAGS.iterations_per_loop,
835 | num_shards=FLAGS.num_tpu_cores,
836 | per_host_input_for_training=is_per_host))
837 |
838 | train_examples = None
839 | num_train_steps = None
840 | num_warmup_steps = None
841 | if FLAGS.do_train:
842 | train_examples = processor.get_train_examples(FLAGS.data_dir)
843 | num_train_steps = int(
844 | len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
845 | num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
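    # E.g., 10,000 training examples with batch size 32 and 3.0 epochs give
    # int(10000 / 32 * 3.0) = 937 train steps and int(937 * 0.1) = 93 warmup
    # steps.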
846 |
847 | model_fn = model_fn_builder(
848 | bert_config=bert_config,
849 | num_labels=len(label_list),
850 | init_checkpoint=FLAGS.init_checkpoint,
851 | learning_rate=FLAGS.learning_rate,
852 | num_train_steps=num_train_steps,
853 | num_warmup_steps=num_warmup_steps,
854 | use_tpu=FLAGS.use_tpu,
855 | use_one_hot_embeddings=FLAGS.use_tpu)
856 |
857 | # If TPU is not available, this will fall back to normal Estimator on CPU
858 | # or GPU.
859 | estimator = tf.contrib.tpu.TPUEstimator(
860 | use_tpu=FLAGS.use_tpu,
861 | model_fn=model_fn,
862 | config=run_config,
863 | train_batch_size=FLAGS.train_batch_size,
864 | eval_batch_size=FLAGS.eval_batch_size,
865 | predict_batch_size=FLAGS.predict_batch_size)
866 |
867 | if FLAGS.do_train:
868 | train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
869 | file_based_convert_examples_to_features(
870 | train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
871 | tf.logging.info("***** Running training *****")
872 | tf.logging.info(" Num examples = %d", len(train_examples))
873 | tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
874 | tf.logging.info(" Num steps = %d", num_train_steps)
875 | train_input_fn = file_based_input_fn_builder(
876 | input_file=train_file,
877 | seq_length=FLAGS.max_seq_length,
878 | is_training=True,
879 | drop_remainder=True)
880 | estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
881 |
882 | if FLAGS.do_eval:
883 | eval_examples = processor.get_dev_examples(FLAGS.data_dir)
884 | num_actual_eval_examples = len(eval_examples)
885 | if FLAGS.use_tpu:
886 | # TPU requires a fixed batch size for all batches, therefore the number
887 | # of examples must be a multiple of the batch size, or else examples
888 | # will get dropped. So we pad with fake examples which are ignored
889 | # later on. These do NOT count towards the metric (all tf.metrics
890 | # support a per-instance weight, and these get a weight of 0.0).
891 | while len(eval_examples) % FLAGS.eval_batch_size != 0:
892 | eval_examples.append(PaddingInputExample())
893 |
894 | eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
895 | file_based_convert_examples_to_features(
896 | eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
897 |
898 | tf.logging.info("***** Running evaluation *****")
899 | tf.logging.info(" Num examples = %d (%d actual, %d padding)",
900 | len(eval_examples), num_actual_eval_examples,
901 | len(eval_examples) - num_actual_eval_examples)
902 | tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
903 |
904 | # This tells the estimator to run through the entire set.
905 | eval_steps = None
906 | # However, if running eval on the TPU, you will need to specify the
907 | # number of steps.
908 | if FLAGS.use_tpu:
909 | assert len(eval_examples) % FLAGS.eval_batch_size == 0
910 | eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
911 |
912 | eval_drop_remainder = FLAGS.use_tpu
913 | eval_input_fn = file_based_input_fn_builder(
914 | input_file=eval_file,
915 | seq_length=FLAGS.max_seq_length,
916 | is_training=False,
917 | drop_remainder=eval_drop_remainder)
918 |
919 | result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
920 |
921 | output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
922 | with tf.gfile.GFile(output_eval_file, "w") as writer:
923 | tf.logging.info("***** Eval results *****")
924 | for key in sorted(result.keys()):
925 | tf.logging.info(" %s = %s", key, str(result[key]))
926 | writer.write("%s = %s\n" % (key, str(result[key])))
927 |
928 | if FLAGS.do_predict:
929 | predict_examples = processor.get_test_examples(FLAGS.data_dir)
930 | num_actual_predict_examples = len(predict_examples)
931 | if FLAGS.use_tpu:
932 | # TPU requires a fixed batch size for all batches, therefore the number
933 | # of examples must be a multiple of the batch size, or else examples
934 | # will get dropped. So we pad with fake examples which are ignored
935 | # later on.
936 | while len(predict_examples) % FLAGS.predict_batch_size != 0:
937 | predict_examples.append(PaddingInputExample())
938 |
939 | predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
940 | file_based_convert_examples_to_features(predict_examples, label_list,
941 | FLAGS.max_seq_length, tokenizer,
942 | predict_file)
943 |
944 | tf.logging.info("***** Running prediction *****")
945 | tf.logging.info(" Num examples = %d (%d actual, %d padding)",
946 | len(predict_examples), num_actual_predict_examples,
947 | len(predict_examples) - num_actual_predict_examples)
948 | tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
949 |
950 | predict_drop_remainder = FLAGS.use_tpu
951 | predict_input_fn = file_based_input_fn_builder(
952 | input_file=predict_file,
953 | seq_length=FLAGS.max_seq_length,
954 | is_training=False,
955 | drop_remainder=predict_drop_remainder)
956 |
957 | result = estimator.predict(input_fn=predict_input_fn)
958 |
959 | output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
960 | with tf.gfile.GFile(output_predict_file, "w") as writer:
961 | num_written_lines = 0
962 | tf.logging.info("***** Predict results *****")
963 | for (i, prediction) in enumerate(result):
964 | probabilities = prediction["probabilities"]
965 | if i >= num_actual_predict_examples:
966 | break
967 | output_line = "\t".join(
968 | str(class_probability)
969 | for class_probability in probabilities) + "\n"
970 | writer.write(output_line)
971 | num_written_lines += 1
972 | assert num_written_lines == num_actual_predict_examples
973 |
974 |
975 | if __name__ == "__main__":
976 | flags.mark_flag_as_required("data_dir")
977 | flags.mark_flag_as_required("task_name")
978 | flags.mark_flag_as_required("vocab_file")
979 | flags.mark_flag_as_required("bert_config_file")
980 | flags.mark_flag_as_required("output_dir")
981 | tf.app.run()
982 |
--------------------------------------------------------------------------------
/run_meta.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # TO TRAIN
4 | export BERT_BASE_DIR=/uncased_L-12_H-768_A-12 # download BERT model from Google repo first
5 | export DATA_DIR=/data/metacritic
6 |
7 | # the following command runs 4 workers (if you have multiple GPUs)
8 | mpirun -np 4 \
9 | -H localhost:4 \
10 | -bind-to none -map-by slot \
11 | -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
12 | -mca pml ob1 -mca btl ^openib \
13 | python run_classifier_hvd.py \
14 | --task_name=meta \
15 | --do_train=True \
16 | --do_eval=True \
17 | --data_dir=$DATA_DIR \
18 | --vocab_file=$BERT_BASE_DIR/vocab.txt \
19 | --bert_config_file=$BERT_BASE_DIR/bert_config.json \
20 | --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
21 | --max_seq_length=128 \
22 | --train_batch_size=32 \
23 | --learning_rate=2e-5 \
24 | --num_train_epochs=4.0 \
25 | --output_dir=results/
26 | # you can also freeze the BERT layers by adding --freeze=True
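# To run a different number of workers, change both -np and the localhost
# slot count above, e.g. "mpirun -np 2 -H localhost:2 ..." for two GPUs.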
--------------------------------------------------------------------------------
/run_predict.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # TO PREDICT
4 | export BERT_BASE_DIR=/uncased_L-12_H-768_A-12 # download BERT model from Google repo first
5 | export DATA_DIR= # the data you want to use for prediction
6 | export TRAINED_CLASSIFIER= # directory of your trained model, for example, one trained on the reddit data
7 |
8 | # the following command runs 4 workers (if you have multiple GPUs)
9 | mpirun -np 4 \
10 | -H localhost:4 \
11 | -bind-to none -map-by slot \
12 | -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
13 | -mca pml ob1 -mca btl ^openib \
14 | python run_classifier_hvd.py \
15 | --task_name=reddit \
16 | --do_predict=True \
17 | --data_dir=$DATA_DIR \
18 | --vocab_file=$BERT_BASE_DIR/vocab.txt \
19 | --bert_config_file=$BERT_BASE_DIR/bert_config.json \
20 | --init_checkpoint=$TRAINED_CLASSIFIER \
21 | --max_seq_length=128 \
22 | --output_dir=results/
--------------------------------------------------------------------------------
/run_pretraining.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Run masked LM/next sentence masked_lm pre-training for BERT."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import os
22 | import modeling
23 | import optimization
24 | import tensorflow as tf
25 |
26 | flags = tf.flags
27 |
28 | FLAGS = flags.FLAGS
29 |
30 | ## Required parameters
31 | flags.DEFINE_string(
32 | "bert_config_file", None,
33 | "The config json file corresponding to the pre-trained BERT model. "
34 | "This specifies the model architecture.")
35 |
36 | flags.DEFINE_string(
37 | "input_file", None,
38 | "Input TF example files (can be a glob or comma separated).")
39 |
40 | flags.DEFINE_string(
41 | "output_dir", None,
42 | "The output directory where the model checkpoints will be written.")
43 |
44 | ## Other parameters
45 | flags.DEFINE_string(
46 | "init_checkpoint", None,
47 | "Initial checkpoint (usually from a pre-trained BERT model).")
48 |
49 | flags.DEFINE_integer(
50 | "max_seq_length", 128,
51 | "The maximum total input sequence length after WordPiece tokenization. "
52 | "Sequences longer than this will be truncated, and sequences shorter "
53 | "than this will be padded. Must match data generation.")
54 |
55 | flags.DEFINE_integer(
56 | "max_predictions_per_seq", 20,
57 | "Maximum number of masked LM predictions per sequence. "
58 | "Must match data generation.")
59 |
60 | flags.DEFINE_bool("do_train", False, "Whether to run training.")
61 |
62 | flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
63 |
64 | flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
65 |
66 | flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
67 |
68 | flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
69 |
70 | flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
71 |
72 | flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
73 |
74 | flags.DEFINE_integer("save_checkpoints_steps", 1000,
75 | "How often to save the model checkpoint.")
76 |
77 | flags.DEFINE_integer("iterations_per_loop", 1000,
78 | "How many steps to make in each estimator call.")
79 |
80 | flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
81 |
82 | flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
83 |
84 | tf.flags.DEFINE_string(
85 | "tpu_name", None,
86 | "The Cloud TPU to use for training. This should be either the name "
87 | "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
88 | "url.")
89 |
90 | tf.flags.DEFINE_string(
91 | "tpu_zone", None,
92 | "[Optional] GCE zone where the Cloud TPU is located in. If not "
93 | "specified, we will attempt to automatically detect the GCE project from "
94 | "metadata.")
95 |
96 | tf.flags.DEFINE_string(
97 | "gcp_project", None,
98 | "[Optional] Project name for the Cloud TPU-enabled project. If not "
99 | "specified, we will attempt to automatically detect the GCE project from "
100 | "metadata.")
101 |
102 | tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
103 |
104 | flags.DEFINE_integer(
105 | "num_tpu_cores", 8,
106 | "Only used if `use_tpu` is True. Total number of TPU cores to use.")
107 |
108 |
109 | def model_fn_builder(bert_config, init_checkpoint, learning_rate,
110 | num_train_steps, num_warmup_steps, use_tpu,
111 | use_one_hot_embeddings):
112 | """Returns `model_fn` closure for TPUEstimator."""
113 |
114 | def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
115 | """The `model_fn` for TPUEstimator."""
116 |
117 | tf.logging.info("*** Features ***")
118 | for name in sorted(features.keys()):
119 | tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
120 |
121 | input_ids = features["input_ids"]
122 | input_mask = features["input_mask"]
123 | segment_ids = features["segment_ids"]
124 | masked_lm_positions = features["masked_lm_positions"]
125 | masked_lm_ids = features["masked_lm_ids"]
126 | masked_lm_weights = features["masked_lm_weights"]
127 | next_sentence_labels = features["next_sentence_labels"]
128 |
129 | is_training = (mode == tf.estimator.ModeKeys.TRAIN)
130 |
131 | model = modeling.BertModel(
132 | config=bert_config,
133 | is_training=is_training,
134 | input_ids=input_ids,
135 | input_mask=input_mask,
136 | token_type_ids=segment_ids,
137 | use_one_hot_embeddings=use_one_hot_embeddings)
138 |
139 | (masked_lm_loss,
140 | masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
141 | bert_config, model.get_sequence_output(), model.get_embedding_table(),
142 | masked_lm_positions, masked_lm_ids, masked_lm_weights)
143 |
144 | (next_sentence_loss, next_sentence_example_loss,
145 | next_sentence_log_probs) = get_next_sentence_output(
146 | bert_config, model.get_pooled_output(), next_sentence_labels)
147 |
148 | total_loss = masked_lm_loss + next_sentence_loss
149 |
150 | tvars = tf.trainable_variables()
151 |
152 | initialized_variable_names = {}
153 | scaffold_fn = None
154 | if init_checkpoint:
155 | (assignment_map, initialized_variable_names
156 | ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
157 | if use_tpu:
158 |
159 | def tpu_scaffold():
160 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
161 | return tf.train.Scaffold()
162 |
163 | scaffold_fn = tpu_scaffold
164 | else:
165 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
166 |
167 | tf.logging.info("**** Trainable Variables ****")
168 | for var in tvars:
169 | init_string = ""
170 | if var.name in initialized_variable_names:
171 | init_string = ", *INIT_FROM_CKPT*"
172 | tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
173 | init_string)
174 |
175 | output_spec = None
176 | if mode == tf.estimator.ModeKeys.TRAIN:
177 | train_op = optimization.create_optimizer(
178 | total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
179 |
180 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
181 | mode=mode,
182 | loss=total_loss,
183 | train_op=train_op,
184 | scaffold_fn=scaffold_fn)
185 | elif mode == tf.estimator.ModeKeys.EVAL:
186 |
187 | def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
188 | masked_lm_weights, next_sentence_example_loss,
189 | next_sentence_log_probs, next_sentence_labels):
190 | """Computes the loss and accuracy of the model."""
191 | masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
192 | [-1, masked_lm_log_probs.shape[-1]])
193 | masked_lm_predictions = tf.argmax(
194 | masked_lm_log_probs, axis=-1, output_type=tf.int32)
195 | masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
196 | masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
197 | masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
198 | masked_lm_accuracy = tf.metrics.accuracy(
199 | labels=masked_lm_ids,
200 | predictions=masked_lm_predictions,
201 | weights=masked_lm_weights)
202 | masked_lm_mean_loss = tf.metrics.mean(
203 | values=masked_lm_example_loss, weights=masked_lm_weights)
204 |
205 | next_sentence_log_probs = tf.reshape(
206 | next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
207 | next_sentence_predictions = tf.argmax(
208 | next_sentence_log_probs, axis=-1, output_type=tf.int32)
209 | next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
210 | next_sentence_accuracy = tf.metrics.accuracy(
211 | labels=next_sentence_labels, predictions=next_sentence_predictions)
212 | next_sentence_mean_loss = tf.metrics.mean(
213 | values=next_sentence_example_loss)
214 |
215 | return {
216 | "masked_lm_accuracy": masked_lm_accuracy,
217 | "masked_lm_loss": masked_lm_mean_loss,
218 | "next_sentence_accuracy": next_sentence_accuracy,
219 | "next_sentence_loss": next_sentence_mean_loss,
220 | }
221 |
222 | eval_metrics = (metric_fn, [
223 | masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
224 | masked_lm_weights, next_sentence_example_loss,
225 | next_sentence_log_probs, next_sentence_labels
226 | ])
227 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
228 | mode=mode,
229 | loss=total_loss,
230 | eval_metrics=eval_metrics,
231 | scaffold_fn=scaffold_fn)
232 | else:
233 | raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
234 |
235 | return output_spec
236 |
237 | return model_fn
238 |
239 |
240 | def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
241 | label_ids, label_weights):
242 | """Get loss and log probs for the masked LM."""
243 | input_tensor = gather_indexes(input_tensor, positions)
244 |
245 | with tf.variable_scope("cls/predictions"):
246 | # We apply one more non-linear transformation before the output layer.
247 | # This matrix is not used after pre-training.
248 | with tf.variable_scope("transform"):
249 | input_tensor = tf.layers.dense(
250 | input_tensor,
251 | units=bert_config.hidden_size,
252 | activation=modeling.get_activation(bert_config.hidden_act),
253 | kernel_initializer=modeling.create_initializer(
254 | bert_config.initializer_range))
255 | input_tensor = modeling.layer_norm(input_tensor)
256 |
257 | # The output weights are the same as the input embeddings, but there is
258 | # an output-only bias for each token.
259 | output_bias = tf.get_variable(
260 | "output_bias",
261 | shape=[bert_config.vocab_size],
262 | initializer=tf.zeros_initializer())
263 | logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
264 | logits = tf.nn.bias_add(logits, output_bias)
265 | log_probs = tf.nn.log_softmax(logits, axis=-1)
266 |
267 | label_ids = tf.reshape(label_ids, [-1])
268 | label_weights = tf.reshape(label_weights, [-1])
269 |
270 | one_hot_labels = tf.one_hot(
271 | label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
272 |
273 | # The `positions` tensor might be zero-padded (if the sequence is too
274 | # short to have the maximum number of predictions). The `label_weights`
275 | # tensor has a value of 1.0 for every real prediction and 0.0 for the
276 | # padding predictions.
277 | per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
278 | numerator = tf.reduce_sum(label_weights * per_example_loss)
279 | denominator = tf.reduce_sum(label_weights) + 1e-5
280 | loss = numerator / denominator
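    # Illustrative example: per_example_loss = [2.0, 1.0, 5.0] with
    # label_weights = [1, 1, 0] (one padded prediction) gives
    # loss = (2.0 + 1.0) / (2 + 1e-5), roughly 1.5; padding is ignored.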
281 |
282 | return (loss, per_example_loss, log_probs)
283 |
284 |
285 | def get_next_sentence_output(bert_config, input_tensor, labels):
286 | """Get loss and log probs for the next sentence prediction."""
287 |
288 | # Simple binary classification. Note that 0 is "next sentence" and 1 is
289 | # "random sentence". This weight matrix is not used after pre-training.
290 | with tf.variable_scope("cls/seq_relationship"):
291 | output_weights = tf.get_variable(
292 | "output_weights",
293 | shape=[2, bert_config.hidden_size],
294 | initializer=modeling.create_initializer(bert_config.initializer_range))
295 | output_bias = tf.get_variable(
296 | "output_bias", shape=[2], initializer=tf.zeros_initializer())
297 |
298 | logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
299 | logits = tf.nn.bias_add(logits, output_bias)
300 | log_probs = tf.nn.log_softmax(logits, axis=-1)
301 | labels = tf.reshape(labels, [-1])
302 | one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
303 | per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
304 | loss = tf.reduce_mean(per_example_loss)
305 | return (loss, per_example_loss, log_probs)
306 |
307 |
308 | def gather_indexes(sequence_tensor, positions):
309 | """Gathers the vectors at the specific positions over a minibatch."""
310 | sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
311 | batch_size = sequence_shape[0]
312 | seq_length = sequence_shape[1]
313 | width = sequence_shape[2]
314 |
315 | flat_offsets = tf.reshape(
316 | tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
317 | flat_positions = tf.reshape(positions + flat_offsets, [-1])
318 | flat_sequence_tensor = tf.reshape(sequence_tensor,
319 | [batch_size * seq_length, width])
320 | output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
321 | return output_tensor
322 |
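# Illustrative example: with batch_size = 2, seq_length = 4 and
# positions = [[1, 3], [0, 2]], flat_offsets is [[0], [4]], so the flat
# positions become [1, 3, 4, 6]: rows 1 and 3 of example 0 and rows 0 and 2
# of example 1 in the flattened [8, width] tensor.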
323 |
324 | def input_fn_builder(input_files,
325 | max_seq_length,
326 | max_predictions_per_seq,
327 | is_training,
328 | num_cpu_threads=4):
329 | """Creates an `input_fn` closure to be passed to TPUEstimator."""
330 |
331 | def input_fn(params):
332 | """The actual input function."""
333 | batch_size = params["batch_size"]
334 |
335 | name_to_features = {
336 | "input_ids":
337 | tf.FixedLenFeature([max_seq_length], tf.int64),
338 | "input_mask":
339 | tf.FixedLenFeature([max_seq_length], tf.int64),
340 | "segment_ids":
341 | tf.FixedLenFeature([max_seq_length], tf.int64),
342 | "masked_lm_positions":
343 | tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
344 | "masked_lm_ids":
345 | tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
346 | "masked_lm_weights":
347 | tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
348 | "next_sentence_labels":
349 | tf.FixedLenFeature([1], tf.int64),
350 | }
351 |
352 | # For training, we want a lot of parallel reading and shuffling.
353 | # For eval, we want no shuffling and parallel reading doesn't matter.
354 | if is_training:
355 | d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
356 | d = d.repeat()
357 | d = d.shuffle(buffer_size=len(input_files))
358 |
359 | # `cycle_length` is the number of parallel files that get read.
360 | cycle_length = min(num_cpu_threads, len(input_files))
361 |
362 | # `sloppy` mode means that the interleaving is not exact. This adds
363 | # even more randomness to the training pipeline.
364 | d = d.apply(
365 | tf.contrib.data.parallel_interleave(
366 | tf.data.TFRecordDataset,
367 | sloppy=is_training,
368 | cycle_length=cycle_length))
369 | d = d.shuffle(buffer_size=100)
370 | else:
371 | d = tf.data.TFRecordDataset(input_files)
372 | # Since we evaluate for a fixed number of steps we don't want to encounter
373 | # out-of-range exceptions.
374 | d = d.repeat()
375 |
376 | # We must `drop_remainder` on training because the TPU requires fixed
377 | # size dimensions. For eval, we assume we are evaluating on the CPU or GPU
378 | # and we *don't* want to drop the remainder, otherwise we won't cover
379 | # every sample.
380 | d = d.apply(
381 | tf.contrib.data.map_and_batch(
382 | lambda record: _decode_record(record, name_to_features),
383 | batch_size=batch_size,
384 | num_parallel_batches=num_cpu_threads,
385 | drop_remainder=True))
386 | return d
387 |
388 | return input_fn
389 |
390 |
391 | def _decode_record(record, name_to_features):
392 | """Decodes a record to a TensorFlow example."""
393 | example = tf.parse_single_example(record, name_to_features)
394 |
395 | # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
396 | # So cast all int64 to int32.
397 | for name in list(example.keys()):
398 | t = example[name]
399 | if t.dtype == tf.int64:
400 | t = tf.to_int32(t)
401 | example[name] = t
402 |
403 | return example
404 |
405 |
406 | def main(_):
407 | tf.logging.set_verbosity(tf.logging.INFO)
408 |
409 | if not FLAGS.do_train and not FLAGS.do_eval:
410 | raise ValueError("At least one of `do_train` or `do_eval` must be True.")
411 |
412 | bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
413 |
414 | tf.gfile.MakeDirs(FLAGS.output_dir)
415 |
416 | input_files = []
417 | for input_pattern in FLAGS.input_file.split(","):
418 | input_files.extend(tf.gfile.Glob(input_pattern))
419 |
420 | tf.logging.info("*** Input Files ***")
421 | for input_file in input_files:
422 | tf.logging.info(" %s" % input_file)
423 |
424 | tpu_cluster_resolver = None
425 | if FLAGS.use_tpu and FLAGS.tpu_name:
426 | tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
427 | FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
428 |
429 | is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
430 | run_config = tf.contrib.tpu.RunConfig(
431 | cluster=tpu_cluster_resolver,
432 | master=FLAGS.master,
433 | model_dir=FLAGS.output_dir,
434 | save_checkpoints_steps=FLAGS.save_checkpoints_steps,
435 | tpu_config=tf.contrib.tpu.TPUConfig(
436 | iterations_per_loop=FLAGS.iterations_per_loop,
437 | num_shards=FLAGS.num_tpu_cores,
438 | per_host_input_for_training=is_per_host))
439 |
440 | model_fn = model_fn_builder(
441 | bert_config=bert_config,
442 | init_checkpoint=FLAGS.init_checkpoint,
443 | learning_rate=FLAGS.learning_rate,
444 | num_train_steps=FLAGS.num_train_steps,
445 | num_warmup_steps=FLAGS.num_warmup_steps,
446 | use_tpu=FLAGS.use_tpu,
447 | use_one_hot_embeddings=FLAGS.use_tpu)
448 |
449 | # If TPU is not available, this will fall back to normal Estimator on CPU
450 | # or GPU.
451 | estimator = tf.contrib.tpu.TPUEstimator(
452 | use_tpu=FLAGS.use_tpu,
453 | model_fn=model_fn,
454 | config=run_config,
455 | train_batch_size=FLAGS.train_batch_size,
456 | eval_batch_size=FLAGS.eval_batch_size)
457 |
458 | if FLAGS.do_train:
459 | tf.logging.info("***** Running training *****")
460 | tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
461 | train_input_fn = input_fn_builder(
462 | input_files=input_files,
463 | max_seq_length=FLAGS.max_seq_length,
464 | max_predictions_per_seq=FLAGS.max_predictions_per_seq,
465 | is_training=True)
466 | estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
467 |
468 | if FLAGS.do_eval:
469 | tf.logging.info("***** Running evaluation *****")
470 | tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
471 |
472 | eval_input_fn = input_fn_builder(
473 | input_files=input_files,
474 | max_seq_length=FLAGS.max_seq_length,
475 | max_predictions_per_seq=FLAGS.max_predictions_per_seq,
476 | is_training=False)
477 |
478 | result = estimator.evaluate(
479 | input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
480 |
481 | output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
482 | with tf.gfile.GFile(output_eval_file, "w") as writer:
483 | tf.logging.info("***** Eval results *****")
484 | for key in sorted(result.keys()):
485 | tf.logging.info(" %s = %s", key, str(result[key]))
486 | writer.write("%s = %s\n" % (key, str(result[key])))
487 |
488 |
489 | if __name__ == "__main__":
490 | flags.mark_flag_as_required("input_file")
491 | flags.mark_flag_as_required("bert_config_file")
492 | flags.mark_flag_as_required("output_dir")
493 | tf.app.run()
494 |
--------------------------------------------------------------------------------
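The `_decode_record` helper above exists because `tf.Example` stores all integers as int64 while TPUs operate on int32. A minimal sketch of the same pattern exercised directly on a `tf.data` pipeline (TF 1.x assumed; the record path and feature lengths are illustrative):

    import tensorflow as tf

    name_to_features = {
        "input_ids": tf.FixedLenFeature([128], tf.int64),  # must match data generation
        "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
    }

    def decode(record):
        example = tf.parse_single_example(record, name_to_features)
        # tf.Example only supports int64; cast down to int32 for the TPU.
        return {name: tf.to_int32(t) if t.dtype == tf.int64 else t
                for name, t in example.items()}

    dataset = tf.data.TFRecordDataset("pretrain_results/tf_examples.tfrecord")
    dataset = dataset.map(decode).batch(8, drop_remainder=True)
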
/run_pretraining.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # TO CREATE PRETRAINING DATA
4 | # This outputs a tfrecord file
5 | export BERT_BASE_DIR=/uncased_L-12_H-768_A-12 # download BERT model from Google repo first
6 | export DATA_DIR=/pretrain_data/reddit_pretraintext.txt
7 | export OUT_DATA_DIR=pretrain_results/
8 |
9 | python create_pretraining_data.py \
10 | --input_file=$DATA_DIR \
11 | --output_file=$OUT_DATA_DIR/tf_examples.tfrecord \
12 | --vocab_file=$BERT_BASE_DIR/vocab.txt \
13 | --do_lower_case=True \
14 | --max_seq_length=128 \
15 | --max_predictions_per_seq=20 \
16 | --masked_lm_prob=0.15 \
17 | --random_seed=12345 \
18 | --dupe_factor=3
19 |
20 |
21 | # TO RUN PRETRAINING
22 | export BERT_BASE_DIR=/home/ydu/BERT/uncased_L-12_H-768_A-12 # download BERT model from Google repo first
23 | export INPUT_DIR=pretrain_results/ # where your $OUT_DATA_DIR is
24 | export DATA_DIR=model_results/ # where you want to save the pre-trained model checkpoints
25 |
26 | # the following command runs 4 workers (if you have multiple GPUs)
27 | mpirun -np 4 \
28 | -H localhost:4 \
29 | -bind-to none -map-by slot \
30 | -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
31 | -mca pml ob1 -mca btl ^openib \
32 | python run_pretraining_hvd.py \
33 | --input_file=$INPUT_DIR/tf_examples.tfrecord \
34 | --output_dir=$DATA_DIR/ \
35 | --do_train=True \
36 | --do_eval=True \
37 | --bert_config_file=$BERT_BASE_DIR/bert_config.json \
38 | --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
39 | --train_batch_size=32 \
40 | --max_seq_length=128 \
41 | --max_predictions_per_seq=20 \
42 | --num_train_steps=10000 \
43 | --num_warmup_steps=10 \
44 | --learning_rate=2e-5
--------------------------------------------------------------------------------
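Note how the `mpirun -np 4` launch above interacts with the flags: each worker sees `--train_batch_size=32`, and `run_pretraining_hvd.py` divides `--num_train_steps` and `--num_warmup_steps` by `hvd.size()`, so the total number of examples processed matches a single-GPU run. A quick sanity check of that arithmetic (values taken from the script above):

    # Horovod scaling arithmetic for the launch above.
    num_workers = 4          # mpirun -np 4
    per_gpu_batch = 32       # --train_batch_size
    requested_steps = 10000  # --num_train_steps

    effective_batch = per_gpu_batch * num_workers       # 128 examples per update
    steps_per_worker = requested_steps // num_workers   # 2500, as divided in run_pretraining_hvd.py
    examples_seen = effective_batch * steps_per_worker  # 320,000 == 32 * 10,000
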
/run_pretraining_hvd.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Run masked LM/next sentence masked_lm pre-training for BERT."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import os
22 | import modeling
23 | import optimization_hvd
24 | import tensorflow as tf
25 |
26 | import horovod.tensorflow as hvd
27 |
28 | flags = tf.flags
29 |
30 | FLAGS = flags.FLAGS
31 |
32 | ## Required parameters
33 | flags.DEFINE_string(
34 | "bert_config_file", None,
35 | "The config json file corresponding to the pre-trained BERT model. "
36 | "This specifies the model architecture.")
37 |
38 | flags.DEFINE_string(
39 | "input_file", None,
40 | "Input TF example files (can be a glob or comma separated).")
41 |
42 | flags.DEFINE_string(
43 | "output_dir", None,
44 | "The output directory where the model checkpoints will be written.")
45 |
46 | ## Other parameters
47 | flags.DEFINE_string(
48 | "init_checkpoint", None,
49 | "Initial checkpoint (usually from a pre-trained BERT model).")
50 |
51 | flags.DEFINE_integer(
52 | "max_seq_length", 128,
53 | "The maximum total input sequence length after WordPiece tokenization. "
54 | "Sequences longer than this will be truncated, and sequences shorter "
55 | "than this will be padded. Must match data generation.")
56 |
57 | flags.DEFINE_integer(
58 | "max_predictions_per_seq", 20,
59 | "Maximum number of masked LM predictions per sequence. "
60 | "Must match data generation.")
61 |
62 | flags.DEFINE_bool("do_train", False, "Whether to run training.")
63 |
64 | flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
65 |
66 | flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
67 |
68 | flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
69 |
70 | flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
71 |
72 | flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
73 |
74 | flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
75 |
76 | flags.DEFINE_integer("save_checkpoints_steps", 1000,
77 | "How often to save the model checkpoint.")
78 |
79 | flags.DEFINE_integer("iterations_per_loop", 1000,
80 | "How many steps to make in each estimator call.")
81 |
82 | flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
83 |
84 | flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
85 |
86 | flags.DEFINE_bool("freeze", False, "Whether to freeze transferred hidden layers.")
87 |
88 | tf.flags.DEFINE_string(
89 | "tpu_name", None,
90 | "The Cloud TPU to use for training. This should be either the name "
91 | "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
92 | "url.")
93 |
94 | tf.flags.DEFINE_string(
95 | "tpu_zone", None,
96 | "[Optional] GCE zone where the Cloud TPU is located in. If not "
97 | "specified, we will attempt to automatically detect the GCE project from "
98 | "metadata.")
99 |
100 | tf.flags.DEFINE_string(
101 | "gcp_project", None,
102 | "[Optional] Project name for the Cloud TPU-enabled project. If not "
103 | "specified, we will attempt to automatically detect the GCE project from "
104 | "metadata.")
105 |
106 | tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
107 |
108 | flags.DEFINE_integer(
109 | "num_tpu_cores", 8,
110 | "Only used if `use_tpu` is True. Total number of TPU cores to use.")
111 |
112 |
113 | def model_fn_builder(bert_config, init_checkpoint, learning_rate,
114 | num_train_steps, num_warmup_steps, use_tpu,
115 | use_one_hot_embeddings, freeze):
116 | """Returns `model_fn` closure for TPUEstimator."""
117 |
118 | def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
119 | """The `model_fn` for TPUEstimator."""
120 |
121 | tf.logging.info("*** Features ***")
122 | for name in sorted(features.keys()):
123 | tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
124 |
125 | input_ids = features["input_ids"]
126 | input_mask = features["input_mask"]
127 | segment_ids = features["segment_ids"]
128 | masked_lm_positions = features["masked_lm_positions"]
129 | masked_lm_ids = features["masked_lm_ids"]
130 | masked_lm_weights = features["masked_lm_weights"]
131 | next_sentence_labels = features["next_sentence_labels"]
132 |
133 | is_training = (mode == tf.estimator.ModeKeys.TRAIN)
134 |
135 | model = modeling.BertModel(
136 | config=bert_config,
137 | is_training=is_training,
138 | input_ids=input_ids,
139 | input_mask=input_mask,
140 | token_type_ids=segment_ids,
141 | use_one_hot_embeddings=use_one_hot_embeddings)
142 |
143 | (masked_lm_loss,
144 | masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
145 | bert_config, model.get_sequence_output(), model.get_embedding_table(),
146 | masked_lm_positions, masked_lm_ids, masked_lm_weights)
147 |
148 | (next_sentence_loss, next_sentence_example_loss,
149 | next_sentence_log_probs) = get_next_sentence_output(
150 | bert_config, model.get_pooled_output(), next_sentence_labels)
151 |
152 | total_loss = masked_lm_loss + next_sentence_loss
153 |
154 | tvars = tf.trainable_variables()
155 |
156 | initialized_variable_names = {}
157 | scaffold_fn = None
158 | if init_checkpoint:
159 | (assignment_map, initialized_variable_names
160 | ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
161 | if use_tpu:
162 |
163 | def tpu_scaffold():
164 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
165 | return tf.train.Scaffold()
166 |
167 | scaffold_fn = tpu_scaffold
168 | else:
169 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
170 |
171 | tf.logging.info("**** Trainable Variables ****")
172 | for var in tvars:
173 | init_string = ""
174 | if var.name in initialized_variable_names:
175 | init_string = ", *INIT_FROM_CKPT*"
176 | tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
177 | init_string)
178 |
179 | output_spec = None
180 | if mode == tf.estimator.ModeKeys.TRAIN:
181 | train_op = optimization_hvd.create_optimizer(
182 | total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, freeze)
183 |
184 | if freeze:
185 | tf.logging.info("**** Freeze Layers ****")
186 |
187 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
188 | mode=mode,
189 | loss=total_loss,
190 | train_op=train_op,
191 | scaffold_fn=scaffold_fn)
192 | elif mode == tf.estimator.ModeKeys.EVAL:
193 |
194 | def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
195 | masked_lm_weights, next_sentence_example_loss,
196 | next_sentence_log_probs, next_sentence_labels):
197 | """Computes the loss and accuracy of the model."""
198 | masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
199 | [-1, masked_lm_log_probs.shape[-1]])
200 | masked_lm_predictions = tf.argmax(
201 | masked_lm_log_probs, axis=-1, output_type=tf.int32)
202 | masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
203 | masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
204 | masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
205 | masked_lm_accuracy = tf.metrics.accuracy(
206 | labels=masked_lm_ids,
207 | predictions=masked_lm_predictions,
208 | weights=masked_lm_weights)
209 | masked_lm_mean_loss = tf.metrics.mean(
210 | values=masked_lm_example_loss, weights=masked_lm_weights)
211 |
212 | next_sentence_log_probs = tf.reshape(
213 | next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
214 | next_sentence_predictions = tf.argmax(
215 | next_sentence_log_probs, axis=-1, output_type=tf.int32)
216 | next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
217 | next_sentence_accuracy = tf.metrics.accuracy(
218 | labels=next_sentence_labels, predictions=next_sentence_predictions)
219 | next_sentence_mean_loss = tf.metrics.mean(
220 | values=next_sentence_example_loss)
221 |
222 | return {
223 | "masked_lm_accuracy": masked_lm_accuracy,
224 | "masked_lm_loss": masked_lm_mean_loss,
225 | "next_sentence_accuracy": next_sentence_accuracy,
226 | "next_sentence_loss": next_sentence_mean_loss,
227 | }
228 |
229 | eval_metrics = (metric_fn, [
230 | masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
231 | masked_lm_weights, next_sentence_example_loss,
232 | next_sentence_log_probs, next_sentence_labels
233 | ])
234 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
235 | mode=mode,
236 | loss=total_loss,
237 | eval_metrics=eval_metrics,
238 | scaffold_fn=scaffold_fn)
239 | else:
240 | raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
241 |
242 | return output_spec
243 |
244 | return model_fn
245 |
246 |
247 | def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
248 | label_ids, label_weights):
249 | """Get loss and log probs for the masked LM."""
250 | input_tensor = gather_indexes(input_tensor, positions)
251 |
252 | with tf.variable_scope("cls/predictions"):
253 | # We apply one more non-linear transformation before the output layer.
254 | # This matrix is not used after pre-training.
255 | with tf.variable_scope("transform"):
256 | input_tensor = tf.layers.dense(
257 | input_tensor,
258 | units=bert_config.hidden_size,
259 | activation=modeling.get_activation(bert_config.hidden_act),
260 | kernel_initializer=modeling.create_initializer(
261 | bert_config.initializer_range))
262 | input_tensor = modeling.layer_norm(input_tensor)
263 |
264 | # The output weights are the same as the input embeddings, but there is
265 | # an output-only bias for each token.
266 | output_bias = tf.get_variable(
267 | "output_bias",
268 | shape=[bert_config.vocab_size],
269 | initializer=tf.zeros_initializer())
270 | logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
271 | logits = tf.nn.bias_add(logits, output_bias)
272 | log_probs = tf.nn.log_softmax(logits, axis=-1)
273 |
274 | label_ids = tf.reshape(label_ids, [-1])
275 | label_weights = tf.reshape(label_weights, [-1])
276 |
277 | one_hot_labels = tf.one_hot(
278 | label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
279 |
280 | # The `positions` tensor might be zero-padded (if the sequence is too
281 | # short to have the maximum number of predictions). The `label_weights`
282 | # tensor has a value of 1.0 for every real prediction and 0.0 for the
283 | # padding predictions.
284 | per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
285 | numerator = tf.reduce_sum(label_weights * per_example_loss)
286 | denominator = tf.reduce_sum(label_weights) + 1e-5
287 | loss = numerator / denominator
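    # Worked example with illustrative numbers: for per-example losses
    # [2.0, 3.0, 9.9] and label_weights [1.0, 1.0, 0.0] (the third
    # prediction is padding), loss = (1.0*2.0 + 1.0*3.0 + 0.0*9.9)
    # / (1.0 + 1.0 + 0.0 + 1e-5) ~= 2.5, so padded positions contribute
    # nothing and the 1e-5 only guards against an all-padding denominator.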
288 |
289 | return (loss, per_example_loss, log_probs)
290 |
291 |
292 | def get_next_sentence_output(bert_config, input_tensor, labels):
293 | """Get loss and log probs for the next sentence prediction."""
294 |
295 | # Simple binary classification. Note that 0 is "next sentence" and 1 is
296 | # "random sentence". This weight matrix is not used after pre-training.
297 | with tf.variable_scope("cls/seq_relationship"):
298 | output_weights = tf.get_variable(
299 | "output_weights",
300 | shape=[2, bert_config.hidden_size],
301 | initializer=modeling.create_initializer(bert_config.initializer_range))
302 | output_bias = tf.get_variable(
303 | "output_bias", shape=[2], initializer=tf.zeros_initializer())
304 |
305 | logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
306 | logits = tf.nn.bias_add(logits, output_bias)
307 | log_probs = tf.nn.log_softmax(logits, axis=-1)
308 | labels = tf.reshape(labels, [-1])
309 | one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
310 | per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
311 | loss = tf.reduce_mean(per_example_loss)
312 | return (loss, per_example_loss, log_probs)
313 |
314 |
315 | def gather_indexes(sequence_tensor, positions):
316 | """Gathers the vectors at the specific positions over a minibatch."""
317 | sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
318 | batch_size = sequence_shape[0]
319 | seq_length = sequence_shape[1]
320 | width = sequence_shape[2]
321 |
322 | flat_offsets = tf.reshape(
323 | tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
324 | flat_positions = tf.reshape(positions + flat_offsets, [-1])
325 | flat_sequence_tensor = tf.reshape(sequence_tensor,
326 | [batch_size * seq_length, width])
327 | output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
328 | return output_tensor
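  # Shape walk-through (illustrative): with batch_size=2, seq_length=4,
  # width=3 and positions=[[0, 2], [1, 3]], flat_offsets is [[0], [4]],
  # flat_positions is [0, 2, 5, 7], and gathering from the flattened
  # [8, 3] tensor yields an output of shape [4, 3].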
329 |
330 |
331 | def input_fn_builder(input_files,
332 | max_seq_length,
333 | max_predictions_per_seq,
334 | is_training,
335 | num_cpu_threads=4):
336 | """Creates an `input_fn` closure to be passed to TPUEstimator."""
337 |
338 | def input_fn(params):
339 | """The actual input function."""
340 | batch_size = params["batch_size"]
341 |
342 | name_to_features = {
343 | "input_ids":
344 | tf.FixedLenFeature([max_seq_length], tf.int64),
345 | "input_mask":
346 | tf.FixedLenFeature([max_seq_length], tf.int64),
347 | "segment_ids":
348 | tf.FixedLenFeature([max_seq_length], tf.int64),
349 | "masked_lm_positions":
350 | tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
351 | "masked_lm_ids":
352 | tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
353 | "masked_lm_weights":
354 | tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
355 | "next_sentence_labels":
356 | tf.FixedLenFeature([1], tf.int64),
357 | }
358 |
359 | # For training, we want a lot of parallel reading and shuffling.
360 | # For eval, we want no shuffling and parallel reading doesn't matter.
361 | if is_training:
362 | d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
363 | d = d.repeat()
364 | d = d.shuffle(buffer_size=len(input_files))
365 |
366 | # `cycle_length` is the number of parallel files that get read.
367 | cycle_length = min(num_cpu_threads, len(input_files))
368 |
369 | # `sloppy` mode means that the interleaving is not exact. This adds
370 | # even more randomness to the training pipeline.
371 | d = d.apply(
372 | tf.contrib.data.parallel_interleave(
373 | tf.data.TFRecordDataset,
374 | sloppy=is_training,
375 | cycle_length=cycle_length))
376 | d = d.shuffle(buffer_size=100)
377 | else:
378 | d = tf.data.TFRecordDataset(input_files)
379 | # Since we evaluate for a fixed number of steps we don't want to encounter
380 | # out-of-range exceptions.
381 | d = d.repeat()
382 |
383 | # We must `drop_remainder` on training because the TPU requires fixed
384 | # size dimensions. For eval, we assume we are evaluating on the CPU or GPU
385 | # and we *don't* want to drop the remainder, otherwise we won't cover
386 | # every sample.
387 | d = d.apply(
388 | tf.contrib.data.map_and_batch(
389 | lambda record: _decode_record(record, name_to_features),
390 | batch_size=batch_size,
391 | num_parallel_batches=num_cpu_threads,
392 | drop_remainder=True))
393 | return d
394 |
395 | return input_fn
396 |
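# Note: `input_fn` takes no batch-size argument of its own because
# TPUEstimator injects `params["batch_size"]` from its
# train_batch_size/eval_batch_size constructor arguments.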
397 |
398 | def _decode_record(record, name_to_features):
399 | """Decodes a record to a TensorFlow example."""
400 | example = tf.parse_single_example(record, name_to_features)
401 |
402 | # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
403 | # So cast all int64 to int32.
404 | for name in list(example.keys()):
405 | t = example[name]
406 | if t.dtype == tf.int64:
407 | t = tf.to_int32(t)
408 | example[name] = t
409 |
410 | return example
411 |
412 |
413 | def main(_):
414 | hvd.init()
415 | FLAGS.output_dir = FLAGS.output_dir if hvd.rank() == 0 else os.path.join(FLAGS.output_dir, str(hvd.rank()))
416 | FLAGS.num_train_steps = FLAGS.num_train_steps // hvd.size()
417 | FLAGS.num_warmup_steps = FLAGS.num_warmup_steps // hvd.size()
418 |
419 | tf.logging.set_verbosity(tf.logging.INFO)
420 |
421 | if not FLAGS.do_train and not FLAGS.do_eval:
422 | raise ValueError("At least one of `do_train` or `do_eval` must be True.")
423 |
424 | bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
425 |
426 | tf.gfile.MakeDirs(FLAGS.output_dir)
427 |
428 | input_files = []
429 | for input_pattern in FLAGS.input_file.split(","):
430 | input_files.extend(tf.gfile.Glob(input_pattern))
431 |
432 | tf.logging.info("*** Input Files ***")
433 | for input_file in input_files:
434 | tf.logging.info(" %s" % input_file)
435 |
436 | tpu_cluster_resolver = None
437 | if FLAGS.use_tpu and FLAGS.tpu_name:
438 | tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
439 | FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
440 |
441 | is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
442 |
443 | config = tf.ConfigProto()
444 | config.gpu_options.visible_device_list = str(hvd.local_rank())
445 | config.gpu_options.allow_growth = True
446 |
447 | run_config = tf.contrib.tpu.RunConfig(
448 | cluster=tpu_cluster_resolver,
449 | master=FLAGS.master,
450 | model_dir=FLAGS.output_dir,
451 | save_checkpoints_steps=FLAGS.save_checkpoints_steps,
452 | tpu_config=tf.contrib.tpu.TPUConfig(
453 | iterations_per_loop=FLAGS.iterations_per_loop,
454 | num_shards=FLAGS.num_tpu_cores,
455 | per_host_input_for_training=is_per_host),
456 | log_step_count_steps=25,
457 | session_config=config)
458 |
459 | model_fn = model_fn_builder(
460 | bert_config=bert_config,
461 | init_checkpoint=FLAGS.init_checkpoint,
462 | learning_rate=FLAGS.learning_rate,
463 | num_train_steps=FLAGS.num_train_steps,
464 | num_warmup_steps=FLAGS.num_warmup_steps,
465 | use_tpu=FLAGS.use_tpu,
466 | use_one_hot_embeddings=FLAGS.use_tpu,
467 | freeze=FLAGS.freeze)
468 |
469 | # If TPU is not available, this will fall back to normal Estimator on CPU
470 | # or GPU.
471 | estimator = tf.contrib.tpu.TPUEstimator(
472 | use_tpu=FLAGS.use_tpu,
473 | model_fn=model_fn,
474 | config=run_config,
475 | train_batch_size=FLAGS.train_batch_size,
476 | eval_batch_size=FLAGS.eval_batch_size)
477 |
478 | if FLAGS.do_train:
479 | tf.logging.info("***** Running training *****")
480 | tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
481 | train_input_fn = input_fn_builder(
482 | input_files=input_files,
483 | max_seq_length=FLAGS.max_seq_length,
484 | max_predictions_per_seq=FLAGS.max_predictions_per_seq,
485 | is_training=True)
486 |
487 | hooks = [hvd.BroadcastGlobalVariablesHook(0)]
488 | estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks)
489 |
490 | if FLAGS.do_eval:
491 | tf.logging.info("***** Running evaluation *****")
492 | tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
493 |
494 | eval_input_fn = input_fn_builder(
495 | input_files=input_files,
496 | max_seq_length=FLAGS.max_seq_length,
497 | max_predictions_per_seq=FLAGS.max_predictions_per_seq,
498 | is_training=False)
499 |
500 | result = estimator.evaluate(
501 | input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
502 |
503 | output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
504 | with tf.gfile.GFile(output_eval_file, "w") as writer:
505 | tf.logging.info("***** Eval results *****")
506 | for key in sorted(result.keys()):
507 | tf.logging.info(" %s = %s", key, str(result[key]))
508 | writer.write("%s = %s\n" % (key, str(result[key])))
509 |
510 |
511 | if __name__ == "__main__":
512 | flags.mark_flag_as_required("input_file")
513 | flags.mark_flag_as_required("bert_config_file")
514 | flags.mark_flag_as_required("output_dir")
515 | tf.app.run()
516 |
--------------------------------------------------------------------------------
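run_pretraining_hvd.py differs from run_pretraining.py in a handful of Horovod-specific lines: process-group init, per-rank output directories and step scaling, GPU pinning, and the rank-0 broadcast hook. A condensed sketch of that recipe (the `DistributedOptimizer` wrapping is assumed to live in optimization_hvd.py, which is not shown here):

    import horovod.tensorflow as hvd
    import tensorflow as tf

    hvd.init()  # one process per GPU under `mpirun -np N`

    # Pin each process to a single local GPU.
    config = tf.ConfigProto()
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    config.gpu_options.allow_growth = True

    # Divide the step budget so total work matches a single-GPU run.
    num_train_steps = 10000 // hvd.size()

    # In the training graph, average gradients across workers each step.
    optimizer = hvd.DistributedOptimizer(
        tf.train.AdamOptimizer(learning_rate=2e-5))

    # At session start, broadcast rank 0's variables to all other workers.
    hooks = [hvd.BroadcastGlobalVariablesHook(0)]
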
/tokenization.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Tokenization classes."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import collections
22 | import re
23 | import unicodedata
24 | import six
25 | import tensorflow as tf
26 |
27 |
28 | def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
29 | """Checks whether the casing config is consistent with the checkpoint name."""
30 |
31 | # The casing has to be passed in by the user and there is no explicit check
32 | # as to whether it matches the checkpoint. The casing information probably
33 | # should have been stored in the bert_config.json file, but it's not, so
34 | # we have to heuristically detect it to validate.
35 |
36 | if not init_checkpoint:
37 | return
38 |
39 | m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
40 | if m is None:
41 | return
42 |
43 | model_name = m.group(1)
44 |
45 | lower_models = [
46 | "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
47 | "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
48 | ]
49 |
50 | cased_models = [
51 | "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
52 | "multi_cased_L-12_H-768_A-12"
53 | ]
54 |
55 | is_bad_config = False
56 | if model_name in lower_models and not do_lower_case:
57 | is_bad_config = True
58 | actual_flag = "False"
59 | case_name = "lowercased"
60 | opposite_flag = "True"
61 |
62 | if model_name in cased_models and do_lower_case:
63 | is_bad_config = True
64 | actual_flag = "True"
65 | case_name = "cased"
66 | opposite_flag = "False"
67 |
68 | if is_bad_config:
69 | raise ValueError(
70 | "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
71 | "However, `%s` seems to be a %s model, so you "
72 | "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
73 | "how the model was pre-training. If this error is wrong, please "
74 | "just comment out this check." % (actual_flag, init_checkpoint,
75 | model_name, case_name, opposite_flag))
76 |
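# Example: init_checkpoint="/home/x/uncased_L-12_H-768_A-12/bert_model.ckpt"
# matches the regex above with model_name="uncased_L-12_H-768_A-12", which is
# in `lower_models`, so passing --do_lower_case=False would raise the error.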
77 |
78 | def convert_to_unicode(text):
79 | """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
80 | if six.PY3:
81 | if isinstance(text, str):
82 | return text
83 | elif isinstance(text, bytes):
84 | return text.decode("utf-8", "ignore")
85 | else:
86 | raise ValueError("Unsupported string type: %s" % (type(text)))
87 | elif six.PY2:
88 | if isinstance(text, str):
89 | return text.decode("utf-8", "ignore")
90 | elif isinstance(text, unicode):
91 | return text
92 | else:
93 | raise ValueError("Unsupported string type: %s" % (type(text)))
94 | else:
95 | raise ValueError("Not running on Python2 or Python 3?")
96 |
97 |
98 | def printable_text(text):
99 | """Returns text encoded in a way suitable for print or `tf.logging`."""
100 |
101 | # These functions want `str` for both Python2 and Python3, but in one case
102 | # it's a Unicode string and in the other it's a byte string.
103 | if six.PY3:
104 | if isinstance(text, str):
105 | return text
106 | elif isinstance(text, bytes):
107 | return text.decode("utf-8", "ignore")
108 | else:
109 | raise ValueError("Unsupported string type: %s" % (type(text)))
110 | elif six.PY2:
111 | if isinstance(text, str):
112 | return text
113 | elif isinstance(text, unicode):
114 | return text.encode("utf-8")
115 | else:
116 | raise ValueError("Unsupported string type: %s" % (type(text)))
117 | else:
118 | raise ValueError("Not running on Python2 or Python 3?")
119 |
120 |
121 | def load_vocab(vocab_file):
122 | """Loads a vocabulary file into a dictionary."""
123 | vocab = collections.OrderedDict()
124 | index = 0
125 | with tf.gfile.GFile(vocab_file, "r") as reader:
126 | while True:
127 | token = convert_to_unicode(reader.readline())
128 | if not token:
129 | break
130 | token = token.strip()
131 | vocab[token] = index
132 | index += 1
133 | return vocab
134 |
135 |
136 | def convert_by_vocab(vocab, items):
137 | """Converts a sequence of [tokens|ids] using the vocab."""
138 | output = []
139 | for item in items:
140 | output.append(vocab[item])
141 | return output
142 |
143 |
144 | def convert_tokens_to_ids(vocab, tokens):
145 | return convert_by_vocab(vocab, tokens)
146 |
147 |
148 | def convert_ids_to_tokens(inv_vocab, ids):
149 | return convert_by_vocab(inv_vocab, ids)
150 |
151 |
152 | def whitespace_tokenize(text):
153 | """Runs basic whitespace cleaning and splitting on a piece of text."""
154 | text = text.strip()
155 | if not text:
156 | return []
157 | tokens = text.split()
158 | return tokens
159 |
160 |
161 | class FullTokenizer(object):
162 | """Runs end-to-end tokenziation."""
163 |
164 | def __init__(self, vocab_file, do_lower_case=True):
165 | self.vocab = load_vocab(vocab_file)
166 | self.inv_vocab = {v: k for k, v in self.vocab.items()}
167 | self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
168 | self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
169 |
170 | def tokenize(self, text):
171 | split_tokens = []
172 | for token in self.basic_tokenizer.tokenize(text):
173 | for sub_token in self.wordpiece_tokenizer.tokenize(token):
174 | split_tokens.append(sub_token)
175 |
176 | return split_tokens
177 |
178 | def convert_tokens_to_ids(self, tokens):
179 | return convert_by_vocab(self.vocab, tokens)
180 |
181 | def convert_ids_to_tokens(self, ids):
182 | return convert_by_vocab(self.inv_vocab, ids)
183 |
184 |
185 | class BasicTokenizer(object):
186 | """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
187 |
188 | def __init__(self, do_lower_case=True):
189 | """Constructs a BasicTokenizer.
190 |
191 | Args:
192 | do_lower_case: Whether to lower case the input.
193 | """
194 | self.do_lower_case = do_lower_case
195 |
196 | def tokenize(self, text):
197 | """Tokenizes a piece of text."""
198 | text = convert_to_unicode(text)
199 | text = self._clean_text(text)
200 |
201 | # This was added on November 1st, 2018 for the multilingual and Chinese
202 | # models. This is also applied to the English models now, but it doesn't
203 | # matter since the English models were not trained on any Chinese data
204 | # and generally don't have any Chinese data in them (there are Chinese
205 | # characters in the vocabulary because the English Wikipedia does
206 | # contain some Chinese words).
207 | text = self._tokenize_chinese_chars(text)
208 |
209 | orig_tokens = whitespace_tokenize(text)
210 | split_tokens = []
211 | for token in orig_tokens:
212 | if self.do_lower_case:
213 | token = token.lower()
214 | token = self._run_strip_accents(token)
215 | split_tokens.extend(self._run_split_on_punc(token))
216 |
217 | output_tokens = whitespace_tokenize(" ".join(split_tokens))
218 | return output_tokens
219 |
220 | def _run_strip_accents(self, text):
221 | """Strips accents from a piece of text."""
222 | text = unicodedata.normalize("NFD", text)
223 | output = []
224 | for char in text:
225 | cat = unicodedata.category(char)
226 | if cat == "Mn":
227 | continue
228 | output.append(char)
229 | return "".join(output)
230 |
231 | def _run_split_on_punc(self, text):
232 | """Splits punctuation on a piece of text."""
233 | chars = list(text)
234 | i = 0
235 | start_new_word = True
236 | output = []
237 | while i < len(chars):
238 | char = chars[i]
239 | if _is_punctuation(char):
240 | output.append([char])
241 | start_new_word = True
242 | else:
243 | if start_new_word:
244 | output.append([])
245 | start_new_word = False
246 | output[-1].append(char)
247 | i += 1
248 |
249 | return ["".join(x) for x in output]
250 |
251 | def _tokenize_chinese_chars(self, text):
252 | """Adds whitespace around any CJK character."""
253 | output = []
254 | for char in text:
255 | cp = ord(char)
256 | if self._is_chinese_char(cp):
257 | output.append(" ")
258 | output.append(char)
259 | output.append(" ")
260 | else:
261 | output.append(char)
262 | return "".join(output)
263 |
264 | def _is_chinese_char(self, cp):
265 | """Checks whether CP is the codepoint of a CJK character."""
266 | # This defines a "chinese character" as anything in the CJK Unicode block:
267 | # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
268 | #
269 | # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
270 | # despite its name. The modern Korean Hangul alphabet is a different block,
271 | # as are Japanese Hiragana and Katakana. Those alphabets are used to write
272 | # space-separated words, so they are not treated specially and are handled
273 | # like all of the other languages.
274 | if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
275 | (cp >= 0x3400 and cp <= 0x4DBF) or #
276 | (cp >= 0x20000 and cp <= 0x2A6DF) or #
277 | (cp >= 0x2A700 and cp <= 0x2B73F) or #
278 | (cp >= 0x2B740 and cp <= 0x2B81F) or #
279 | (cp >= 0x2B820 and cp <= 0x2CEAF) or
280 | (cp >= 0xF900 and cp <= 0xFAFF) or #
281 | (cp >= 0x2F800 and cp <= 0x2FA1F)): #
282 | return True
283 |
284 | return False
285 |
286 | def _clean_text(self, text):
287 | """Performs invalid character removal and whitespace cleanup on text."""
288 | output = []
289 | for char in text:
290 | cp = ord(char)
291 | if cp == 0 or cp == 0xfffd or _is_control(char):
292 | continue
293 | if _is_whitespace(char):
294 | output.append(" ")
295 | else:
296 | output.append(char)
297 | return "".join(output)
298 |
299 |
300 | class WordpieceTokenizer(object):
301 | """Runs WordPiece tokenziation."""
302 |
303 | def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
304 | self.vocab = vocab
305 | self.unk_token = unk_token
306 | self.max_input_chars_per_word = max_input_chars_per_word
307 |
308 | def tokenize(self, text):
309 | """Tokenizes a piece of text into its word pieces.
310 |
311 | This uses a greedy longest-match-first algorithm to perform tokenization
312 | using the given vocabulary.
313 |
314 | For example:
315 | input = "unaffable"
316 | output = ["un", "##aff", "##able"]
317 |
318 | Args:
319 | text: A single token or whitespace separated tokens. This should have
320 | already been passed through `BasicTokenizer`.
321 |
322 | Returns:
323 | A list of wordpiece tokens.
324 | """
325 |
326 | text = convert_to_unicode(text)
327 |
328 | output_tokens = []
329 | for token in whitespace_tokenize(text):
330 | chars = list(token)
331 | if len(chars) > self.max_input_chars_per_word:
332 | output_tokens.append(self.unk_token)
333 | continue
334 |
335 | is_bad = False
336 | start = 0
337 | sub_tokens = []
338 | while start < len(chars):
339 | end = len(chars)
340 | cur_substr = None
341 | while start < end:
342 | substr = "".join(chars[start:end])
343 | if start > 0:
344 | substr = "##" + substr
345 | if substr in self.vocab:
346 | cur_substr = substr
347 | break
348 | end -= 1
349 | if cur_substr is None:
350 | is_bad = True
351 | break
352 | sub_tokens.append(cur_substr)
353 | start = end
354 |
355 | if is_bad:
356 | output_tokens.append(self.unk_token)
357 | else:
358 | output_tokens.extend(sub_tokens)
359 | return output_tokens
360 |
361 |
362 | def _is_whitespace(char):
363 | """Checks whether `chars` is a whitespace character."""
364 | # \t, \n, and \r are technically contorl characters but we treat them
365 | # as whitespace since they are generally considered as such.
366 | if char == " " or char == "\t" or char == "\n" or char == "\r":
367 | return True
368 | cat = unicodedata.category(char)
369 | if cat == "Zs":
370 | return True
371 | return False
372 |
373 |
374 | def _is_control(char):
375 | """Checks whether `chars` is a control character."""
376 | # These are technically control characters but we count them as whitespace
377 | # characters.
378 | if char == "\t" or char == "\n" or char == "\r":
379 | return False
380 | cat = unicodedata.category(char)
381 | if cat.startswith("C"):
382 | return True
383 | return False
384 |
385 |
386 | def _is_punctuation(char):
387 | """Checks whether `chars` is a punctuation character."""
388 | cp = ord(char)
389 | # We treat all non-letter/number ASCII as punctuation.
390 | # Characters such as "^", "$", and "`" are not in the Unicode
391 | # Punctuation class but we treat them as punctuation anyways, for
392 | # consistency.
393 | if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
394 | (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
395 | return True
396 | cat = unicodedata.category(char)
397 | if cat.startswith("P"):
398 | return True
399 | return False
400 |
--------------------------------------------------------------------------------
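A minimal usage sketch of the greedy longest-match-first WordPiece algorithm above, using the same toy vocabulary as tokenization_test.py (real runs would load vocab.txt from a released BERT checkpoint):

    import tokenization

    vocab = {t: i for i, t in enumerate(
        ["[UNK]", "want", "##want", "##ed", "un", "runn", "##ing"])}
    wp = tokenization.WordpieceTokenizer(vocab=vocab)

    # "unwanted": longest in-vocab prefix is "un", then the remainder
    # is matched as "##want" + "##ed".
    print(wp.tokenize("unwanted running"))
    # -> ['un', '##want', '##ed', 'runn', '##ing']

    # A word with no valid segmentation collapses to the unknown token.
    print(wp.tokenize("unwantedX"))
    # -> ['[UNK]']
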
/tokenization_test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 |
19 | import os
20 | import tempfile
21 | import six
22 | import tensorflow as tf
23 | import tokenization
24 |
25 |
26 | class TokenizationTest(tf.test.TestCase):
27 |
28 | def test_full_tokenizer(self):
29 | vocab_tokens = [
30 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
31 | "##ing", ","
32 | ]
33 | with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
34 | if six.PY2:
35 | vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
36 | else:
37 | vocab_writer.write("".join(
38 | [x + "\n" for x in vocab_tokens]).encode("utf-8"))
39 |
40 | vocab_file = vocab_writer.name
41 |
42 | tokenizer = tokenization.FullTokenizer(vocab_file)
43 | os.unlink(vocab_file)
44 |
45 | tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
46 | self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
47 |
48 | self.assertAllEqual(
49 | tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
50 |
51 | def test_chinese(self):
52 | tokenizer = tokenization.BasicTokenizer()
53 |
54 | self.assertAllEqual(
55 | tokenizer.tokenize(u"ah\u535A\u63A8zz"),
56 | [u"ah", u"\u535A", u"\u63A8", u"zz"])
57 |
58 | def test_basic_tokenizer_lower(self):
59 | tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
60 |
61 | self.assertAllEqual(
62 | tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
63 | ["hello", "!", "how", "are", "you", "?"])
64 | self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
65 |
66 | def test_basic_tokenizer_no_lower(self):
67 | tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
68 |
69 | self.assertAllEqual(
70 | tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
71 | ["HeLLo", "!", "how", "Are", "yoU", "?"])
72 |
73 | def test_wordpiece_tokenizer(self):
74 | vocab_tokens = [
75 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
76 | "##ing"
77 | ]
78 |
79 | vocab = {}
80 | for (i, token) in enumerate(vocab_tokens):
81 | vocab[token] = i
82 | tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
83 |
84 | self.assertAllEqual(tokenizer.tokenize(""), [])
85 |
86 | self.assertAllEqual(
87 | tokenizer.tokenize("unwanted running"),
88 | ["un", "##want", "##ed", "runn", "##ing"])
89 |
90 | self.assertAllEqual(
91 | tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
92 |
93 | def test_convert_tokens_to_ids(self):
94 | vocab_tokens = [
95 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
96 | "##ing"
97 | ]
98 |
99 | vocab = {}
100 | for (i, token) in enumerate(vocab_tokens):
101 | vocab[token] = i
102 |
103 | self.assertAllEqual(
104 | tokenization.convert_tokens_to_ids(
105 | vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
106 |
107 | def test_is_whitespace(self):
108 | self.assertTrue(tokenization._is_whitespace(u" "))
109 | self.assertTrue(tokenization._is_whitespace(u"\t"))
110 | self.assertTrue(tokenization._is_whitespace(u"\r"))
111 | self.assertTrue(tokenization._is_whitespace(u"\n"))
112 | self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
113 |
114 | self.assertFalse(tokenization._is_whitespace(u"A"))
115 | self.assertFalse(tokenization._is_whitespace(u"-"))
116 |
117 | def test_is_control(self):
118 | self.assertTrue(tokenization._is_control(u"\u0005"))
119 |
120 | self.assertFalse(tokenization._is_control(u"A"))
121 | self.assertFalse(tokenization._is_control(u" "))
122 | self.assertFalse(tokenization._is_control(u"\t"))
123 | self.assertFalse(tokenization._is_control(u"\r"))
124 |
125 | def test_is_punctuation(self):
126 | self.assertTrue(tokenization._is_punctuation(u"-"))
127 | self.assertTrue(tokenization._is_punctuation(u"$"))
128 | self.assertTrue(tokenization._is_punctuation(u"`"))
129 | self.assertTrue(tokenization._is_punctuation(u"."))
130 |
131 | self.assertFalse(tokenization._is_punctuation(u"A"))
132 | self.assertFalse(tokenization._is_punctuation(u" "))
133 |
134 |
135 | if __name__ == "__main__":
136 | tf.test.main()
137 |
--------------------------------------------------------------------------------
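The suite above covers the public surface; a hedged sketch of two additional tests for the private BasicTokenizer helpers, written in the same style (these are additions, not part of the original file):

    import tensorflow as tf
    import tokenization


    class BasicTokenizerHelperTest(tf.test.TestCase):

      def test_strip_accents(self):
        tokenizer = tokenization.BasicTokenizer()
        # NFD-normalizes, then drops combining marks (category "Mn").
        self.assertEqual(tokenizer._run_strip_accents(u"H\u00E9llo"), u"Hello")

      def test_split_on_punc(self):
        tokenizer = tokenization.BasicTokenizer()
        self.assertAllEqual(
            tokenizer._run_split_on_punc(u"hello!how"), ["hello", "!", "how"])


    if __name__ == "__main__":
      tf.test.main()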