├── diagnostic_classifier
│   ├── classify_all.sh
│   ├── dl_data.sh
│   ├── README.md
│   ├── classifier.py
│   ├── prepare_all.sh
│   └── prepare_classification.py
├── get_finnish_data.sh
├── .gitignore
├── README.md
├── cloze.py
├── LICENSE
└── generate.py
/diagnostic_classifier/classify_all.sh:
--------------------------------------------------------------------------------
1 | echo "---" >> exp_classification.log
2 | ls data/*_train_data.npy | sed "s/_train_data.npy//" | while read LANG;
3 | do
4 | python classifier.py $LANG
5 | done
6 | 
--------------------------------------------------------------------------------
/get_finnish_data.sh:
--------------------------------------------------------------------------------
1 | curl -o fi_tdt-ud-train.conllu https://raw.githubusercontent.com/UniversalDependencies/UD_Finnish-TDT/master/fi_tdt-ud-train.conllu
2 | 
3 | cat fi_tdt-ud-train.conllu | grep -P '^# text =' | perl -pe 's/# text = //g' > finnish_sentences.txt
4 | 
--------------------------------------------------------------------------------
/diagnostic_classifier/dl_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | 
3 | mkdir data
4 | cd data
5 | git clone https://github.com/UniversalDependencies/UD_Danish-DDT.git
6 | git clone https://github.com/UniversalDependencies/UD_English-EWT.git
7 | git clone https://github.com/UniversalDependencies/UD_Finnish-TDT.git
8 | git clone https://github.com/UniversalDependencies/UD_German-HDT.git
9 | git clone https://github.com/UniversalDependencies/UD_Norwegian-Bokmaal.git
10 | git clone https://github.com/UniversalDependencies/UD_Norwegian-Nynorsk.git
11 | git clone https://github.com/UniversalDependencies/UD_Swedish-Talbanken.git
12 | 
13 | 
--------------------------------------------------------------------------------
/diagnostic_classifier/README.md:
--------------------------------------------------------------------------------
1 | # Diagnostic Classifier Experiments
2 | 
3 | 
4 | ## Requirements
5 | 
6 | `pip3 install keras numpy torch pytorch_transformers`
7 | 
8 | Download treebanks:
9 | 
10 | `sh dl_data.sh`
11 | 
12 | The languages used in the experiments are: Danish, English, Finnish, German, Norwegian (Bokmaal and Nynorsk), and Swedish.
13 | 
14 | 
15 | ## Main/Non-Main Auxiliary Classification
16 | 
17 | Prepare train, dev and test sets for all pairs of language and BERT version (m-BERT, BERT-en, BERT-de):
18 | 
19 | `sh prepare_all.sh`
20 | 
21 | Train and evaluate a classifier for each pair:
22 | 
23 | `sh classify_all.sh`
24 | 
25 | Experiment results are written to `exp_classification.log`.
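For a single language and model pair, the two steps can also be run by hand. The commands below are the Finnish m-BERT ones from `prepare_all.sh`, followed by the call that `classify_all.sh` makes for the resulting `data/fi` prefix:

`python prepare_classification.py bert-base-multilingual-cased data/UD_Finnish-TDT/fi_tdt-ud-train.conllu data/fi_train`

`python prepare_classification.py bert-base-multilingual-cased data/UD_Finnish-TDT/fi_tdt-ud-dev.conllu data/fi_dev`

`python prepare_classification.py bert-base-multilingual-cased data/UD_Finnish-TDT/fi_tdt-ud-test.conllu data/fi_test`

`python classifier.py data/fi`

`prepare_classification.py` writes `<prefix>_data.npy` and `<prefix>_labels.npy` for each split, and `classifier.py` trains a single softmax layer on top of the frozen BERT representations and appends dev/test accuracy to `exp_classification.log`.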
26 | -------------------------------------------------------------------------------- /diagnostic_classifier/classifier.py: -------------------------------------------------------------------------------- 1 | from keras.models import Model 2 | from keras.layers import Input, Dense 3 | import numpy as np 4 | import sys 5 | 6 | print("Loading", sys.argv[1]) 7 | data = np.load(sys.argv[1]+'_train_data.npy') 8 | labels = np.load(sys.argv[1]+'_train_labels.npy') 9 | val_data = np.load(sys.argv[1]+'_dev_data.npy') 10 | val_labels = np.load(sys.argv[1]+'_dev_labels.npy') 11 | test_data = np.load(sys.argv[1]+'_test_data.npy') 12 | test_labels = np.load(sys.argv[1]+'_test_labels.npy') 13 | 14 | LIMIT = 3031 15 | data = data[:LIMIT,:] 16 | labels = labels[:LIMIT,:] 17 | 18 | test_accs = [] 19 | dev_accs = [] 20 | with open("exp_classification.log",'a') as log: 21 | for exp_i in range(5): 22 | input = Input(shape=(data.shape[1],)) 23 | output = Dense(2, activation='softmax')(input) 24 | model = Model(inputs=input, outputs=output) 25 | 26 | model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy']) 27 | 28 | hist = model.fit(data, labels, batch_size=32, verbose=1, epochs=50, validation_data=(val_data, val_labels)) 29 | dev_accs.append(hist.history['val_acc'][-1]) 30 | test_accs.append(model.evaluate(test_data, test_labels, batch_size=32)[1]) 31 | 32 | log.write("%s\tlen:%d\tdev_acc:%.4f\ttest_acc:%.4f\tbase:%.4f\n" % (sys.argv[1], len(labels), np.mean(dev_accs), np.mean(test_accs), max(sum(labels)/sum(sum(labels))))) 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest
32 | *.spec
33 | 
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 | 
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 | 
50 | # Translations
51 | *.mo
52 | *.pot
53 | 
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 | 
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 | 
63 | # Scrapy stuff:
64 | .scrapy
65 | 
66 | # Sphinx documentation
67 | docs/_build/
68 | 
69 | # PyBuilder
70 | target/
71 | 
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 | 
75 | # pyenv
76 | .python-version
77 | 
78 | # celery beat schedule file
79 | celerybeat-schedule
80 | 
81 | # SageMath parsed files
82 | *.sage.py
83 | 
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 | 
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 | 
97 | # Rope project settings
98 | .ropeproject
99 | 
100 | # mkdocs documentation
101 | /site
102 | 
103 | # mypy
104 | .mypy_cache/
105 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Code for evaluating BERT
2 | 
3 | This repository contains the code used for the experiments presented in the paper ["Is Multilingual BERT Fluent in Language Generation?"](https://www.aclweb.org/anthology/W19-6204/), and consists of three tasks of increasing complexity, designed to test the capabilities of BERT with a focus on text generation. The tasks are:
4 | 
5 | * [Diagnostic classifier: main/non-main auxiliary prediction](https://github.com/TurkuNLP/bert-eval/tree/master/diagnostic_classifier)
6 | * Cloze test word prediction
7 | * Sentence generation given left and right-hand context
8 | 
9 | If you find this useful, please cite our paper as:
10 | ```
11 | @inproceedings{ronnqvist-2019-bert-eval,
12 |     title = "Is Multilingual BERT Fluent in Language Generation?",
13 |     author = "R\"onnqvist, Samuel and Kanerva, Jenna and Salakoski, Tapio and Ginter, Filip",
14 |     booktitle = "Proceedings of the First NLPL Workshop on Deep Learning for Natural Language Processing",
15 |     month = oct,
16 |     year = "2019",
17 |     address = "Turku, Finland",
18 |     publisher = "Association for Computational Linguistics"
19 | }
20 | ```
21 | 
22 | ## Requirements
23 | 
24 | `pip3 install pytorch-transformers`
25 | 
26 | 
27 | ## Generation (toy example)
28 | 
29 | Generate text from a given seed.
30 | 
31 | Usage: `python3 generate.py --model /path/to/mybert-model --vocab /path/to/mybert-model/mybert-model.vocab.wp`
32 | 
33 | Where `mybert-model` is a directory containing `pytorch_model.bin` and `config.json`. Use `--mask_len` to define how many subwords to generate (default is 30).
34 | 
35 | 
36 | ## Cloze test
37 | 
38 | We mask a random 15% of words in each sentence and try to predict them back. If a word is composed of several subwords, all of its subwords are masked. Accuracy is measured on the subword level, i.e. how many times the model gives the highest confidence score to the original subword.
39 | 
40 | Usage: `python3 cloze.py --model /path/to/mybert-model --vocab /path/to/mybert-model/mybert-model.vocab.wp --test_sentences sentences.txt --max_sent 1000`
41 | 
42 | Run `python3 cloze.py -h` for more options.
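The whole-word masking scheme, in outline. This is a simplified Python sketch: the actual logic lives in `DataMangler.glue_tokenized`, `DataMangler.random_mask` and `DataMangler.mask_sent` in `cloze.py`, and the helper name below is only illustrative.

```python
import random

def whole_word_mask(subwords, p=0.15):
    """Mask roughly p of the words in a WordPiece-tokenized sentence, whole words at a time."""
    # Group subwords into words: a piece without the "##" prefix starts a new word.
    words = []
    for piece in subwords:
        if piece.startswith("##") and words:
            words[-1].append(piece)   # continuation of the previous word
        else:
            words.append([piece])     # a new word starts
    # Pick ~p of the words; cloze.py skips sentences where this count rounds to zero.
    num_masked = int(round(len(words) * p))
    masked_ids = set(random.sample(range(len(words)), num_masked))
    # Replace every subword of each picked word with [MASK].
    masked = []
    for i, word in enumerate(words):
        if i in masked_ids:
            masked.extend(["[MASK]"] * len(word))
        else:
            masked.extend(word)
    return masked, masked_ids

# whole_word_mask(["The", "un", "##believ", "##able", "story", "happened", "."])
# may, for example, mask word 1 and return
# (['The', '[MASK]', '[MASK]', '[MASK]', 'story', 'happened', '.'], {1})
```

Accuracy is then the share of masked subword positions at which the model's top-ranked prediction equals the original subword (see `DataMangler.compare_subwords`).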
43 | 
44 | #### Finnish data
45 | 
46 | Finnish evaluation data can be downloaded with `./get_finnish_data.sh`, which extracts the UD_Finnish-TDT training sentences and saves them to `finnish_sentences.txt`.
47 | 
--------------------------------------------------------------------------------
/diagnostic_classifier/prepare_all.sh:
--------------------------------------------------------------------------------
1 | MODEL_DE='bert-base-german-cased'
2 | MODEL_EN_U='bert-base-uncased'
3 | MODEL_EN_C='bert-base-cased'
4 | MODEL_ALL='bert-base-multilingual-cased'
5 | 
6 | 
7 | python prepare_classification.py $MODEL_ALL data/UD_English-EWT/en_ewt-ud-train.conllu data/en_mbert_train
8 | python prepare_classification.py $MODEL_ALL data/UD_English-EWT/en_ewt-ud-dev.conllu data/en_mbert_dev
9 | python prepare_classification.py $MODEL_ALL data/UD_English-EWT/en_ewt-ud-test.conllu data/en_mbert_test
10 | 
11 | python prepare_classification.py $MODEL_EN_U data/UD_English-EWT/en_ewt-ud-train.conllu data/en_uncased_train
12 | python prepare_classification.py $MODEL_EN_U data/UD_English-EWT/en_ewt-ud-dev.conllu data/en_uncased_dev
13 | python prepare_classification.py $MODEL_EN_U data/UD_English-EWT/en_ewt-ud-test.conllu data/en_uncased_test
14 | 
15 | #python prepare_classification.py $MODEL_EN_C data/UD_English-EWT/en_ewt-ud-train.conllu data/en_cased_train
16 | #python prepare_classification.py $MODEL_EN_C data/UD_English-EWT/en_ewt-ud-dev.conllu data/en_cased_dev
17 | #python prepare_classification.py $MODEL_EN_C data/UD_English-EWT/en_ewt-ud-test.conllu data/en_cased_test
18 | 
19 | python prepare_classification.py $MODEL_ALL data/UD_German-HDT/de_hdt-ud-train-a.conllu data/de_mbert_train
20 | python prepare_classification.py $MODEL_ALL data/UD_German-HDT/de_hdt-ud-dev.conllu data/de_mbert_dev
21 | python prepare_classification.py $MODEL_ALL data/UD_German-HDT/de_hdt-ud-test.conllu data/de_mbert_test
22 | 
23 | python prepare_classification.py $MODEL_DE data/UD_German-HDT/de_hdt-ud-train-a.conllu data/de_train
24 | python prepare_classification.py $MODEL_DE data/UD_German-HDT/de_hdt-ud-dev.conllu data/de_dev
25 | python prepare_classification.py $MODEL_DE data/UD_German-HDT/de_hdt-ud-test.conllu data/de_test
26 | 
27 | python prepare_classification.py $MODEL_ALL data/UD_Danish-DDT/da_ddt-ud-train.conllu data/da_train
28 | python prepare_classification.py $MODEL_ALL data/UD_Danish-DDT/da_ddt-ud-dev.conllu data/da_dev
29 | python prepare_classification.py $MODEL_ALL data/UD_Danish-DDT/da_ddt-ud-test.conllu data/da_test
30 | 
31 | python prepare_classification.py $MODEL_ALL data/UD_Finnish-TDT/fi_tdt-ud-train.conllu data/fi_train
32 | python prepare_classification.py $MODEL_ALL data/UD_Finnish-TDT/fi_tdt-ud-dev.conllu data/fi_dev
33 | python prepare_classification.py $MODEL_ALL data/UD_Finnish-TDT/fi_tdt-ud-test.conllu data/fi_test
34 | 
35 | python prepare_classification.py $MODEL_ALL data/UD_Norwegian-Bokmaal/no_bokmaal-ud-train.conllu data/nb_train
36 | python prepare_classification.py $MODEL_ALL data/UD_Norwegian-Bokmaal/no_bokmaal-ud-dev.conllu data/nb_dev
37 | python prepare_classification.py $MODEL_ALL data/UD_Norwegian-Bokmaal/no_bokmaal-ud-test.conllu data/nb_test
38 | 
39 | python prepare_classification.py $MODEL_ALL data/UD_Norwegian-Nynorsk/no_nynorsk-ud-train.conllu data/nn_train
40 | python prepare_classification.py $MODEL_ALL data/UD_Norwegian-Nynorsk/no_nynorsk-ud-dev.conllu data/nn_dev
41 | python prepare_classification.py $MODEL_ALL data/UD_Norwegian-Nynorsk/no_nynorsk-ud-test.conllu
data/nn_test 42 | 43 | python prepare_classification.py $MODEL_ALL data/UD_Swedish-Talbanken/sv_talbanken-ud-train.conllu data/sv_train 44 | python prepare_classification.py $MODEL_ALL data/UD_Swedish-Talbanken/sv_talbanken-ud-dev.conllu data/sv_dev 45 | python prepare_classification.py $MODEL_ALL data/UD_Swedish-Talbanken/sv_talbanken-ud-test.conllu data/sv_test 46 | -------------------------------------------------------------------------------- /diagnostic_classifier/prepare_classification.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | 4 | MODEL = sys.argv[1] 5 | PATH = sys.argv[2] #'data/UD_Finnish-TDT/fi_tdt-ud-train.conllu' 6 | OUT_PREFIX = sys.argv[3] 7 | if len(sys.argv) >= 5: 8 | LIMIT = int(sys.argv[4]) 9 | else: 10 | LIMIT = 1000 11 | 12 | #MODEL = 'bert-base-german-cased' 13 | #MODEL = 'bert-base-cased' 14 | #MODEL = 'bert-base-multilingual-cased' 15 | 16 | ### Load m-BERT 17 | 18 | from pytorch_transformers import BertModel, BertTokenizer 19 | import torch 20 | 21 | 22 | bert = BertModel.from_pretrained(MODEL) 23 | tokenizer = BertTokenizer.from_pretrained(MODEL) 24 | 25 | 26 | ### Read CoNLL-U 27 | print("---", file=sys.stderr) 28 | print("MODEL:", MODEL, file=sys.stderr) 29 | print("FILE:", PATH, file=sys.stderr) 30 | 31 | ID,FORM,LEMMA,UPOS,_,FEATS,HEAD,DEPREL,DEPS,MISC=range(10) 32 | def get_sentences(f, limit=None): 33 | """conllu reader""" 34 | sent=[] 35 | comment=[] 36 | sent_cnt = 0 37 | for line in f: 38 | line=line.strip() 39 | if not line: # new sentence 40 | if sent: 41 | yield comment,sent 42 | sent_cnt += 1 43 | if limit and sent_cnt >= limit: 44 | return 45 | comment=[] 46 | sent=[] 47 | elif line.startswith("#"): 48 | comment.append(line) 49 | else: #normal line 50 | sent.append(line.split("\t")) 51 | else: 52 | if sent: 53 | yield comment, sent 54 | 55 | 56 | cntr0, cntr1 = 0,0 57 | 58 | examples = [] 59 | for comment, sent in get_sentences(open(PATH), limit=None): 60 | tokens = {} 61 | root = None 62 | for token in sent: 63 | tokens[token[ID]] = token 64 | if token[DEPREL] == 'root': 65 | root = token[ID] 66 | surface = ' '.join([t[FORM] for t in tokens.values()])#.replace(':n', ' : n')#.replace(' ,', ',').replace(' .', '.').replace(' !', '!').replace(' ?', '?').replace(' ;', ';') 67 | for token in tokens.values(): 68 | if token[DEPREL] in ['cop', 'aux']: 69 | if token[HEAD] == root: 70 | examples.append({'class': 1, 'sent': surface, 'token_idx': int(token[ID]), 'token': token[FORM]}) 71 | cntr1 += 1 72 | else: 73 | examples.append({'class': 0, 'sent': surface, 'token_idx': int(token[ID]), 'token': token[FORM]}) 74 | cntr0 += 1 75 | if len(examples) >= LIMIT: 76 | break 77 | 78 | print("Class counts:", file=sys.stderr) 79 | print("0:", cntr0, file=sys.stderr) 80 | print("1:", cntr1, file=sys.stderr) 81 | print("0+1:", cntr1+cntr0, file=sys.stderr) 82 | print() 83 | 84 | data = [] 85 | labels = [] 86 | 87 | bert.to('cuda') 88 | 89 | skips = 0 90 | for ex in examples: 91 | input_ids = torch.tensor([tokenizer.encode(ex['sent'])]).to('cuda') 92 | with torch.no_grad(): 93 | try: 94 | last_hidden_states = bert(input_ids[:511])[0] # Models outputs are now tuples 95 | except RuntimeError: 96 | continue 97 | pieces = [tokenizer.ids_to_tokens[int(x)] for x in input_ids[0]] 98 | ex['pieces'] = pieces 99 | word_nr = 0 100 | matches = [] 101 | # Look for ex['token_idx'] in piece sequence 102 | for piece_idx, piece in enumerate(pieces): 103 | if not piece.startswith('##'): 104 | word_nr += 1 
105 | if word_nr == ex['token_idx']: 106 | break 107 | 108 | #print("Searching:", pieces[piece_idx:]) 109 | in_token = False 110 | for pi, p in enumerate(pieces[piece_idx:]): 111 | if in_token: 112 | matches.append({'piece_idx': piece_idx+pi, 'piece': p}) 113 | if ex['token'].endswith(p.strip('#')) or pi > 6: 114 | break 115 | elif ex['token'].startswith(p): 116 | matches.append({'piece_idx': piece_idx+pi, 'piece': p}) 117 | if ex['token'] == p: 118 | break 119 | else: 120 | in_token = True 121 | 122 | for k,v in ex.items(): 123 | if k=='pieces': 124 | v = ' '.join(v) 125 | #print("%s:" % k,v) 126 | 127 | for match in matches: 128 | #print(match) 129 | data.append(last_hidden_states[0][match['piece_idx']]) 130 | labels.append(ex['class']) 131 | 132 | if not matches: 133 | #print("No match.") 134 | skips += 1 135 | #print() 136 | 137 | 138 | print("Skips:", skips, file=sys.stderr) 139 | print("Input-output pairs:", len(data), file=sys.stderr) 140 | print("Class 1:", sum(labels), '/', len(labels), "(%.2f%%)" % (sum(labels)/len(labels)*100), file=sys.stderr) 141 | 142 | from keras.utils import np_utils 143 | data = np.array([np.array(x.cpu()) for x in data]) 144 | np.save(OUT_PREFIX+"_data.npy", data) 145 | np.save(OUT_PREFIX+"_labels.npy", np_utils.to_categorical(labels)) 146 | -------------------------------------------------------------------------------- /cloze.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | #!pip3 install pytorch_pretrained_bert 4 | #!pip3 install pytorch_transformers 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from pytorch_transformers import BertTokenizer, BertModel, BertForMaskedLM 10 | 11 | import random 12 | import math 13 | import time 14 | import copy 15 | import sys 16 | 17 | 18 | 19 | class BertGeneration(object): 20 | 21 | 22 | def __init__(self, model_directory, vocab_file, lower=False): 23 | 24 | 25 | # Load pre-trained model (weights) 26 | 27 | self.model = BertForMaskedLM.from_pretrained(model_directory) 28 | self.model.eval() 29 | self.cuda = torch.cuda.is_available() 30 | if self.cuda: 31 | self.model = self.model.cuda() 32 | 33 | # Load pre-trained model tokenizer (vocabulary) 34 | self.tokenizer = BertTokenizer(vocab_file=vocab_file, do_lower_case=lower) 35 | 36 | self.CLS = '[CLS]' 37 | self.SEP = '[SEP]' 38 | self.MASK = '[MASK]' 39 | self.mask_id = self.tokenizer.convert_tokens_to_ids([self.MASK])[0] 40 | self.sep_id = self.tokenizer.convert_tokens_to_ids([self.SEP])[0] 41 | self.cls_id = self.tokenizer.convert_tokens_to_ids([self.CLS])[0] 42 | 43 | 44 | 45 | def tokenize_batch(self, batch): 46 | return [self.tokenizer.convert_tokens_to_ids(sent) for sent in batch] 47 | 48 | def untokenize_batch(self, batch): 49 | return [self.tokenizer.convert_ids_to_tokens(sent) for sent in batch] 50 | 51 | def detokenize(self, sent): 52 | """ Roughly detokenizes (mainly undoes wordpiece) """ 53 | new_sent = [] 54 | for i, tok in enumerate(sent): 55 | if tok.startswith("##"): 56 | new_sent[len(new_sent) - 1] = new_sent[len(new_sent) - 1] + tok[2:] 57 | else: 58 | new_sent.append(tok) 59 | return new_sent 60 | 61 | 62 | def printer(self, sent, should_detokenize=True): 63 | if should_detokenize: 64 | sent = self.detokenize(sent)[1:-1] 65 | print(" ".join(sent)) 66 | 67 | 68 | 69 | def predict_masked(self, sent): 70 | tokens=['[CLS]'] + sent + ['[SEP]'] 71 | target_indices = [i for i, x in enumerate(tokens) if x == '[MASK]'] 72 | input_ids=self.tokenizer.convert_tokens_to_ids(tokens) 73 | 
tens=torch.LongTensor(input_ids).unsqueeze(0) 74 | if self.cuda: 75 | tens = tens.cuda() 76 | try: 77 | res=self.model(tens)[0] 78 | except RuntimeError: # Error in the model vocabulary, remove when a corret model is trained 79 | return None 80 | target_tensor = torch.LongTensor(target_indices) 81 | if self.cuda: 82 | target_tensor = target_tensor.cuda() 83 | res = (torch.index_select(res, 1, target_tensor)) 84 | res = torch.narrow(torch.argsort(res, dim=-1, descending=True), -1, 0, 5) 85 | 86 | predicted = [] 87 | for mask in res[0,]: 88 | candidates = self.tokenizer.convert_ids_to_tokens([i.item() for i in mask]) 89 | 90 | predicted.append(candidates) 91 | 92 | return predicted 93 | 94 | 95 | 96 | 97 | class DataMangler(object): 98 | 99 | def __init__(self, file_name, min_len, max_len, max_sent): 100 | 101 | self.sentences = self.read_sentences(file_name, min_len, max_len) 102 | self.max_eval_sentences = max_sent 103 | 104 | 105 | 106 | def read_sentences(self, file_name, min_len, max_len): 107 | sentences = [] 108 | with open(file_name, "rt", encoding="utf-8") as f: 109 | for line in f: 110 | if len(line.split(" ")) < min_len or len(line.split(" ")) > max_len: 111 | continue 112 | sentences.append(line.strip()) 113 | random.shuffle(sentences) 114 | return sentences 115 | 116 | 117 | 118 | def glue_tokenized(self, tokenized): 119 | tokenized_words = [] 120 | for subword in tokenized: 121 | if not subword.startswith("##"): # new word starts 122 | tokenized_words.append([]) 123 | tokenized_words[-1].append(subword) 124 | return tokenized_words 125 | 126 | 127 | def random_mask(self, sent, p=0.15): 128 | if len(sent) > 512 - 2: # bert max seq len 129 | return None 130 | num_tokens = int(round(len(sent)*p)) 131 | if num_tokens == 0: 132 | return None 133 | indices = random.sample(range(len(sent)), num_tokens) 134 | return indices 135 | 136 | def mask_sent(self, sent, mask_indices): 137 | masked_sentence = [] 138 | for i,token in enumerate(sent): 139 | for subword in token: 140 | if i in mask_indices: 141 | masked_sentence.append("[MASK]") 142 | else: 143 | masked_sentence.append(subword) 144 | return masked_sentence 145 | 146 | def unmask_sent(self, sent, mask_indices, predicted, mark=True): 147 | unmasked_sentence = [] 148 | for i,token in enumerate(sent): 149 | for j, subword in enumerate(token): 150 | if i in mask_indices: 151 | p_ = predicted.pop(0)[0] 152 | if j == 0: 153 | p_ = "**"+p_ 154 | if j == len(token)-1: 155 | p_ += "**" 156 | unmasked_sentence.append(p_) 157 | else: 158 | unmasked_sentence.append(subword) 159 | return self.my_detokenizer(unmasked_sentence) 160 | 161 | 162 | def my_detokenizer(self, sent): 163 | new_sent = [] 164 | for i, tok in enumerate(sent): 165 | if tok.startswith("##"): 166 | new_sent[len(new_sent) - 1] = new_sent[len(new_sent) - 1] + tok[2:] 167 | elif tok.startswith("**##"): 168 | new_sent[len(new_sent) - 1] = new_sent[len(new_sent) - 1] + "**" + tok[4:] 169 | else: 170 | new_sent.append(tok) 171 | 172 | return " ".join(new_sent).replace("****", "").replace("** **", " ") 173 | 174 | 175 | def compare_subwords(self, glued_tokenized, predicted, mask_index): 176 | correct = 0 177 | for i, word in enumerate(glued_tokenized): 178 | if i in mask_index: 179 | for j, subword in enumerate(word): 180 | pred = predicted.pop(0)[0] 181 | if subword == pred: 182 | correct +=1 183 | return correct 184 | 185 | 186 | def predict_iterator(self, bert_model): 187 | 188 | counter = 0 189 | 190 | for sentence in self.sentences: 191 | 192 | tokenized_sentence = 
bert_model.tokenizer.tokenize(sentence) 193 | 194 | glued = self.glue_tokenized(tokenized_sentence) 195 | 196 | mask_index = self.random_mask(glued) 197 | if mask_index == None: # sentence is too short 198 | continue 199 | masked_sentence = self.mask_sent(glued, mask_index) 200 | 201 | 202 | # run bert 203 | predicted = bert_model.predict_masked(masked_sentence) 204 | if not predicted: 205 | continue 206 | 207 | correct_subwords = self.compare_subwords(glued, copy.copy(predicted), mask_index) # number of correctly predicted subwords 208 | total_subwords = sum([len(glued[mi]) for mi in mask_index]) # number of subwords 209 | 210 | 211 | yield correct_subwords, total_subwords, (" ".join(masked_sentence), sentence, self.unmask_sent(glued, mask_index, predicted, mark=True)) 212 | counter += 1 213 | 214 | if self.max_eval_sentences != 0 and counter > self.max_eval_sentences: 215 | break 216 | 217 | 218 | def main(args): 219 | 220 | correct_subwords = 0 221 | total_subwords = 0 222 | 223 | dataset = DataMangler(args.test_sentences, args.min_len, args.max_len, args.max_sentences) 224 | bert_model = BertGeneration(args.model_directory, args.vocab_file, args.lowercase) 225 | 226 | 227 | for correct_, total_, prediction_ in dataset.predict_iterator(bert_model): 228 | 229 | correct_subwords += correct_ 230 | total_subwords += total_ 231 | 232 | if args.verbose: 233 | 234 | print("Input:",prediction_[0], file=sys.stdout) 235 | print("Orig:",prediction_[1], file=sys.stdout) 236 | print("Pred:",prediction_[2], file=sys.stdout) 237 | print(file=sys.stdout) 238 | 239 | 240 | print("Correct:", correct_subwords, "Total:", total_subwords, "Accuracy:", (correct_subwords/total_subwords)*100, file=sys.stderr) 241 | 242 | 243 | 244 | if __name__=="__main__": 245 | import argparse 246 | argparser = argparse.ArgumentParser(description='') 247 | argparser.add_argument('--model_directory', required=True, type=str, help='Directory with pytorch_model.bin and config.yaml') 248 | argparser.add_argument('--vocab_file', required=True, type=str, help='Path to the vocabulary file.') 249 | argparser.add_argument('--lowercase', default=False, action="store_true", help='Lowercase text (Default: False)') 250 | argparser.add_argument('--test_sentences', required=True, type=str, help='File with test sentences one sentence per line (untokenized raw text).') 251 | argparser.add_argument('--min_len', default=5, type=int, help='Minumum sentence length used in evaluation (Default: 5 tokens, as counted with whitespace tokenizer)') 252 | argparser.add_argument('--max_len', default=50, type=int, help='Maximum sentence length used in evalaution (Default: 50 tokens, as counted with whitespace tokenizer)') 253 | argparser.add_argument('--max_sentences', default=0, type=int, help='How many sentences to use in evaluation (Default: 0, use all))') 254 | argparser.add_argument('--verbose', default=False, action="store_true", help='Print the original and predicted sentences.') 255 | args = argparser.parse_args() 256 | 257 | main(args) 258 | 259 | 260 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /generate.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Modified version of https://github.com/nyu-dl/bert-gen (https://arxiv.org/abs/1902.04094) 4 | 5 | 6 | #!pip3 install pytorch_pretrained_bert 7 | #!pip3 install pytorch_transformers 8 | 9 | import numpy as np 10 | import torch 11 | 12 | from pytorch_transformers import BertTokenizer, BertModel, BertForMaskedLM 13 | 14 | import random 15 | import math 16 | import time 17 | 18 | 19 | 20 | class BertGeneration(object): 21 | 22 | 23 | def __init__(self, model_directory, vocab_file, lower=False): 24 | 25 | 26 | # Load pre-trained model (weights) 27 | 28 | self.model = BertForMaskedLM.from_pretrained(model_directory) 29 | self.model.eval() 30 | self.cuda = torch.cuda.is_available() 31 | if self.cuda: 32 | self.model = self.model.cuda() 33 | 34 | # Load pre-trained model tokenizer (vocabulary) 35 | self.tokenizer = BertTokenizer(vocab_file=vocab_file, do_lower_case=lower) 36 | 37 | self.CLS = '[CLS]' 38 | self.SEP = '[SEP]' 39 | self.MASK = '[MASK]' 40 | self.mask_id = self.tokenizer.convert_tokens_to_ids([self.MASK])[0] 41 | self.sep_id = self.tokenizer.convert_tokens_to_ids([self.SEP])[0] 42 | self.cls_id = self.tokenizer.convert_tokens_to_ids([self.CLS])[0] 43 | 44 | 45 | 46 | def tokenize_batch(self, batch): 47 | return [self.tokenizer.convert_tokens_to_ids(sent) for sent in batch] 48 | 49 | def untokenize_batch(self, batch): 50 | return [self.tokenizer.convert_ids_to_tokens(sent) for sent in batch] 51 | 52 | def detokenize(self, sent): 53 | """ Roughly detokenizes (mainly undoes wordpiece) """ 54 | new_sent = [] 55 | for i, tok in enumerate(sent): 56 | if tok.startswith("##"): 57 | new_sent[len(new_sent) - 1] = new_sent[len(new_sent) - 1] + tok[2:] 58 | else: 59 | new_sent.append(tok) 60 | return new_sent 61 | 62 | 63 | 64 | def generate_step(self, out, gen_idx, temperature=None, top_k=0, sample=False, return_list=True): 65 | """ Generate a word from from out[gen_idx] 66 | 67 | args: 68 | - out (torch.Tensor): tensor of logits of size batch_size x seq_len x vocab_size 69 | - gen_idx (int): location for which to generate for 70 | - top_k (int): if >0, only sample from the top k most probable words 71 | - sample (Bool): if True, sample from full distribution. 
Overridden by top_k 72 | """ 73 | logits = out[:, gen_idx] 74 | if temperature is not None: 75 | logits = logits / temperature 76 | if top_k > 0: 77 | kth_vals, kth_idx = logits.topk(top_k, dim=-1) 78 | dist = torch.distributions.categorical.Categorical(logits=kth_vals) 79 | idx = kth_idx.gather(dim=1, index=dist.sample().unsqueeze(-1)).squeeze(-1) 80 | elif sample: 81 | dist = torch.distributions.categorical.Categorical(logits=logits) 82 | idx = dist.sample().squeeze(-1) 83 | else: 84 | idx = torch.argmax(logits, dim=-1) 85 | return idx.tolist() if return_list else idx 86 | 87 | 88 | def get_init_text(self, seed_text, max_len, batch_size = 1, rand_init=False): 89 | """ Get initial sentence by padding seed_text with either masks or random words to max_len """ 90 | batch = [seed_text + [self.MASK] * max_len + [self.SEP] for _ in range(batch_size)] 91 | #if rand_init: 92 | # for ii in range(max_len): 93 | # init_idx[seed_len+ii] = np.random.randint(0, len(tokenizer.vocab)) 94 | 95 | return self.tokenize_batch(batch) 96 | 97 | def printer(self, sent, should_detokenize=True): 98 | if should_detokenize: 99 | sent = self.detokenize(sent)[1:-1] 100 | print(" ".join(sent)) 101 | 102 | 103 | # This is the meat of the algorithm. The general idea is 104 | # 1. start from all masks 105 | # 2. repeatedly pick a location, mask the token at that location, and generate from the probability distribution given by BERT 106 | # 3. stop when converged or tired of waiting 107 | 108 | # We consider three "modes" of generating: 109 | # - generate a single token for a position chosen uniformly at random for a chosen number of time steps 110 | # - generate in sequential order (L->R), one token at a time 111 | # - generate for all positions at once for a chosen number of time steps 112 | 113 | # The `generate` function wraps and batches these three generation modes. In practice, we find that the first leads to the most fluent samples. 
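# One parallel-sequential iteration, in outline (a plain-language summary of what
# parallel_sequential_generation below does for the whole batch at each step):
#   1. pick a random position kk inside the max_len span that follows the seed
#   2. overwrite that position with [MASK] in every sequence of the batch
#   3. run BERT once and take the logits at position kk
#   4. during the burn-in phase, sample the replacement token from the full distribution;
#      afterwards sample from the top_k logits (or take the argmax when top_k == 0)
#   5. write the chosen token back and repeat, max_iter times in total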
114 | 115 | # Generation modes as functions 116 | 117 | 118 | def parallel_sequential_generation(self, seed_text, batch_size=10, max_len=15, top_k=0, temperature=None, max_iter=300, burnin=200, 119 | cuda=False, print_every=10, verbose=True): 120 | """ Generate for one random position at a timestep 121 | 122 | args: 123 | - burnin: during burn-in period, sample from full distribution; afterwards take argmax 124 | """ 125 | seed_len = len(seed_text) 126 | batch = self.get_init_text(seed_text, max_len, batch_size) 127 | 128 | for ii in range(max_iter): 129 | kk = np.random.randint(0, max_len) 130 | for jj in range(batch_size): 131 | batch[jj][seed_len+kk] = self.mask_id 132 | inp = torch.tensor(batch).cuda() if cuda else torch.tensor(batch) 133 | out = self.model(inp)[0] 134 | topk = top_k if (ii >= burnin) else 0 135 | idxs = self.generate_step(out, gen_idx=seed_len+kk, top_k=topk, temperature=temperature, sample=(ii < burnin)) 136 | for jj in range(batch_size): 137 | batch[jj][seed_len+kk] = idxs[jj] 138 | 139 | if verbose and np.mod(ii+1, print_every) == 0: 140 | for_print = self.tokenizer.convert_ids_to_tokens(batch[0]) 141 | for_print = for_print[:seed_len+kk+1] + ['(*)'] + for_print[seed_len+kk+1:] 142 | print("iter", ii+1, " ".join(for_print)) 143 | 144 | return self.untokenize_batch(batch) 145 | 146 | def parallel_generation(self, seed_text, batch_size=10, max_len=15, top_k=0, temperature=None, max_iter=300, sample=True, 147 | cuda=False, print_every=10, verbose=True): 148 | """ Generate for all positions at each time step """ 149 | seed_len = len(seed_text) 150 | batch = self.get_init_text(seed_text, max_len, batch_size) 151 | 152 | for ii in range(max_iter): 153 | inp = torch.tensor(batch).cuda() if cuda else torch.tensor(batch) 154 | out = self.model(inp)[0] 155 | for kk in range(max_len): 156 | idxs = self.generate_step(out, gen_idx=seed_len+kk, top_k=top_k, temperature=temperature, sample=sample) 157 | for jj in range(batch_size): 158 | batch[jj][seed_len+kk] = idxs[jj] 159 | 160 | if verbose and np.mod(ii, print_every) == 0: 161 | print("iter", ii+1, " ".join(self.tokenizer.convert_ids_to_tokens(batch[0]))) 162 | 163 | return self.untokenize_batch(batch) 164 | 165 | def sequential_generation(self, seed_text, batch_size=10, max_len=15, leed_out_len=15, 166 | top_k=0, temperature=None, sample=True, cuda=False): 167 | """ Generate one word at a time, in L->R order """ 168 | seed_len = len(seed_text) 169 | batch = self.get_init_text(seed_text, max_len, batch_size) 170 | 171 | for ii in range(max_len): 172 | inp = [sent[:seed_len+ii+leed_out_len]+[self.sep_id] for sent in batch] 173 | inp = torch.tensor(batch).cuda() if cuda else torch.tensor(batch) 174 | out = self.model(inp)[0] 175 | idxs = self.generate_step(out, gen_idx=seed_len+ii, top_k=top_k, temperature=temperature, sample=sample) 176 | for jj in range(batch_size): 177 | batch[jj][seed_len+ii] = idxs[jj] 178 | 179 | return self.untokenize_batch(batch) 180 | 181 | 182 | def generate(self, n_samples, seed_text="[CLS]", batch_size=10, max_len=25, 183 | generation_mode="parallel-sequential", 184 | sample=True, top_k=100, temperature=1.0, burnin=200, max_iter=500, 185 | cuda=False, print_every=1, leed_out_len=15): 186 | # main generation function to call 187 | sentences = [] 188 | n_batches = math.ceil(n_samples / batch_size) 189 | start_time = time.time() 190 | for batch_n in range(n_batches): 191 | if generation_mode == "parallel-sequential": 192 | batch = self.parallel_sequential_generation(seed_text, batch_size=batch_size, 
max_len=max_len, top_k=top_k, 193 | temperature=temperature, burnin=burnin, max_iter=max_iter, 194 | cuda=cuda, verbose=False) 195 | elif generation_mode == "sequential": 196 | batch = self.sequential_generation(seed_text, batch_size=batch_size, max_len=max_len, top_k=top_k, 197 | temperature=temperature, leed_out_len=leed_out_len, sample=sample, 198 | cuda=cuda) 199 | elif generation_mode == "parallel": 200 | batch = self.parallel_generation(seed_text, batch_size=batch_size, 201 | max_len=max_len, top_k=top_k, temperature=temperature, 202 | sample=sample, max_iter=max_iter, 203 | cuda=cuda, verbose=False) 204 | 205 | if (batch_n + 1) % print_every == 0: 206 | print("Finished batch %d in %.3fs" % (batch_n + 1, time.time() - start_time)) 207 | start_time = time.time() 208 | 209 | sentences += batch 210 | return sentences 211 | 212 | 213 | def main(args): 214 | #Let's call the actual generation function! We'll use the following settings 215 | #- max_len (40): length of sequence to generate 216 | #- top_k (100): at each step, sample from the top_k most likely words 217 | #- temperature (1.0): smoothing parameter for the next word distribution. Higher means more like uniform; lower means more peaky 218 | #- burnin (250): for non-sequential generation, for the first burnin steps, sample from the entire next word distribution, instead of top_k 219 | #- max_iter (500): number of iterations to run for 220 | #- seed_text (["CLS"]): prefix to generate for. We found it crucial to start with the CLS token; you can try adding to it 221 | 222 | n_samples = 5 223 | batch_size = 5 224 | max_len = args.mask_len 225 | top_k = 100 226 | temperature = 1.0 227 | generation_mode = args.mode 228 | leed_out_len = 5 # max_len 229 | burnin = 250 230 | sample = True 231 | max_iter = 500 232 | 233 | model = BertGeneration(args.model_directory, args.vocab_file, args.lowercase) 234 | 235 | while True: 236 | 237 | user_seed = input("Seed for text generation: ") 238 | 239 | # Choose the prefix context 240 | seed_text = ['[CLS]'] + model.tokenizer.tokenize(user_seed.strip()) 241 | 242 | print(seed_text) 243 | bert_sents = model.generate(n_samples, seed_text=seed_text, batch_size=batch_size, max_len=max_len, 244 | generation_mode=generation_mode, 245 | sample=sample, top_k=top_k, temperature=temperature, burnin=burnin, max_iter=max_iter, 246 | cuda=model.cuda, leed_out_len=leed_out_len) 247 | 248 | for sent in bert_sents: 249 | model.printer(sent, should_detokenize=True) 250 | 251 | 252 | if __name__=="__main__": 253 | import argparse 254 | argparser = argparse.ArgumentParser(description='') 255 | argparser.add_argument('--model_directory', required=True, type=str, help='Directory with pytorch_model.bin and config.yaml') 256 | argparser.add_argument('--vocab_file', required=True, type=str, help='Name of the vocabulary file.') 257 | argparser.add_argument('--lowercase', default=False, action="store_true", help='Lowercase text (Default: False)') 258 | argparser.add_argument('--mode', default="parallel-sequential", choices=["parallel-sequential", "sequential", "parallel"], help='Generation mode') 259 | argparser.add_argument('--mask_len', default=30, type=int, help='How many subwords to generate after seed text.') 260 | args = argparser.parse_args() 261 | 262 | main(args) 263 | 264 | 265 | --------------------------------------------------------------------------------