├── README.md
├── assets
│   ├── table3.jpg
│   └── table5.jpg
├── eval_lm.py
├── fairseq
│   ├── __init__.py
│   ├── binarizer.py
│   ├── bleu.py
│   ├── checkpoint_utils.py
│   ├── clib
│   │   ├── libbleu
│   │   │   ├── libbleu.cpp
│   │   │   └── module.cpp
│   │   ├── libnat
│   │   │   └── edit_dist.cpp
│   │   └── libnat_cuda
│   │       ├── binding.cpp
│   │       ├── edit_dist.cu
│   │       └── edit_dist.h
│   ├── criterions
│   │   ├── __init__.py
│   │   ├── adaptive_loss.py
│   │   ├── binary_cross_entropy.py
│   │   ├── composite_loss.py
│   │   ├── cross_entropy.py
│   │   ├── fairseq_criterion.py
│   │   ├── label_smoothed_cross_entropy.py
│   │   ├── label_smoothed_cross_entropy_with_alignment.py
│   │   ├── legacy_masked_lm.py
│   │   ├── masked_lm.py
│   │   ├── mt_loss.py
│   │   ├── nat_loss.py
│   │   ├── sentence_prediction.py
│   │   └── sentence_ranking.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── append_token_dataset.py
│   │   ├── audio
│   │   │   ├── __init__.py
│   │   │   └── raw_audio_dataset.py
│   │   ├── backtranslation_dataset.py
│   │   ├── base_wrapper_dataset.py
│   │   ├── colorize_dataset.py
│   │   ├── concat_dataset.py
│   │   ├── concat_sentences_dataset.py
│   │   ├── data_utils.py
│   │   ├── data_utils_fast.pyx
│   │   ├── denoising_dataset.py
│   │   ├── dictionary.py
│   │   ├── encoders
│   │   │   ├── __init__.py
│   │   │   ├── fastbpe.py
│   │   │   ├── gpt2_bpe.py
│   │   │   ├── gpt2_bpe_utils.py
│   │   │   ├── hf_bert_bpe.py
│   │   │   ├── moses_tokenizer.py
│   │   │   ├── nltk_tokenizer.py
│   │   │   ├── sentencepiece_bpe.py
│   │   │   ├── space_tokenizer.py
│   │   │   ├── subword_nmt_bpe.py
│   │   │   └── utils.py
│   │   ├── fairseq_dataset.py
│   │   ├── id_dataset.py
│   │   ├── indexed_dataset.py
│   │   ├── iterators.py
│   │   ├── language_pair_dataset.py
│   │   ├── legacy
│   │   │   ├── __init__.py
│   │   │   ├── block_pair_dataset.py
│   │   │   ├── masked_lm_dataset.py
│   │   │   └── masked_lm_dictionary.py
│   │   ├── list_dataset.py
│   │   ├── lm_context_window_dataset.py
│   │   ├── lru_cache_dataset.py
│   │   ├── mask_tokens_dataset.py
│   │   ├── monolingual_dataset.py
│   │   ├── multi_corpus_sampled_dataset.py
│   │   ├── nested_dictionary_dataset.py
│   │   ├── noising.py
│   │   ├── num_samples_dataset.py
│   │   ├── numel_dataset.py
│   │   ├── offset_tokens_dataset.py
│   │   ├── pad_dataset.py
│   │   ├── plasma_utils.py
│   │   ├── prepend_dataset.py
│   │   ├── prepend_token_dataset.py
│   │   ├── raw_label_dataset.py
│   │   ├── replace_dataset.py
│   │   ├── resampling_dataset.py
│   │   ├── roll_dataset.py
│   │   ├── round_robin_zip_datasets.py
│   │   ├── sharded_dataset.py
│   │   ├── sort_dataset.py
│   │   ├── strip_token_dataset.py
│   │   ├── subsample_dataset.py
│   │   ├── token_block_dataset.py
│   │   ├── token_block_utils_fast.pyx
│   │   ├── transform_eos_dataset.py
│   │   ├── transform_eos_lang_pair_dataset.py
│   │   └── truncate_dataset.py
│   ├── distributed_utils.py
│   ├── file_io.py
│   ├── file_utils.py
│   ├── hub_utils.py
│   ├── iterative_refinement_generator.py
│   ├── legacy_distributed_data_parallel.py
│   ├── meters.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── bart
│   │   │   ├── __init__.py
│   │   │   ├── hub_interface.py
│   │   │   └── model.py
│   │   ├── composite_encoder.py
│   │   ├── distributed_fairseq_model.py
│   │   ├── fairseq_decoder.py
│   │   ├── fairseq_encoder.py
│   │   ├── fairseq_incremental_decoder.py
│   │   ├── fairseq_model.py
│   │   ├── fconv.py
│   │   ├── fconv_lm.py
│   │   ├── fconv_self_att.py
│   │   ├── lightconv.py
│   │   ├── lightconv_lm.py
│   │   ├── lstm.py
│   │   ├── masked_lm.py
│   │   ├── model_utils.py
│   │   ├── mt_transformer.py
│   │   ├── multilingual_transformer.py
│   │   ├── nat
│   │   │   ├── __init__.py
│   │   │   ├── cmlm_transformer.py
│   │   │   ├── fairseq_nat_model.py
│   │   │   ├── insertion_transformer.py
│   │   │   ├── iterative_nonautoregressive_transformer.py
│   │   │   ├── levenshtein_transformer.py
│   │   │   ├── levenshtein_utils.py
│   │   │   ├── nat_crf_transformer.py
│   │   │   ├── nonautoregressive_ensembles.py
│   │   │   └── nonautoregressive_transformer.py
│   │   ├── roberta
│   │   │   ├── __init__.py
│   │   │   ├── alignment_utils.py
│   │   │   ├── hub_interface.py
│   │   │   └── model.py
│   │   ├── transformer.py
│   │   ├── transformer_from_pretrained_xlm.py
│   │   ├── transformer_lm.py
│   │   └── wav2vec.py
│   ├── modules
│   │   ├── __init__.py
│   │   ├── adaptive_input.py
│   │   ├── adaptive_softmax.py
│   │   ├── beamable_mm.py
│   │   ├── character_token_embedder.py
│   │   ├── conv_tbc.py
│   │   ├── cuda_utils.cu
│   │   ├── downsampled_multihead_attention.py
│   │   ├── dynamic_convolution.py
│   │   ├── dynamic_crf_layer.py
│   │   ├── dynamicconv_layer
│   │   │   ├── __init__.py
│   │   │   ├── cuda_function_gen.py
│   │   │   ├── dynamicconv_cuda.cpp
│   │   │   ├── dynamicconv_cuda.cuh
│   │   │   ├── dynamicconv_cuda_kernel.cu
│   │   │   ├── dynamicconv_layer.py
│   │   │   ├── dynamiconv_cpu.cpp
│   │   │   └── setup.py
│   │   ├── gelu.py
│   │   ├── grad_multiply.py
│   │   ├── highway.py
│   │   ├── layer_norm.py
│   │   ├── learned_positional_embedding.py
│   │   ├── lightconv_layer
│   │   │   ├── __init__.py
│   │   │   ├── cuda_function_gen.py
│   │   │   ├── lightconv_cuda.cpp
│   │   │   ├── lightconv_cuda.cuh
│   │   │   ├── lightconv_cuda_kernel.cu
│   │   │   ├── lightconv_layer.py
│   │   │   └── setup.py
│   │   ├── lightweight_convolution.py
│   │   ├── linearized_convolution.py
│   │   ├── logsumexp_moe.py
│   │   ├── mean_pool_gating_network.py
│   │   ├── multihead_attention.py
│   │   ├── positional_embedding.py
│   │   ├── scalar_bias.py
│   │   ├── sinusoidal_positional_embedding.py
│   │   ├── sparse_multihead_attention.py
│   │   ├── sparse_transformer_sentence_encoder.py
│   │   ├── sparse_transformer_sentence_encoder_layer.py
│   │   ├── transformer_layer.py
│   │   ├── transformer_sentence_encoder.py
│   │   ├── transformer_sentence_encoder_layer.py
│   │   ├── unfold.py
│   │   └── vggblock.py
│   ├── optim
│   │   ├── __init__.py
│   │   ├── adadelta.py
│   │   ├── adafactor.py
│   │   ├── adagrad.py
│   │   ├── adam.py
│   │   ├── adamax.py
│   │   ├── bmuf.py
│   │   ├── fairseq_optimizer.py
│   │   ├── fp16_optimizer.py
│   │   ├── lr_scheduler
│   │   │   ├── __init__.py
│   │   │   ├── cosine_lr_scheduler.py
│   │   │   ├── fairseq_lr_scheduler.py
│   │   │   ├── fixed_schedule.py
│   │   │   ├── inverse_square_root_schedule.py
│   │   │   ├── polynomial_decay_schedule.py
│   │   │   ├── reduce_lr_on_plateau.py
│   │   │   ├── tri_stage_lr_scheduler.py
│   │   │   └── triangular_lr_scheduler.py
│   │   ├── nag.py
│   │   └── sgd.py
│   ├── options.py
│   ├── pdb.py
│   ├── progress_bar.py
│   ├── registry.py
│   ├── search.py
│   ├── sequence_generator.py
│   ├── sequence_scorer.py
│   ├── tasks
│   │   ├── __init__.py
│   │   ├── audio_pretraining.py
│   │   ├── cross_lingual_lm.py
│   │   ├── denoising.py
│   │   ├── fairseq_task.py
│   │   ├── language_modeling.py
│   │   ├── legacy_masked_lm.py
│   │   ├── masked_lm.py
│   │   ├── multilingual_masked_lm.py
│   │   ├── multilingual_translation.py
│   │   ├── semisupervised_translation.py
│   │   ├── sentence_prediction.py
│   │   ├── sentence_ranking.py
│   │   ├── translation.py
│   │   ├── translation_from_pretrained_xlm.py
│   │   ├── translation_lev.py
│   │   ├── translation_moe.py
│   │   └── translation_mt.py
│   ├── tokenizer.py
│   ├── trainer.py
│   └── utils.py
├── fairseq_cli
│   ├── __init__.py
│   ├── eval_lm.py
│   ├── generate.py
│   ├── interactive.py
│   ├── preprocess.py
│   ├── score.py
│   ├── setup.py
│   └── train.py
├── fairseq_logo.png
├── generate.py
├── hubconf.py
├── interactive.py
├── preprocess.py
├── run
│   ├── test.sh
│   └── train.sh
├── score.py
├── scripts
│   ├── __init__.py
│   ├── average_checkpoints.py
│   ├── build_sym_alignment.py
│   ├── compare_namespaces.py
│   ├── compound_split_bleu.sh
│   ├── convert_dictionary.lua
│   ├── convert_model.lua
│   ├── count_docs.py
│   ├── multi-bleu.perl
│   ├── sacrebleu_pregen.sh
│   ├── shard_docs.py
│   ├── split_train_valid_docs.py
│   ├── spm_decode.py
│   ├── spm_encode.py
│   ├── spm_train.py
│   ├── wav2vec_featurize.py
│   └── wav2vec_manifest.py
├── setup.py
├── train.py
└── validate.py

/assets/table3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yongchanghao/multi-task-nat/b9bf3bd82caa96c75d1291900cf2d6ad5f08a2ac/assets/table3.jpg
-------------------------------------------------------------------------------- /assets/table5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yongchanghao/multi-task-nat/b9bf3bd82caa96c75d1291900cf2d6ad5f08a2ac/assets/table5.jpg -------------------------------------------------------------------------------- /fairseq/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | __all__ = ['pdb'] 7 | __version__ = '0.9.0' 8 | 9 | import fairseq.criterions # noqa 10 | import fairseq.models # noqa 11 | import fairseq.modules # noqa 12 | import fairseq.optim # noqa 13 | import fairseq.optim.lr_scheduler # noqa 14 | import fairseq.pdb # noqa 15 | import fairseq.tasks # noqa 16 | -------------------------------------------------------------------------------- /fairseq/binarizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from collections import Counter 7 | import os 8 | 9 | from fairseq.tokenizer import tokenize_line 10 | 11 | 12 | def safe_readline(f): 13 | pos = f.tell() 14 | while True: 15 | try: 16 | return f.readline() 17 | except UnicodeDecodeError: 18 | pos -= 1 19 | f.seek(pos) # search where this character begins 20 | 21 | 22 | class Binarizer: 23 | 24 | @staticmethod 25 | def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, 26 | offset=0, end=-1): 27 | nseq, ntok = 0, 0 28 | replaced = Counter() 29 | 30 | def replaced_consumer(word, idx): 31 | if idx == dict.unk_index and word != dict.unk_word: 32 | replaced.update([word]) 33 | 34 | with open(filename, 'r', encoding='utf-8') as f: 35 | f.seek(offset) 36 | # next(f) breaks f.tell(), hence readline() must be used 37 | line = safe_readline(f) 38 | while line: 39 | if end > 0 and f.tell() > end: 40 | break 41 | ids = dict.encode_line( 42 | line=line, 43 | line_tokenizer=tokenize, 44 | add_if_not_exist=False, 45 | consumer=replaced_consumer, 46 | append_eos=append_eos, 47 | reverse_order=reverse_order, 48 | ) 49 | nseq += 1 50 | ntok += len(ids) 51 | consumer(ids) 52 | line = f.readline() 53 | return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced} 54 | 55 | @staticmethod 56 | def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1): 57 | nseq = 0 58 | 59 | with open(filename, 'r') as f: 60 | f.seek(offset) 61 | line = safe_readline(f) 62 | while line: 63 | if end > 0 and f.tell() > end: 64 | break 65 | ids = alignment_parser(line) 66 | nseq += 1 67 | consumer(ids) 68 | line = f.readline() 69 | return {'nseq': nseq} 70 | 71 | @staticmethod 72 | def find_offsets(filename, num_chunks): 73 | with open(filename, 'r', encoding='utf-8') as f: 74 | size = os.fstat(f.fileno()).st_size 75 | chunk_size = size // num_chunks 76 | offsets = [0 for _ in range(num_chunks + 1)] 77 | for i in range(1, num_chunks): 78 | f.seek(chunk_size * i) 79 | safe_readline(f) 80 | offsets[i] = f.tell() 81 | return offsets 82 | -------------------------------------------------------------------------------- 
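A minimal usage sketch for the `Binarizer` above (not part of the repository). The corpus path `train.en`, the dictionary file `dict.en.txt`, and the chunk count are assumptions; note that `find_offsets` leaves the final offset at 0, which `binarize` treats as read-to-end-of-file.

# Hypothetical example: chunked binarization with the Binarizer shown above.
from fairseq.binarizer import Binarizer
from fairseq.data import Dictionary

vocab = Dictionary.load('dict.en.txt')           # assumed vocabulary file
offsets = Binarizer.find_offsets('train.en', 4)  # 5 entries; offsets[-1] == 0
for start, end in zip(offsets[:-1], offsets[1:]):
    tensors = []
    # `consumer` is called once per line with a tensor of token ids
    stats = Binarizer.binarize('train.en', vocab, tensors.append,
                               offset=start, end=end)
    print(stats['nseq'], 'sequences,', stats['nunk'], 'unknown tokens')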
/fairseq/clib/libbleu/libbleu.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include <map> 10 | #include <array> 11 | #include <cstring> 12 | #include <cstdio> 13 | 14 | typedef struct 15 | { 16 | size_t reflen; 17 | size_t predlen; 18 | size_t match1; 19 | size_t count1; 20 | size_t match2; 21 | size_t count2; 22 | size_t match3; 23 | size_t count3; 24 | size_t match4; 25 | size_t count4; 26 | } bleu_stat; 27 | 28 | // left trim (remove pad) 29 | void bleu_ltrim(size_t* len, int** sent, int pad) { 30 | size_t start = 0; 31 | while(start < *len) { 32 | if (*(*sent + start) != pad) { break; } 33 | start++; 34 | } 35 | *sent += start; 36 | *len -= start; 37 | } 38 | 39 | // right trim (remove eos) 40 | void bleu_rtrim(size_t* len, int** sent, int pad, int eos) { 41 | size_t end = *len - 1; 42 | while (end > 0) { 43 | if (*(*sent + end) != eos && *(*sent + end) != pad) { break; } 44 | end--; 45 | } 46 | *len = end + 1; 47 | } 48 | 49 | // left and right trim 50 | void bleu_trim(size_t* len, int** sent, int pad, int eos) { 51 | bleu_ltrim(len, sent, pad); 52 | bleu_rtrim(len, sent, pad, eos); 53 | } 54 | 55 | size_t bleu_hash(int len, int* data) { 56 | size_t h = 14695981039346656037ul; 57 | size_t prime = 0x100000001b3; 58 | char* b = (char*) data; 59 | size_t blen = sizeof(int) * len; 60 | 61 | while (blen-- > 0) { 62 | h ^= *b++; 63 | h *= prime; 64 | } 65 | 66 | return h; 67 | } 68 | 69 | void bleu_addngram( 70 | size_t *ntotal, size_t *nmatch, size_t n, 71 | size_t reflen, int* ref, size_t predlen, int* pred) { 72 | 73 | if (predlen < n) { return; } 74 | 75 | predlen = predlen - n + 1; 76 | (*ntotal) += predlen; 77 | 78 | if (reflen < n) { return; } 79 | 80 | reflen = reflen - n + 1; 81 | 82 | std::map<size_t, size_t> count; 83 | while (predlen > 0) { 84 | size_t w = bleu_hash(n, pred++); 85 | count[w]++; 86 | predlen--; 87 | } 88 | 89 | while (reflen > 0) { 90 | size_t w = bleu_hash(n, ref++); 91 | if (count[w] > 0) { 92 | (*nmatch)++; 93 | count[w] -=1; 94 | } 95 | reflen--; 96 | } 97 | } 98 | 99 | extern "C" { 100 | 101 | void bleu_zero_init(bleu_stat* stat) { 102 | std::memset(stat, 0, sizeof(bleu_stat)); 103 | } 104 | 105 | void bleu_one_init(bleu_stat* stat) { 106 | bleu_zero_init(stat); 107 | stat->count1 = 0; 108 | stat->count2 = 1; 109 | stat->count3 = 1; 110 | stat->count4 = 1; 111 | stat->match1 = 0; 112 | stat->match2 = 1; 113 | stat->match3 = 1; 114 | stat->match4 = 1; 115 | } 116 | 117 | void bleu_add( 118 | bleu_stat* stat, 119 | size_t reflen, int* ref, size_t predlen, int* pred, int pad, int eos) { 120 | 121 | bleu_trim(&reflen, &ref, pad, eos); 122 | bleu_trim(&predlen, &pred, pad, eos); 123 | stat->reflen += reflen; 124 | stat->predlen += predlen; 125 | 126 | bleu_addngram(&stat->count1, &stat->match1, 1, reflen, ref, predlen, pred); 127 | bleu_addngram(&stat->count2, &stat->match2, 2, reflen, ref, predlen, pred); 128 | bleu_addngram(&stat->count3, &stat->match3, 3, reflen, ref, predlen, pred); 129 | bleu_addngram(&stat->count4, &stat->match4, 4, reflen, ref, predlen, pred); 130 | } 131 | 132 | } 133 | -------------------------------------------------------------------------------- /fairseq/clib/libbleu/module.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc.
3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include <Python.h> 10 | 11 | 12 | static PyMethodDef method_def[] = { 13 | {NULL, NULL, 0, NULL} 14 | }; 15 | 16 | static struct PyModuleDef module_def = { 17 | PyModuleDef_HEAD_INIT, 18 | "libbleu", /* name of module */ 19 | NULL, /* module documentation, may be NULL */ 20 | -1, /* size of per-interpreter state of the module, 21 | or -1 if the module keeps state in global variables. */ 22 | method_def 23 | }; 24 | 25 | 26 | #if PY_MAJOR_VERSION == 2 27 | PyMODINIT_FUNC init_libbleu() 28 | #else 29 | PyMODINIT_FUNC PyInit_libbleu() 30 | #endif 31 | { 32 | PyObject *m = PyModule_Create(&module_def); 33 | if (!m) { 34 | return NULL; 35 | } 36 | return m; 37 | } 38 | -------------------------------------------------------------------------------- /fairseq/clib/libnat_cuda/binding.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | /* 10 | This code is partially adopted from https://github.com/1ytic/pytorch-edit-distance 11 | */ 12 | 13 | #include "edit_dist.h" 14 | #include <torch/types.h> 15 | 16 | #ifndef TORCH_CHECK 17 | #define TORCH_CHECK AT_CHECK 18 | #endif 19 | 20 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 21 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 22 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 23 | 24 | 25 | torch::Tensor LevenshteinDistance( 26 | torch::Tensor source, 27 | torch::Tensor target, 28 | torch::Tensor source_length, 29 | torch::Tensor target_length) { 30 | 31 | CHECK_INPUT(source); 32 | CHECK_INPUT(target); 33 | CHECK_INPUT(source_length); 34 | CHECK_INPUT(target_length); 35 | return LevenshteinDistanceCuda(source, target, source_length, target_length); 36 | } 37 | 38 | torch::Tensor GenerateDeletionLabel( 39 | torch::Tensor source, 40 | torch::Tensor operations) { 41 | 42 | CHECK_INPUT(source); 43 | CHECK_INPUT(operations); 44 | return GenerateDeletionLabelCuda(source, operations); 45 | } 46 | 47 | std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabel( 48 | torch::Tensor target, 49 | torch::Tensor operations) { 50 | 51 | CHECK_INPUT(target); 52 | CHECK_INPUT(operations); 53 | return GenerateInsertionLabelCuda(target, operations); 54 | } 55 | 56 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 57 | m.def("levenshtein_distance", &LevenshteinDistance, "Levenshtein distance"); 58 | m.def("generate_deletion_labels", &GenerateDeletionLabel, "Generate Deletion Label"); 59 | m.def("generate_insertion_labels", &GenerateInsertionLabel, "Generate Insertion Label"); 60 | } 61 | -------------------------------------------------------------------------------- /fairseq/clib/libnat_cuda/edit_dist.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree.
7 | */ 8 | 9 | #pragma once 10 | 11 | #include <torch/extension.h> 12 | 13 | torch::Tensor LevenshteinDistanceCuda( 14 | torch::Tensor source, 15 | torch::Tensor target, 16 | torch::Tensor source_length, 17 | torch::Tensor target_length); 18 | 19 | torch::Tensor GenerateDeletionLabelCuda( 20 | torch::Tensor source, 21 | torch::Tensor operations); 22 | 23 | std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda( 24 | torch::Tensor source, 25 | torch::Tensor operations); 26 | -------------------------------------------------------------------------------- /fairseq/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | from fairseq.criterions.fairseq_criterion import FairseqCriterion 11 | 12 | 13 | build_criterion, register_criterion, CRITERION_REGISTRY = registry.setup_registry( 14 | '--criterion', 15 | base_class=FairseqCriterion, 16 | default='cross_entropy', 17 | ) 18 | 19 | 20 | # automatically import any Python files in the criterions/ directory 21 | for file in os.listdir(os.path.dirname(__file__)): 22 | if file.endswith('.py') and not file.startswith('_'): 23 | module = file[:file.find('.py')] 24 | importlib.import_module('fairseq.criterions.' + module) 25 | -------------------------------------------------------------------------------- /fairseq/criterions/binary_cross_entropy.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import math 7 | import numpy as np 8 | import torch 9 | import torch.nn.functional as F 10 | 11 | from fairseq import utils 12 | 13 | from . import FairseqCriterion, register_criterion 14 | 15 | 16 | @register_criterion('binary_cross_entropy') 17 | class BinaryCrossEntropyCriterion(FairseqCriterion): 18 | 19 | def __init__(self, args, task): 20 | super().__init__(args, task) 21 | 22 | def forward(self, model, sample, reduce=True, log_pred=False): 23 | """Compute the loss for the given sample. 24 | 25 | Returns a tuple with three elements: 26 | 1) the loss 27 | 2) the sample size, which is used as the denominator for the gradient 28 | 3) logging outputs to display while training 29 | """ 30 | net_output = model(**sample['net_input']) 31 | logits = model.get_logits(net_output).float() 32 | target = model.get_targets(sample, net_output, expand_steps=False).float() 33 | 34 | if hasattr(model, 'get_target_weights'): 35 | weights = model.get_target_weights(target, net_output) 36 | if torch.is_tensor(weights): 37 | weights = weights.float() 38 | else: 39 | weights = 1.
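# Note (editorial, not part of the repository): `reduce=False` in the call
# below is the legacy PyTorch spelling; on PyTorch >= 1.0 the equivalent is
# `reduction='none'`, which keeps one loss term per element so that the
# `weights` mask can scale each term before the optional sum.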
40 | 41 | loss = F.binary_cross_entropy_with_logits(logits, target, reduce=False) 42 | 43 | loss = loss * weights 44 | 45 | if reduce: 46 | loss = loss.sum() 47 | 48 | sample_size = target.numel() 49 | logging_output = { 50 | 'loss': utils.item(loss.data) if reduce else loss.data, 51 | 'ntokens': sample_size, 52 | 'nsentences': logits.size(0), 53 | 'sample_size': sample_size, 54 | } 55 | if log_pred: 56 | logging_output['logits'] = logits.cpu().numpy() 57 | logging_output['target'] = target.cpu().numpy() 58 | return loss, sample_size, logging_output 59 | 60 | @staticmethod 61 | def aggregate_logging_outputs(logging_outputs): 62 | """Aggregate logging outputs from data parallel training.""" 63 | loss_sum = sum(log.get('loss', 0) for log in logging_outputs) 64 | ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) 65 | nsentences = sum(log.get('nsentences', 0) for log in logging_outputs) 66 | sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) 67 | agg_output = { 68 | 'loss': loss_sum / sample_size / math.log(2), 69 | 'ntokens': ntokens, 70 | 'nsentences': nsentences, 71 | 'sample_size': sample_size, 72 | } 73 | if sample_size != ntokens: 74 | agg_output['nll_loss'] = loss_sum / ntokens / math.log(2) 75 | for key in ["logits", "target"]: 76 | if key in logging_outputs[0]: 77 | if len(logging_outputs) == 1: 78 | agg_output[key] = logging_outputs[0][key] # avoid copying 79 | else: 80 | agg_output[key] = np.concatenate( 81 | [log[key] for log in logging_outputs] 82 | ) 83 | return agg_output 84 | -------------------------------------------------------------------------------- /fairseq/criterions/composite_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from torch import nn 7 | 8 | from fairseq import utils 9 | from . 
import FairseqCriterion, register_criterion 10 | 11 | 12 | @register_criterion('composite_loss') 13 | class CompositeLoss(FairseqCriterion): 14 | """This is a composite loss that, given a list of model outputs and a list of targets, 15 | computes an average of losses for each output-target pair""" 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add criterion-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, 22 | help='underlying criterion to use for the composite loss') 23 | # fmt: on 24 | 25 | @staticmethod 26 | def build_underlying_criterion(args, task): 27 | saved_criterion = args.criterion 28 | args.criterion = args.underlying_criterion 29 | assert saved_criterion != args.underlying_criterion 30 | underlying_criterion = task.build_criterion(args) 31 | args.criterion = saved_criterion 32 | return underlying_criterion 33 | 34 | @classmethod 35 | def build_criterion(cls, args, task): 36 | underlying_criterion = CompositeLoss.build_underlying_criterion(args, task) 37 | 38 | class FakeModel(nn.Module): 39 | 40 | def __init__(self, model, net_out, target): 41 | super().__init__() 42 | self.model = model 43 | self.net_out = net_out 44 | self.target = target 45 | 46 | def forward(self, **unused): 47 | return self.net_out 48 | 49 | def get_normalized_probs(self, net_output, log_probs, sample=None): 50 | return self.model.get_normalized_probs(net_output, log_probs, sample=sample) 51 | 52 | def get_targets(self, *unused): 53 | return self.target 54 | 55 | @property 56 | def decoder(self): 57 | return self.model.decoder 58 | 59 | class _CompositeLoss(FairseqCriterion): 60 | 61 | def __init__(self, args, task, underlying_criterion): 62 | super().__init__(args, task) 63 | self.underlying_criterion = underlying_criterion 64 | 65 | def forward(self, model, sample, reduce=True): 66 | net_outputs = model(**sample['net_input']) 67 | targets = sample['target'] 68 | 69 | bsz = targets[0].size(0) 70 | loss = net_outputs[0][0].new(1 if reduce else bsz).float().zero_() 71 | 72 | sample_size = 0 73 | logging_output = {} 74 | for o, t in zip(net_outputs[0], targets): 75 | m = FakeModel(model, (o, net_outputs[1]), t) 76 | sample['target'] = t 77 | l, ss, logging_output = self.underlying_criterion(m, sample, reduce) 78 | loss += l 79 | sample_size += ss 80 | 81 | loss.div_(len(targets)) 82 | sample_size /= len(targets) 83 | 84 | logging_output['loss'] = utils.item(loss.data) if reduce else loss.data 85 | return loss, sample_size, logging_output 86 | 87 | @staticmethod 88 | def aggregate_logging_outputs(logging_outputs): 89 | return underlying_criterion.__class__.aggregate_logging_outputs(logging_outputs) 90 | 91 | return _CompositeLoss(args, task, underlying_criterion) 92 | -------------------------------------------------------------------------------- /fairseq/criterions/cross_entropy.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import math 7 | import torch.nn.functional as F 8 | 9 | from fairseq import utils 10 | 11 | from . 
import FairseqCriterion, register_criterion 12 | 13 | 14 | @register_criterion('cross_entropy') 15 | class CrossEntropyCriterion(FairseqCriterion): 16 | 17 | def __init__(self, args, task): 18 | super().__init__(args, task) 19 | 20 | def forward(self, model, sample, reduce=True): 21 | """Compute the loss for the given sample. 22 | 23 | Returns a tuple with three elements: 24 | 1) the loss 25 | 2) the sample size, which is used as the denominator for the gradient 26 | 3) logging outputs to display while training 27 | """ 28 | net_output = model(**sample['net_input']) 29 | loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce) 30 | sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'] 31 | logging_output = { 32 | 'loss': utils.item(loss.data) if reduce else loss.data, 33 | 'nll_loss': utils.item(loss.data) if reduce else loss.data, 34 | 'ntokens': sample['ntokens'], 35 | 'nsentences': sample['target'].size(0), 36 | 'sample_size': sample_size, 37 | } 38 | return loss, sample_size, logging_output 39 | 40 | def compute_loss(self, model, net_output, sample, reduce=True): 41 | lprobs = model.get_normalized_probs(net_output, log_probs=True) 42 | lprobs = lprobs.view(-1, lprobs.size(-1)) 43 | target = model.get_targets(sample, net_output).view(-1) 44 | loss = F.nll_loss( 45 | lprobs, 46 | target, 47 | ignore_index=self.padding_idx, 48 | reduction='sum' if reduce else 'none', 49 | ) 50 | return loss, loss 51 | 52 | @staticmethod 53 | def aggregate_logging_outputs(logging_outputs): 54 | """Aggregate logging outputs from data parallel training.""" 55 | loss_sum = sum(log.get('loss', 0) for log in logging_outputs) 56 | ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) 57 | nsentences = sum(log.get('nsentences', 0) for log in logging_outputs) 58 | sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) 59 | agg_output = { 60 | 'loss': loss_sum / sample_size / math.log(2) if sample_size > 0 else 0., 61 | 'ntokens': ntokens, 62 | 'nsentences': nsentences, 63 | 'sample_size': sample_size, 64 | } 65 | if sample_size != ntokens: 66 | agg_output['nll_loss'] = loss_sum / ntokens / math.log(2) 67 | return agg_output 68 | -------------------------------------------------------------------------------- /fairseq/criterions/fairseq_criterion.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from torch.nn.modules.loss import _Loss 7 | 8 | 9 | class FairseqCriterion(_Loss): 10 | 11 | def __init__(self, args, task): 12 | super().__init__() 13 | self.args = args 14 | self.task = task 15 | self.padding_idx = task.target_dictionary.pad() if task.target_dictionary is not None else -100 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add criterion-specific arguments to the parser.""" 20 | pass 21 | 22 | @classmethod 23 | def build_criterion(cls, args, task): 24 | return cls(args, task) 25 | 26 | def forward(self, model, sample, reduce=True): 27 | """Compute the loss for the given sample. 
28 | 29 | Returns a tuple with three elements: 30 | 1) the loss 31 | 2) the sample size, which is used as the denominator for the gradient 32 | 3) logging outputs to display while training 33 | """ 34 | raise NotImplementedError 35 | 36 | @staticmethod 37 | def aggregate_logging_outputs(logging_outputs): 38 | """Aggregate logging outputs from data parallel training.""" 39 | raise NotImplementedError 40 | 41 | @staticmethod 42 | def grad_denom(sample_sizes): 43 | """Compute the gradient denominator for a set of sample sizes.""" 44 | return sum(sample_sizes) 45 | -------------------------------------------------------------------------------- /fairseq/criterions/masked_lm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import math 7 | 8 | import torch 9 | import torch.nn.functional as F 10 | 11 | from fairseq import utils 12 | 13 | from . import FairseqCriterion, register_criterion 14 | 15 | 16 | @register_criterion('masked_lm') 17 | class MaskedLmLoss(FairseqCriterion): 18 | """ 19 | Implementation for the loss used in masked language model (MLM) training. 20 | """ 21 | 22 | def __init__(self, args, task): 23 | super().__init__(args, task) 24 | 25 | def forward(self, model, sample, reduce=True): 26 | """Compute the loss for the given sample. 27 | Returns a tuple with three elements: 28 | 1) the loss 29 | 2) the sample size, which is used as the denominator for the gradient 30 | 3) logging outputs to display while training 31 | """ 32 | # compute MLM loss 33 | masked_tokens = sample['target'].ne(self.padding_idx) 34 | sample_size = masked_tokens.int().sum().item() 35 | 36 | # (Rare case) When all tokens are masked, the model results in empty 37 | # tensor and gives CUDA error. 
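# (Editorial note: with masked_tokens=None the model scores every position;
# since all targets are padding tokens in that case, ignore_index zeroes the
# loss, and the empty-tensor CUDA failure described above is avoided.)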
38 | if sample_size == 0: 39 | masked_tokens = None 40 | 41 | logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0] 42 | targets = model.get_targets(sample, [logits]) 43 | 44 | if sample_size != 0: 45 | targets = targets[masked_tokens] 46 | 47 | loss = F.nll_loss( 48 | F.log_softmax( 49 | logits.view(-1, logits.size(-1)), 50 | dim=-1, 51 | dtype=torch.float32, 52 | ), 53 | targets.view(-1), 54 | reduction='sum', 55 | ignore_index=self.padding_idx, 56 | ) 57 | logging_output = { 58 | 'loss': utils.item(loss.data) if reduce else loss.data, 59 | 'nll_loss': utils.item(loss.data) if reduce else loss.data, 60 | 'ntokens': sample['ntokens'], 61 | 'nsentences': sample['nsentences'], 62 | 'sample_size': sample_size, 63 | } 64 | return loss, sample_size, logging_output 65 | 66 | @staticmethod 67 | def aggregate_logging_outputs(logging_outputs): 68 | """Aggregate logging outputs from data parallel training.""" 69 | loss = sum(log.get('loss', 0) for log in logging_outputs) 70 | ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) 71 | nsentences = sum(log.get('nsentences', 0) for log in logging_outputs) 72 | sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) 73 | 74 | agg_output = { 75 | 'loss': loss / sample_size / math.log(2), 76 | 'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if ntokens > 0 else 0., 77 | 'ntokens': ntokens, 78 | 'nsentences': nsentences, 79 | 'sample_size': sample_size, 80 | } 81 | return agg_output 82 | -------------------------------------------------------------------------------- /fairseq/data/append_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class AppendTokenDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, token=None): 15 | super().__init__(dataset) 16 | self.token = token 17 | if token is not None: 18 | self._sizes = np.array(dataset.sizes) + 1 19 | else: 20 | self._sizes = dataset.sizes 21 | 22 | def __getitem__(self, idx): 23 | item = self.dataset[idx] 24 | if self.token is not None: 25 | item = torch.cat([item, item.new([self.token])]) 26 | return item 27 | 28 | @property 29 | def sizes(self): 30 | return self._sizes 31 | 32 | def num_tokens(self, index): 33 | n = self.dataset.num_tokens(index) 34 | if self.token is not None: 35 | n += 1 36 | return n 37 | 38 | def size(self, index): 39 | n = self.dataset.size(index) 40 | if self.token is not None: 41 | n += 1 42 | return n 43 | -------------------------------------------------------------------------------- /fairseq/data/audio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yongchanghao/multi-task-nat/b9bf3bd82caa96c75d1291900cf2d6ad5f08a2ac/fairseq/data/audio/__init__.py -------------------------------------------------------------------------------- /fairseq/data/base_wrapper_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from torch.utils.data.dataloader import default_collate 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class BaseWrapperDataset(FairseqDataset): 12 | 13 | def __init__(self, dataset): 14 | super().__init__() 15 | self.dataset = dataset 16 | 17 | def __getitem__(self, index): 18 | return self.dataset[index] 19 | 20 | def __len__(self): 21 | return len(self.dataset) 22 | 23 | def collater(self, samples): 24 | if hasattr(self.dataset, 'collater'): 25 | return self.dataset.collater(samples) 26 | else: 27 | return default_collate(samples) 28 | 29 | @property 30 | def sizes(self): 31 | return self.dataset.sizes 32 | 33 | def num_tokens(self, index): 34 | return self.dataset.num_tokens(index) 35 | 36 | def size(self, index): 37 | return self.dataset.size(index) 38 | 39 | def ordered_indices(self): 40 | return self.dataset.ordered_indices() 41 | 42 | @property 43 | def supports_prefetch(self): 44 | return getattr(self.dataset, 'supports_prefetch', False) 45 | 46 | def prefetch(self, indices): 47 | self.dataset.prefetch(indices) 48 | 49 | def set_epoch(self, epoch): 50 | super().set_epoch(epoch) 51 | if hasattr(self.dataset, 'set_epoch'): 52 | self.dataset.set_epoch(epoch) 53 | -------------------------------------------------------------------------------- /fairseq/data/colorize_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class ColorizeDataset(BaseWrapperDataset): 12 | """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ 13 | def __init__(self, dataset, color_getter): 14 | super().__init__(dataset) 15 | self.color_getter = color_getter 16 | 17 | def collater(self, samples): 18 | base_collate = super().collater(samples) 19 | if len(base_collate) > 0: 20 | base_collate["net_input"]["colors"] = torch.tensor( 21 | list(self.color_getter(self.dataset, s["id"]) for s in samples), 22 | dtype=torch.long, 23 | ) 24 | return base_collate 25 | -------------------------------------------------------------------------------- /fairseq/data/concat_sentences_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . 
import FairseqDataset 9 | 10 | 11 | class ConcatSentencesDataset(FairseqDataset): 12 | 13 | def __init__(self, *datasets): 14 | super().__init__() 15 | self.datasets = datasets 16 | assert all(len(ds) == len(datasets[0]) for ds in datasets), \ 17 | 'datasets must have the same length' 18 | 19 | def __getitem__(self, index): 20 | return torch.cat([ds[index] for ds in self.datasets]) 21 | 22 | def __len__(self): 23 | return len(self.datasets[0]) 24 | 25 | def collater(self, samples): 26 | return self.datasets[0].collater(samples) 27 | 28 | @property 29 | def sizes(self): 30 | return sum(ds.sizes for ds in self.datasets) 31 | 32 | def num_tokens(self, index): 33 | return sum(ds.num_tokens(index) for ds in self.datasets) 34 | 35 | def size(self, index): 36 | return sum(ds.size(index) for ds in self.datasets) 37 | 38 | def ordered_indices(self): 39 | return self.datasets[0].ordered_indices() 40 | 41 | @property 42 | def supports_prefetch(self): 43 | return any( 44 | getattr(ds, 'supports_prefetch', False) for ds in self.datasets 45 | ) 46 | 47 | def prefetch(self, indices): 48 | for ds in self.datasets: 49 | if getattr(ds, 'supports_prefetch', False): 50 | ds.prefetch(indices) 51 | 52 | def set_epoch(self, epoch): 53 | super().set_epoch(epoch) 54 | for ds in self.datasets: 55 | if hasattr(ds, 'set_epoch'): 56 | ds.set_epoch(epoch) 57 | -------------------------------------------------------------------------------- /fairseq/data/data_utils_fast.pyx: -------------------------------------------------------------------------------- 1 | # cython: language_level=3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import numpy as np 8 | 9 | cimport cython 10 | cimport numpy as np 11 | 12 | DTYPE = np.int64 13 | ctypedef np.int64_t DTYPE_t 14 | 15 | 16 | cdef _is_batch_full(list batch, long num_tokens, long max_tokens, long max_sentences): 17 | if len(batch) == 0: 18 | return 0 19 | if max_sentences > 0 and len(batch) == max_sentences: 20 | return 1 21 | if max_tokens > 0 and num_tokens > max_tokens: 22 | return 1 23 | return 0 24 | 25 | 26 | @cython.cdivision(True) 27 | cpdef list batch_by_size_fast( 28 | np.ndarray[DTYPE_t, ndim=1] indices, 29 | num_tokens_fn, 30 | long max_tokens, 31 | long max_sentences, 32 | int bsz_mult, 33 | ): 34 | cdef long sample_len = 0 35 | cdef list sample_lens = [] 36 | cdef list batch = [] 37 | cdef list batches = [] 38 | cdef long mod_len 39 | cdef long i 40 | cdef long idx 41 | cdef long num_tokens 42 | cdef DTYPE_t[:] indices_view = indices 43 | 44 | for i in range(len(indices_view)): 45 | idx = indices_view[i] 46 | num_tokens = num_tokens_fn(idx) 47 | sample_lens.append(num_tokens) 48 | sample_len = max(sample_len, num_tokens) 49 | 50 | assert max_tokens <= 0 or sample_len <= max_tokens, ( 51 | "sentence at index {} of size {} exceeds max_tokens " 52 | "limit of {}!".format(idx, sample_len, max_tokens) 53 | ) 54 | num_tokens = (len(batch) + 1) * sample_len 55 | 56 | if _is_batch_full(batch, num_tokens, max_tokens, max_sentences): 57 | mod_len = max( 58 | bsz_mult * (len(batch) // bsz_mult), 59 | len(batch) % bsz_mult, 60 | ) 61 | batches.append(batch[:mod_len]) 62 | batch = batch[mod_len:] 63 | sample_lens = sample_lens[mod_len:] 64 | sample_len = max(sample_lens) if len(sample_lens) > 0 else 0 65 | batch.append(idx) 66 | if len(batch) > 0: 67 | batches.append(batch) 68 | return batches 69 | 
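A small pure-Python driver for `batch_by_size_fast` above (not part of the repository); it assumes the Cython extension is compiled, and the toy `num_tokens` function is an assumption for illustration. A batch's token cost is `len(batch) * longest sample in it`, so short samples pack together while long ones get small batches.

import numpy as np
from fairseq.data.data_utils_fast import batch_by_size_fast

indices = np.arange(10, dtype=np.int64)  # ten toy "sentences"
num_tokens = lambda i: int(i) + 1        # sentence i has i + 1 tokens
# Pack into batches of at most 16 padded tokens and at most 4 sentences,
# rounding batch sizes toward a multiple of 2 when a batch is split.
batches = batch_by_size_fast(indices, num_tokens,
                             max_tokens=16, max_sentences=4, bsz_mult=2)
print(batches)  # [[0, 1, 2, 3], [4, 5], [6, 7], [8], [9]]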
-------------------------------------------------------------------------------- /fairseq/data/encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | import importlib 8 | import os 9 | 10 | from fairseq import registry 11 | 12 | 13 | build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY = registry.setup_registry( 14 | '--tokenizer', 15 | default=None, 16 | ) 17 | 18 | 19 | build_bpe, register_bpe, BPE_REGISTRY = registry.setup_registry( 20 | '--bpe', 21 | default=None, 22 | ) 23 | 24 | 25 | # automatically import any Python files in the encoders/ directory 26 | for file in os.listdir(os.path.dirname(__file__)): 27 | if file.endswith('.py') and not file.startswith('_'): 28 | module = file[:file.find('.py')] 29 | importlib.import_module('fairseq.data.encoders.' + module) 30 | -------------------------------------------------------------------------------- /fairseq/data/encoders/fastbpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe('fastbpe') 11 | class fastBPE(object): 12 | 13 | @staticmethod 14 | def add_args(parser): 15 | # fmt: off 16 | parser.add_argument('--bpe-codes', type=str, 17 | help='path to fastBPE BPE') 18 | # fmt: on 19 | 20 | def __init__(self, args): 21 | if args.bpe_codes is None: 22 | raise ValueError('--bpe-codes is required for --bpe=fastbpe') 23 | codes = file_utils.cached_path(args.bpe_codes) 24 | try: 25 | import fastBPE 26 | self.bpe = fastBPE.fastBPE(codes) 27 | self.bpe_symbol = "@@ " 28 | except ImportError: 29 | raise ImportError('Please install fastBPE with: pip install fastBPE') 30 | 31 | def encode(self, x: str) -> str: 32 | return self.bpe.apply([x])[0] 33 | 34 | def decode(self, x: str) -> str: 35 | return (x + ' ').replace(self.bpe_symbol, '').rstrip() 36 | -------------------------------------------------------------------------------- /fairseq/data/encoders/gpt2_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | from .gpt2_bpe_utils import get_encoder 10 | 11 | 12 | DEFAULT_ENCODER_JSON = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' 13 | DEFAULT_VOCAB_BPE = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' 14 | 15 | 16 | @register_bpe('gpt2') 17 | class GPT2BPE(object): 18 | 19 | @staticmethod 20 | def add_args(parser): 21 | # fmt: off 22 | parser.add_argument('--gpt2-encoder-json', type=str, 23 | default=DEFAULT_ENCODER_JSON, 24 | help='path to encoder.json') 25 | parser.add_argument('--gpt2-vocab-bpe', type=str, 26 | default=DEFAULT_VOCAB_BPE, 27 | help='path to vocab.bpe') 28 | # fmt: on 29 | 30 | def __init__(self, args): 31 | encoder_json = file_utils.cached_path( 32 | getattr(args, 'gpt2_encoder_json', DEFAULT_ENCODER_JSON) 33 | ) 34 | vocab_bpe = file_utils.cached_path( 35 | getattr(args, 'gpt2_vocab_bpe', DEFAULT_VOCAB_BPE) 36 | ) 37 | self.bpe = get_encoder(encoder_json, vocab_bpe) 38 | 39 | def encode(self, x: str) -> str: 40 | return ' '.join(map(str, self.bpe.encode(x))) 41 | 42 | def decode(self, x: str) -> str: 43 | return self.bpe.decode([ 44 | int(tok) if tok not in {'<unk>', '<mask>'} else tok 45 | for tok in x.split() 46 | ]) 47 | 48 | def is_beginning_of_word(self, x: str) -> bool: 49 | return self.decode(x).startswith(' ') 50 | -------------------------------------------------------------------------------- /fairseq/data/encoders/hf_bert_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_bpe 7 | 8 | 9 | @register_bpe('bert') 10 | class BertBPE(object): 11 | 12 | @staticmethod 13 | def add_args(parser): 14 | # fmt: off 15 | parser.add_argument('--bpe-cased', action='store_true', 16 | help='set for cased BPE', 17 | default=False) 18 | parser.add_argument('--bpe-vocab-file', type=str, 19 | help='bpe vocab file.') 20 | # fmt: on 21 | 22 | def __init__(self, args): 23 | try: 24 | from pytorch_transformers import BertTokenizer 25 | from pytorch_transformers.tokenization_utils import clean_up_tokenization 26 | except ImportError: 27 | raise ImportError( 28 | 'Please install 1.0.0 version of pytorch_transformers ' 29 | 'with: pip install pytorch-transformers' 30 | ) 31 | 32 | if 'bpe_vocab_file' in args: 33 | self.bert_tokenizer = BertTokenizer( 34 | args.bpe_vocab_file, 35 | do_lower_case=not args.bpe_cased 36 | ) 37 | else: 38 | vocab_file_name = 'bert-base-cased' if args.bpe_cased else 'bert-base-uncased' 39 | self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name) 40 | self.clean_up_tokenization = clean_up_tokenization 41 | 42 | def encode(self, x: str) -> str: 43 | return ' '.join(self.bert_tokenizer.tokenize(x)) 44 | 45 | def decode(self, x: str) -> str: 46 | return self.clean_up_tokenization( 47 | self.bert_tokenizer.convert_tokens_to_string(x.split(' ')) 48 | ) 49 | 50 | def is_beginning_of_word(self, x: str) -> bool: 51 | return not x.startswith('##') 52 | -------------------------------------------------------------------------------- /fairseq/data/encoders/moses_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_tokenizer 7 | 8 | 9 | @register_tokenizer('moses') 10 | class MosesTokenizer(object): 11 | 12 | @staticmethod 13 | def add_args(parser): 14 | # fmt: off 15 | parser.add_argument('--moses-source-lang', metavar='SRC', 16 | help='source language') 17 | parser.add_argument('--moses-target-lang', metavar='TARGET', 18 | help='target language') 19 | parser.add_argument('--moses-no-dash-splits', action='store_true', default=False, 20 | help='don\'t apply dash split rules') 21 | parser.add_argument('--moses-no-escape', action='store_true', default=False, 22 | help='don\'t perform HTML escaping on apostrophes, quotes, etc.') 23 | # fmt: on 24 | 25 | def __init__(self, args): 26 | self.args = args 27 | 28 | if getattr(args, 'moses_source_lang', None) is None: 29 | args.moses_source_lang = getattr(args, 'source_lang', 'en') 30 | if getattr(args, 'moses_target_lang', None) is None: 31 | args.moses_target_lang = getattr(args, 'target_lang', 'en') 32 | 33 | try: 34 | from sacremoses import MosesTokenizer, MosesDetokenizer 35 | self.tok = MosesTokenizer(args.moses_source_lang) 36 | self.detok = MosesDetokenizer(args.moses_target_lang) 37 | except ImportError: 38 | raise ImportError('Please install Moses tokenizer with: pip install sacremoses') 39 | 40 | def encode(self, x: str) -> str: 41 | return self.tok.tokenize( 42 | x, 43 | aggressive_dash_splits=(not self.args.moses_no_dash_splits), 44 | return_str=True, 45 | escape=(not self.args.moses_no_escape), 46 | ) 47 | 48 | def decode(self, x: str) -> str: 49 | return self.detok.detokenize(x.split()) 50 | -------------------------------------------------------------------------------- /fairseq/data/encoders/nltk_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_tokenizer 7 | 8 | 9 | @register_tokenizer('nltk') 10 | class NLTKTokenizer(object): 11 | 12 | def __init__(self, source_lang=None, target_lang=None): 13 | try: 14 | from nltk.tokenize import word_tokenize 15 | self.word_tokenize = word_tokenize 16 | except ImportError: 17 | raise ImportError('Please install nltk with: pip install nltk') 18 | 19 | def encode(self, x: str) -> str: 20 | return ' '.join(self.word_tokenize(x)) 21 | 22 | def decode(self, x: str) -> str: 23 | return x 24 | -------------------------------------------------------------------------------- /fairseq/data/encoders/sentencepiece_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe('sentencepiece') 11 | class SentencepieceBPE(object): 12 | 13 | @staticmethod 14 | def add_args(parser): 15 | # fmt: off 16 | parser.add_argument('--sentencepiece-vocab', type=str, 17 | help='path to sentencepiece vocab') 18 | # fmt: on 19 | 20 | def __init__(self, args): 21 | vocab = file_utils.cached_path(args.sentencepiece_vocab) 22 | try: 23 | import sentencepiece as spm 24 | self.sp = spm.SentencePieceProcessor() 25 | self.sp.Load(vocab) 26 | except ImportError: 27 | raise ImportError('Please install sentencepiece with: pip install sentencepiece') 28 | 29 | def encode(self, x: str) -> str: 30 | return ' '.join(self.sp.EncodeAsPieces(x)) 31 | 32 | def decode(self, x: str) -> str: 33 | return x.replace(' ', '').replace('\u2581', ' ').strip() 34 | 35 | def is_beginning_of_word(self, x: str) -> bool: 36 | if x in ['<unk>', '<s>', '</s>', '<pad>']: 37 | # special elements are always considered beginnings 38 | # HACK: this logic is already present in fairseq/tasks/masked_lm.py 39 | # but these special tokens are also contained in the sentencepiece 40 | # vocabulary which causes duplicate special tokens. This hack makes 41 | # sure that they are all taken into account. 42 | return True 43 | return x.startswith('\u2581') 44 | -------------------------------------------------------------------------------- /fairseq/data/encoders/space_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | from fairseq.data.encoders import register_tokenizer 9 | 10 | 11 | @register_tokenizer('space') 12 | class SpaceTokenizer(object): 13 | 14 | def __init__(self, source_lang=None, target_lang=None): 15 | self.space_tok = re.compile(r"\s+") 16 | 17 | def encode(self, x: str) -> str: 18 | return self.space_tok.sub(' ', x) 19 | 20 | def decode(self, x: str) -> str: 21 | return x 22 | -------------------------------------------------------------------------------- /fairseq/data/encoders/subword_nmt_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe('subword_nmt') 11 | class SubwordNMTBPE(object): 12 | 13 | @staticmethod 14 | def add_args(parser): 15 | # fmt: off 16 | parser.add_argument('--bpe-codes', type=str, 17 | help='path to subword NMT BPE') 18 | parser.add_argument('--bpe-separator', default='@@', 19 | help='BPE separator') 20 | # fmt: on 21 | 22 | def __init__(self, args): 23 | if args.bpe_codes is None: 24 | raise ValueError('--bpe-codes is required for --bpe=subword_nmt') 25 | codes = file_utils.cached_path(args.bpe_codes) 26 | try: 27 | from subword_nmt import apply_bpe 28 | bpe_parser = apply_bpe.create_parser() 29 | bpe_args = bpe_parser.parse_args([ 30 | '--codes', codes, 31 | '--separator', args.bpe_separator, 32 | ]) 33 | self.bpe = apply_bpe.BPE( 34 | bpe_args.codes, 35 | bpe_args.merges, 36 | bpe_args.separator, 37 | None, 38 | bpe_args.glossaries, 39 | ) 40 | self.bpe_symbol = bpe_args.separator + ' ' 41 | except ImportError: 42 | raise ImportError('Please install subword_nmt with: pip install subword-nmt') 43 | 44 | def encode(self, x: str) -> str: 45 | return self.bpe.process_line(x) 46 | 47 | def decode(self, x: str) -> str: 48 | return (x + ' ').replace(self.bpe_symbol, '').rstrip() 49 | -------------------------------------------------------------------------------- /fairseq/data/encoders/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from fairseq.data import encoders 8 | 9 | 10 | def get_whole_word_mask(args, dictionary): 11 | bpe = encoders.build_bpe(args) 12 | if bpe is not None: 13 | def is_beginning_of_word(i): 14 | if i < dictionary.nspecial: 15 | # special elements are always considered beginnings 16 | return True 17 | tok = dictionary[i] 18 | if tok.startswith('madeupword'): 19 | return True 20 | try: 21 | return bpe.is_beginning_of_word(tok) 22 | except ValueError: 23 | return True 24 | mask_whole_words = torch.ByteTensor(list( 25 | map(is_beginning_of_word, range(len(dictionary))) 26 | )) 27 | return mask_whole_words 28 | return None 29 | -------------------------------------------------------------------------------- /fairseq/data/fairseq_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch.utils.data 8 | 9 | 10 | class EpochListening: 11 | """Mixin for receiving updates whenever the epoch increments.""" 12 | def set_epoch(self, epoch): 13 | """Will receive the updated epoch number at the beginning of the epoch. 14 | """ 15 | pass 16 | 17 | 18 | class FairseqDataset(torch.utils.data.Dataset, EpochListening): 19 | """A dataset that provides helpers for batching.""" 20 | 21 | def __getitem__(self, index): 22 | raise NotImplementedError 23 | 24 | def __len__(self): 25 | raise NotImplementedError 26 | 27 | def collater(self, samples): 28 | """Merge a list of samples to form a mini-batch. 
29 | 30 | Args: 31 | samples (List[dict]): samples to collate 32 | 33 | Returns: 34 | dict: a mini-batch suitable for forwarding with a Model 35 | """ 36 | raise NotImplementedError 37 | 38 | def num_tokens(self, index): 39 | """Return the number of tokens in a sample. This value is used to 40 | enforce ``--max-tokens`` during batching.""" 41 | raise NotImplementedError 42 | 43 | def size(self, index): 44 | """Return an example's size as a float or tuple. This value is used when 45 | filtering a dataset with ``--max-positions``.""" 46 | raise NotImplementedError 47 | 48 | def ordered_indices(self): 49 | """Return an ordered list of indices. Batches will be constructed based 50 | on this order.""" 51 | return np.arange(len(self)) 52 | 53 | @property 54 | def supports_prefetch(self): 55 | """Whether this dataset supports prefetching.""" 56 | return False 57 | 58 | def attr(self, attr: str, index: int): 59 | return getattr(self, attr, None) 60 | 61 | def prefetch(self, indices): 62 | """Prefetch the data required for this epoch.""" 63 | raise NotImplementedError 64 | 65 | 66 | class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening): 67 | """For datasets that need to be read sequentially, usually because the data 68 | is being streamed or otherwise can't be manipulated on a single machine. 69 | """ 70 | 71 | def __iter__(self): 72 | raise NotImplementedError 73 | -------------------------------------------------------------------------------- /fairseq/data/id_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class IdDataset(FairseqDataset): 12 | 13 | def __getitem__(self, index): 14 | return index 15 | 16 | def __len__(self): 17 | return 0 18 | 19 | def collater(self, samples): 20 | return torch.tensor(samples) 21 | -------------------------------------------------------------------------------- /fairseq/data/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary 7 | from .block_pair_dataset import BlockPairDataset 8 | from .masked_lm_dataset import MaskedLMDataset 9 | 10 | __all__ = [ 11 | 'BertDictionary', 12 | 'BlockPairDataset', 13 | 'MaskedLMDataset', 14 | 'MaskedLMDictionary', 15 | ] 16 | -------------------------------------------------------------------------------- /fairseq/data/legacy/masked_lm_dictionary.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import Dictionary 7 | 8 | 9 | class MaskedLMDictionary(Dictionary): 10 | """ 11 | Dictionary for Masked Language Modelling tasks. This extends Dictionary by 12 | adding the mask symbol. 
13 | """ 14 | def __init__( 15 | self, 16 | pad='', 17 | eos='', 18 | unk='', 19 | mask='', 20 | ): 21 | super().__init__(pad, eos, unk) 22 | self.mask_word = mask 23 | self.mask_index = self.add_symbol(mask) 24 | self.nspecial = len(self.symbols) 25 | 26 | def mask(self): 27 | """Helper to get index of mask symbol""" 28 | return self.mask_index 29 | 30 | 31 | class BertDictionary(MaskedLMDictionary): 32 | """ 33 | Dictionary for BERT task. This extends MaskedLMDictionary by adding support 34 | for cls and sep symbols. 35 | """ 36 | def __init__( 37 | self, 38 | pad='', 39 | eos='', 40 | unk='', 41 | mask='', 42 | cls='', 43 | sep='' 44 | ): 45 | super().__init__(pad, eos, unk, mask) 46 | self.cls_word = cls 47 | self.sep_word = sep 48 | self.cls_index = self.add_symbol(cls) 49 | self.sep_index = self.add_symbol(sep) 50 | self.nspecial = len(self.symbols) 51 | 52 | def cls(self): 53 | """Helper to get index of cls symbol""" 54 | return self.cls_index 55 | 56 | def sep(self): 57 | """Helper to get index of sep symbol""" 58 | return self.sep_index 59 | -------------------------------------------------------------------------------- /fairseq/data/list_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class ListDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, sizes=None): 12 | super().__init__(dataset) 13 | self._sizes = sizes 14 | 15 | def __iter__(self): 16 | for x in self.dataset: 17 | yield x 18 | 19 | def collater(self, samples): 20 | return samples 21 | 22 | @property 23 | def sizes(self): 24 | return self._sizes 25 | 26 | def num_tokens(self, index): 27 | return self.sizes[index] 28 | 29 | def size(self, index): 30 | return self.sizes[index] 31 | 32 | def set_epoch(self, epoch): 33 | pass 34 | -------------------------------------------------------------------------------- /fairseq/data/lm_context_window_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from fairseq.data.monolingual_dataset import MonolingualDataset 10 | 11 | from . 
import FairseqDataset 12 | 13 | 14 | class LMContextWindowDataset(FairseqDataset): 15 | """Wraps a MonolingualDataset and provides more context for evaluation.""" 16 | 17 | def __init__(self, dataset, tokens_per_sample, context_window, pad_idx): 18 | assert isinstance(dataset, MonolingualDataset) 19 | assert context_window > 0 20 | self.dataset = dataset 21 | self.tokens_per_sample = tokens_per_sample 22 | self.context_window = context_window 23 | self.pad_idx = pad_idx 24 | self.prev_tokens = np.empty([0]) 25 | 26 | def __getitem__(self, index): 27 | return self.dataset[index] 28 | 29 | def __len__(self): 30 | return len(self.dataset) 31 | 32 | def collater(self, samples): 33 | sample = self.dataset.collater(samples) 34 | 35 | pad = self.pad_idx 36 | max_sample_len = self.tokens_per_sample + self.context_window 37 | 38 | bsz, tsz = sample['net_input']['src_tokens'].shape 39 | start_idxs = [0] * bsz 40 | toks = sample['net_input']['src_tokens'] 41 | lengths = sample['net_input']['src_lengths'] 42 | tgt = sample['target'] 43 | new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64) 44 | new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64) 45 | sample_lens = toks.ne(pad).long().sum(dim=1).cpu() 46 | for i in range(bsz): 47 | sample_len = sample_lens[i] 48 | extra = len(self.prev_tokens) + sample_len - max_sample_len 49 | if extra > 0: 50 | self.prev_tokens = self.prev_tokens[extra:] 51 | pads = np.full(self.context_window - len(self.prev_tokens), pad) 52 | new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads]) 53 | new_tgt[i, len(self.prev_tokens):len(self.prev_tokens) + len(tgt[i])] = tgt[i] 54 | start_idxs[i] = len(self.prev_tokens) 55 | lengths[i] += len(self.prev_tokens) 56 | self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window:] 57 | sample['net_input']['src_tokens'] = torch.from_numpy(new_toks) 58 | sample['target'] = torch.from_numpy(new_tgt) 59 | sample['start_indices'] = start_idxs 60 | 61 | return sample 62 | 63 | def num_tokens(self, index): 64 | return self.dataset.num_tokens(index) 65 | 66 | def size(self, index): 67 | return self.dataset.size(index) 68 | 69 | def ordered_indices(self): 70 | # NOTE we don't shuffle the data to retain access to the previous dataset elements 71 | return np.arange(len(self.dataset)) 72 | 73 | @property 74 | def supports_prefetch(self): 75 | return getattr(self.dataset, 'supports_prefetch', False) 76 | 77 | def prefetch(self, indices): 78 | return self.dataset.prefetch(indices) 79 | -------------------------------------------------------------------------------- /fairseq/data/lru_cache_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from functools import lru_cache 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class LRUCacheDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, token=None): 14 | super().__init__(dataset) 15 | 16 | @lru_cache(maxsize=8) 17 | def __getitem__(self, index): 18 | return self.dataset[index] 19 | 20 | @lru_cache(maxsize=8) 21 | def collater(self, samples): 22 | return self.dataset.collater(samples) 23 | -------------------------------------------------------------------------------- /fairseq/data/num_samples_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import FairseqDataset 7 | 8 | 9 | class NumSamplesDataset(FairseqDataset): 10 | 11 | def __getitem__(self, index): 12 | return 1 13 | 14 | def __len__(self): 15 | return 0 16 | 17 | def collater(self, samples): 18 | return sum(samples) 19 | -------------------------------------------------------------------------------- /fairseq/data/numel_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class NumelDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, reduce=False): 15 | super().__init__(dataset) 16 | self.reduce = reduce 17 | 18 | def __getitem__(self, index): 19 | item = self.dataset[index] 20 | if torch.is_tensor(item): 21 | return torch.numel(item) 22 | else: 23 | return np.size(item) 24 | 25 | def __len__(self): 26 | return len(self.dataset) 27 | 28 | def collater(self, samples): 29 | if self.reduce: 30 | return sum(samples) 31 | else: 32 | return torch.tensor(samples) 33 | -------------------------------------------------------------------------------- /fairseq/data/offset_tokens_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class OffsetTokensDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, offset): 12 | super().__init__(dataset) 13 | self.offset = offset 14 | 15 | def __getitem__(self, idx): 16 | return self.dataset[idx] + self.offset 17 | -------------------------------------------------------------------------------- /fairseq/data/pad_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import data_utils 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class PadDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, pad_idx, left_pad): 14 | super().__init__(dataset) 15 | self.pad_idx = pad_idx 16 | self.left_pad = left_pad 17 | 18 | def collater(self, samples): 19 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) 20 | 21 | 22 | class LeftPadDataset(PadDataset): 23 | 24 | def __init__(self, dataset, pad_idx): 25 | super().__init__(dataset, pad_idx, left_pad=True) 26 | 27 | 28 | class RightPadDataset(PadDataset): 29 | 30 | def __init__(self, dataset, pad_idx): 31 | super().__init__(dataset, pad_idx, left_pad=False) 32 | -------------------------------------------------------------------------------- /fairseq/data/plasma_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import subprocess 7 | import tempfile 8 | 9 | 10 | class PlasmaArray(object): 11 | """ 12 | Wrapper around numpy arrays that automatically moves the data to shared 13 | memory upon serialization. This is particularly helpful when passing numpy 14 | arrays through multiprocessing, so that data is not unnecessarily 15 | duplicated or pickled. 16 | """ 17 | 18 | def __init__(self, array): 19 | super().__init__() 20 | self.array = array 21 | self.disable = array.nbytes < 134217728 # disable for arrays <128MB 22 | self.object_id = None 23 | self.path = None 24 | 25 | # variables with underscores shouldn't be pickled 26 | self._client = None 27 | self._server = None 28 | self._server_tmp = None 29 | self._plasma = None 30 | 31 | @property 32 | def plasma(self): 33 | if self._plasma is None and not self.disable: 34 | try: 35 | import pyarrow.plasma as plasma 36 | self._plasma = plasma 37 | except ImportError: 38 | self._plasma = None 39 | return self._plasma 40 | 41 | def start_server(self): 42 | if self.plasma is None or self._server is not None: 43 | return 44 | assert self.object_id is None 45 | assert self.path is None 46 | self._server_tmp = tempfile.NamedTemporaryFile() 47 | self.path = self._server_tmp.name 48 | self._server = subprocess.Popen([ 49 | 'plasma_store', 50 | '-m', str(int(1.05 * self.array.nbytes)), 51 | '-s', self.path, 52 | ]) 53 | 54 | @property 55 | def client(self): 56 | if self._client is None: 57 | assert self.path is not None 58 | self._client = self.plasma.connect(self.path) 59 | return self._client 60 | 61 | def __getstate__(self): 62 | if self.plasma is None: 63 | return self.__dict__ 64 | if self.object_id is None: 65 | self.start_server() 66 | self.object_id = self.client.put(self.array) 67 | state = self.__dict__.copy() 68 | del state['array'] 69 | state['_client'] = None 70 | state['_server'] = None 71 | state['_server_tmp'] = None 72 | state['_plasma'] = None 73 | return state 74 | 75 | def __setstate__(self, state): 76 | self.__dict__.update(state) 77 | if self.plasma is None: 78 | return 79 | self.array = self.client.get(self.object_id) 80 | 81 | def __del__(self): 82 | if self._server is not None: 83 | self._server.kill() 84 | self._server = None 85 | self._server_tmp.close() 86 | self._server_tmp = None 87 | -------------------------------------------------------------------------------- /fairseq/data/prepend_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 
Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class PrependDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, prepend_getter, ensure_first_token_is=None): 14 | super().__init__(dataset) 15 | self.prepend_getter = prepend_getter 16 | self.ensure_first_token = ensure_first_token_is 17 | 18 | def __getitem__(self, idx): 19 | item = self.dataset[idx] 20 | is_tuple = isinstance(item, tuple) 21 | src = item[0] if is_tuple else item 22 | 23 | assert self.ensure_first_token is None or src[0] == self.ensure_first_token 24 | prepend_idx = self.prepend_getter(self.dataset, idx) 25 | assert isinstance(prepend_idx, int) 26 | src[0] = prepend_idx 27 | item = tuple((src,) + item[1:]) if is_tuple else src 28 | return item 29 | -------------------------------------------------------------------------------- /fairseq/data/prepend_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class PrependTokenDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, token=None): 15 | super().__init__(dataset) 16 | self.token = token 17 | if token is not None: 18 | self._sizes = np.array(dataset.sizes) + 1 19 | else: 20 | self._sizes = dataset.sizes 21 | 22 | def __getitem__(self, idx): 23 | item = self.dataset[idx] 24 | if self.token is not None: 25 | item = torch.cat([item.new([self.token]), item]) 26 | return item 27 | 28 | @property 29 | def sizes(self): 30 | return self._sizes 31 | 32 | def num_tokens(self, index): 33 | n = self.dataset.num_tokens(index) 34 | if self.token is not None: 35 | n += 1 36 | return n 37 | 38 | def size(self, index): 39 | n = self.dataset.size(index) 40 | if self.token is not None: 41 | n += 1 42 | return n 43 | -------------------------------------------------------------------------------- /fairseq/data/raw_label_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class RawLabelDataset(FairseqDataset): 12 | 13 | def __init__(self, labels): 14 | super().__init__() 15 | self.labels = labels 16 | 17 | def __getitem__(self, index): 18 | return self.labels[index] 19 | 20 | def __len__(self): 21 | return len(self.labels) 22 | 23 | def collater(self, samples): 24 | return torch.tensor(samples) 25 | -------------------------------------------------------------------------------- /fairseq/data/replace_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class ReplaceDataset(BaseWrapperDataset): 10 | """Replaces tokens found in the dataset with a specified replacement token. 11 | 12 | Args: 13 | dataset (~torch.utils.data.Dataset): dataset to replace tokens in 14 | replace_map (Dict[int, int]): map of token to replace -> replacement token 15 | offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. Should be 16 | as many as the number of objects returned by the underlying dataset's __getitem__ method. 17 | """ 18 | 19 | def __init__(self, dataset, replace_map, offsets): 20 | super().__init__(dataset) 21 | assert len(replace_map) > 0 22 | self.replace_map = replace_map 23 | self.offsets = offsets 24 | 25 | def __getitem__(self, index): 26 | item = self.dataset[index] 27 | is_tuple = isinstance(item, tuple) 28 | srcs = item if is_tuple else [item] 29 | 30 | for offset, src in zip(self.offsets, srcs): 31 | for k, v in self.replace_map.items(): 32 | src_off = src[offset:] if offset >= 0 else src[:offset] 33 | src_off.masked_fill_(src_off == k, v) 34 | 35 | item = srcs if is_tuple else srcs[0] 36 | return item 37 | -------------------------------------------------------------------------------- /fairseq/data/roll_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class RollDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, shifts): 14 | super().__init__(dataset) 15 | self.shifts = shifts 16 | 17 | def __getitem__(self, index): 18 | item = self.dataset[index] 19 | return torch.roll(item, self.shifts) 20 | -------------------------------------------------------------------------------- /fairseq/data/sharded_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import itertools 7 | import os 8 | import random 9 | 10 | from . import BaseWrapperDataset 11 | from fairseq.data import data_utils 12 | 13 | 14 | class ShardedDataset(BaseWrapperDataset): 15 | """A :class:`~fairseq.data.FairseqDataset` wrapper that loads one shard of a sharded dataset. 16 | 17 | Loads a dataset which has been sharded into multiple files; 
each shard is only loaded for each specific epoch 18 | 19 | """ 20 | 21 | def __init__( 22 | self, 23 | dictionary, 24 | dataset_impl: str, 25 | path: str, 26 | split: str, 27 | epoch: int, 28 | name: str = None, 29 | combine: bool = False, 30 | seed: int = 0, 31 | ): 32 | self._name = name if name is not None else os.path.basename(path) 33 | num_shards = 0 34 | for i in itertools.count(): 35 | if not os.path.exists(os.path.join(path, "shard" + str(i))): 36 | break 37 | num_shards += 1 38 | 39 | if num_shards > 0 and split == "train": 40 | random.seed(seed ^ epoch) 41 | shard = random.randint(0, num_shards - 1) 42 | split_path = os.path.join(path, "shard" + str(shard), split) 43 | else: 44 | split_path = os.path.join(path, split) 45 | if os.path.isdir(split_path): 46 | split_path = os.path.join(split_path, split) 47 | 48 | dataset = data_utils.load_indexed_dataset( 49 | split_path, dictionary, dataset_impl, combine=combine 50 | ) 51 | if dataset is None: 52 | raise FileNotFoundError( 53 | "Dataset not found: {} ({})".format(split, split_path) 54 | ) 55 | 56 | super().__init__(dataset) 57 | 58 | @property 59 | def name(self): 60 | return self._name 61 | -------------------------------------------------------------------------------- /fairseq/data/sort_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class SortDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, sort_order): 14 | super().__init__(dataset) 15 | if not isinstance(sort_order, (list, tuple)): 16 | sort_order = [sort_order] 17 | self.sort_order = sort_order 18 | 19 | assert all(len(so) == len(dataset) for so in sort_order) 20 | 21 | def ordered_indices(self): 22 | return np.lexsort(self.sort_order) 23 | -------------------------------------------------------------------------------- /fairseq/data/strip_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class StripTokenDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, id_to_strip): 12 | super().__init__(dataset) 13 | self.id_to_strip = id_to_strip 14 | 15 | def __getitem__(self, index): 16 | item = self.dataset[index] 17 | return item[item.ne(self.id_to_strip)] 18 | -------------------------------------------------------------------------------- /fairseq/data/subsample_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class SubsampleDataset(BaseWrapperDataset): 12 | """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples 13 | 14 | Args: 15 | dataset (~torch.utils.data.Dataset): dataset to subsample 16 | size_ratio(float): the ratio to subsample to. 
Must be between 0 and 1 (exclusive). 17 | """ 18 | 19 | def __init__(self, dataset, size_ratio): 20 | super().__init__(dataset) 21 | assert 0 < size_ratio < 1 22 | self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int) 23 | self.indices = np.random.choice( 24 | list(range(len(self.dataset))), self.actual_size, replace=False 25 | ) 26 | print( 27 | "subsampled dataset from {} to {} (ratio={})".format( 28 | len(self.dataset), self.actual_size, size_ratio 29 | ) 30 | ) 31 | 32 | def __getitem__(self, index): 33 | return self.dataset[self.indices[index]] 34 | 35 | def __len__(self): 36 | return self.actual_size 37 | 38 | def collater(self, samples): 39 | return self.dataset.collater(samples) 40 | 41 | @property 42 | def sizes(self): 43 | return self.dataset.sizes[self.indices] 44 | 45 | @property 46 | def name(self): 47 | return self.dataset.name 48 | 49 | def num_tokens(self, index): 50 | return self.dataset.num_tokens(self.indices[index]) 51 | 52 | def size(self, index): 53 | return self.dataset.size(self.indices[index]) 54 | 55 | def ordered_indices(self): 56 | """Return an ordered list of indices. Batches will be constructed based 57 | on this order.""" 58 | if getattr(self, 'shuffle', False):  # 'shuffle' is never set in __init__; guard against AttributeError 59 | order = [np.random.permutation(len(self))] 60 | else: 61 | order = [np.arange(len(self))] 62 | order.append(self.sizes) 63 | return np.lexsort(order) 64 | 65 | def prefetch(self, indices): 66 | self.dataset.prefetch(self.indices[indices]) 67 | -------------------------------------------------------------------------------- /fairseq/data/transform_eos_lang_pair_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | from . import FairseqDataset 8 | import torch 9 | from typing import Optional 10 | 11 | 12 | class TransformEosLangPairDataset(FairseqDataset): 13 | """A :class:`~fairseq.data.FairseqDataset` wrapper that transforms bos on 14 | collated samples of a language pair dataset. 15 | 16 | Note that the transformation is applied in :func:`collater`. 
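For instance, a multilingual translation setup might replace the source EOS
    with a language-ID token (an illustrative sketch; ``pair_ds``, ``d`` and the
    ``'__de__'`` symbol are hypothetical)::

        >>> ds = TransformEosLangPairDataset(
        ...     pair_ds, src_eos=d.eos(), new_src_eos=d.index('__de__'))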
17 | 18 | Args: 19 | dataset (~fairseq.data.FairseqDataset): dataset that collates sample into 20 | LanguagePairDataset schema 21 | src_eos (int): original source end-of-sentence symbol index to be replaced 22 | new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol 23 | tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced 24 | new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the 25 | beginning of 'prev_output_tokens' 26 | """ 27 | 28 | def __init__( 29 | self, 30 | dataset: FairseqDataset, 31 | src_eos: int, 32 | new_src_eos: Optional[int] = None, 33 | tgt_bos: Optional[int] = None, 34 | new_tgt_bos: Optional[int] = None, 35 | ): 36 | self.dataset = dataset 37 | self.src_eos = src_eos 38 | self.new_src_eos = new_src_eos 39 | self.tgt_bos = tgt_bos 40 | self.new_tgt_bos = new_tgt_bos 41 | 42 | def __getitem__(self, index): 43 | return self.dataset[index] 44 | 45 | def __len__(self): 46 | return len(self.dataset) 47 | 48 | def collater(self, samples): 49 | samples = self.dataset.collater(samples) 50 | 51 | if self.new_src_eos is not None: 52 | if self.dataset.left_pad_source: 53 | assert(samples['net_input']['src_tokens'][:, -1] != self.src_eos).sum() == 0 54 | samples['net_input']['src_tokens'][:, -1] = self.new_src_eos 55 | else: 56 | eos_idx = samples['net_input']['src_lengths'] - 1 57 | assert( 58 | samples['net_input']['src_tokens'][torch.arange(eos_idx.size(0)), eos_idx] != self.src_eos 59 | ).sum() == 0 60 | eos_idx = eos_idx.resize_(len(samples['net_input']['src_lengths']), 1) 61 | samples['net_input']['src_tokens'].scatter_(1, eos_idx, self.new_src_eos) 62 | 63 | if self.new_tgt_bos is not None and 'prev_output_tokens' in samples['net_input']: 64 | if self.dataset.left_pad_target: 65 | # TODO: support different padding direction on target side 66 | raise NotImplementedError( 67 | 'TransformEosLangPairDataset does not implement --left-pad-target True option' 68 | ) 69 | else: 70 | assert (samples['net_input']['prev_output_tokens'][:, 0] != self.tgt_bos).sum() == 0 71 | samples['net_input']['prev_output_tokens'][:, 0] = self.new_tgt_bos 72 | 73 | return samples 74 | 75 | def num_tokens(self, index): 76 | return self.dataset.num_tokens(index) 77 | 78 | def size(self, index): 79 | return self.dataset.size(index) 80 | 81 | def ordered_indices(self): 82 | return self.dataset.ordered_indices() 83 | 84 | @property 85 | def supports_prefetch(self): 86 | return getattr(self.dataset, 'supports_prefetch', False) 87 | 88 | def prefetch(self, indices): 89 | return self.dataset.prefetch(indices) 90 | -------------------------------------------------------------------------------- /fairseq/data/truncate_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class TruncateDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, truncation_length): 14 | super().__init__(dataset) 15 | assert truncation_length is not None 16 | self.truncation_length = truncation_length 17 | self.dataset = dataset 18 | 19 | def __getitem__(self, index): 20 | item = self.dataset[index] 21 | item_len = item.size(0) 22 | if item_len > self.truncation_length: 23 | item = item[:self.truncation_length] 24 | return item 25 | 26 | @property 27 | def sizes(self): 28 | return np.minimum(self.dataset.sizes, self.truncation_length) 29 | 30 | def __len__(self): 31 | return len(self.dataset) 32 | -------------------------------------------------------------------------------- /fairseq/file_io.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import os 9 | import shutil 10 | from typing import List, Optional 11 | 12 | 13 | try: 14 | from fvcore.common.file_io import PathManager as FVCorePathManager 15 | 16 | except (ImportError, ModuleNotFoundError): 17 | FVCorePathManager = None 18 | 19 | 20 | class PathManager: 21 | """ 22 | Wrapper for insulating OSS I/O (using Python builtin operations) from 23 | fvcore's PathManager abstraction (for transparently handling various 24 | internal backends). 25 | """ 26 | 27 | @staticmethod 28 | def open( 29 | path: str, 30 | mode: str = "r", 31 | buffering: int = -1, 32 | encoding: Optional[str] = None, 33 | errors: Optional[str] = None, 34 | newline: Optional[str] = None, 35 | ): 36 | if FVCorePathManager: 37 | return FVCorePathManager.open( 38 | path=path, 39 | mode=mode, 40 | buffering=buffering, 41 | encoding=encoding, 42 | errors=errors, 43 | newline=newline, 44 | ) 45 | return open( 46 | path, 47 | mode=mode, 48 | buffering=buffering, 49 | encoding=encoding, 50 | errors=errors, 51 | newline=newline, 52 | ) 53 | 54 | @staticmethod 55 | def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: 56 | if FVCorePathManager: 57 | return FVCorePathManager.copy( 58 | src_path=src_path, dst_path=dst_path, overwrite=overwrite 59 | ) 60 | return shutil.copyfile(src_path, dst_path) 61 | 62 | @staticmethod 63 | def get_local_path(path: str) -> str: 64 | if FVCorePathManager: 65 | return FVCorePathManager.get_local_path(path) 66 | return path 67 | 68 | @staticmethod 69 | def exists(path: str) -> bool: 70 | if FVCorePathManager: 71 | return FVCorePathManager.exists(path) 72 | return os.path.exists(path) 73 | 74 | @staticmethod 75 | def isfile(path: str) -> bool: 76 | if FVCorePathManager: 77 | return FVCorePathManager.isfile(path) 78 | return os.path.isfile(path) 79 | 80 | @staticmethod 81 | def ls(path: str) -> List[str]: 82 | if FVCorePathManager: 83 | return FVCorePathManager.ls(path) 84 | return os.listdir(path) 85 | 86 | @staticmethod 87 | def mkdirs(path: str) -> None: 88 | if FVCorePathManager: 89 | return FVCorePathManager.mkdirs(path) 90 | os.makedirs(path, exist_ok=True) 91 | 92 | @staticmethod 93 | def rm(path: str) -> None: 94 | if FVCorePathManager: 95 | return FVCorePathManager.rm(path) 96 | os.remove(path) 97 | 98 | @staticmethod 99 | def register_handler(handler) -> None: 100 | if FVCorePathManager: 101 | return FVCorePathManager.register_handler(handler=handler) 102 | 
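# Usage sketch (illustrative): PathManager mirrors the builtin file API and
# silently falls back to local I/O when fvcore is not installed, so callers
# need not branch on the backend themselves. For example:
#
#     from fairseq.file_io import PathManager
#     if not PathManager.exists('/tmp/ckpts'):
#         PathManager.mkdirs('/tmp/ckpts')
#     with PathManager.open('/tmp/ckpts/log.txt', 'w') as f:
#         f.write('hello')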
-------------------------------------------------------------------------------- /fairseq/meters.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import time 7 | 8 | 9 | class AverageMeter(object): 10 | """Computes and stores the average and current value""" 11 | def __init__(self): 12 | self.reset() 13 | 14 | def reset(self): 15 | self.val = 0 16 | self.avg = 0 17 | self.sum = 0 18 | self.count = 0 19 | 20 | def update(self, val, n=1): 21 | self.val = val 22 | self.sum += val * n 23 | self.count += n 24 | self.avg = self.sum / self.count 25 | 26 | 27 | class TimeMeter(object): 28 | """Computes the average occurrence of some event per second""" 29 | def __init__(self, init=0): 30 | self.reset(init) 31 | 32 | def reset(self, init=0): 33 | self.init = init 34 | self.start = time.time() 35 | self.n = 0 36 | 37 | def update(self, val=1): 38 | self.n += val 39 | 40 | @property 41 | def avg(self): 42 | return self.n / self.elapsed_time 43 | 44 | @property 45 | def elapsed_time(self): 46 | return self.init + (time.time() - self.start) 47 | 48 | 49 | class StopwatchMeter(object): 50 | """Computes the sum/avg duration of some event in seconds""" 51 | def __init__(self): 52 | self.reset() 53 | 54 | def start(self): 55 | self.start_time = time.time() 56 | 57 | def stop(self, n=1): 58 | if self.start_time is not None: 59 | delta = time.time() - self.start_time 60 | self.sum += delta 61 | self.n += n 62 | self.start_time = None 63 | 64 | def reset(self): 65 | self.sum = 0 66 | self.n = 0 67 | self.start_time = None 68 | 69 | @property 70 | def avg(self): 71 | return self.sum / self.n 72 | -------------------------------------------------------------------------------- /fairseq/models/bart/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | -------------------------------------------------------------------------------- /fairseq/models/composite_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.models import FairseqEncoder 7 | 8 | 9 | class CompositeEncoder(FairseqEncoder): 10 | """ 11 | A wrapper around a dictionary of :class:`FairseqEncoder` objects. 12 | 13 | We run forward on each encoder and return a dictionary of outputs. The first 14 | encoder's dictionary is used for initialization. 15 | 16 | Args: 17 | encoders (dict): a dictionary of :class:`FairseqEncoder` objects. 
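Example (an illustrative sketch; ``enc_a`` and ``enc_b`` are hypothetical
        encoders that share a dictionary)::

            >>> encoder = CompositeEncoder({'a': enc_a, 'b': enc_b})
            >>> out = encoder(src_tokens, src_lengths)
            >>> sorted(out.keys())
            ['a', 'b']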
18 | """ 19 | 20 | def __init__(self, encoders): 21 | super().__init__(next(iter(encoders.values())).dictionary) 22 | self.encoders = encoders 23 | for key in self.encoders: 24 | self.add_module(key, self.encoders[key]) 25 | 26 | def forward(self, src_tokens, src_lengths): 27 | """ 28 | Args: 29 | src_tokens (LongTensor): tokens in the source language of shape 30 | `(batch, src_len)` 31 | src_lengths (LongTensor): lengths of each source sentence of shape 32 | `(batch)` 33 | 34 | Returns: 35 | dict: 36 | the outputs from each Encoder 37 | """ 38 | encoder_out = {} 39 | for key in self.encoders: 40 | encoder_out[key] = self.encoders[key](src_tokens, src_lengths) 41 | return encoder_out 42 | 43 | def reorder_encoder_out(self, encoder_out, new_order): 44 | """Reorder encoder output according to new_order.""" 45 | for key in self.encoders: 46 | encoder_out[key] = self.encoders[key].reorder_encoder_out(encoder_out[key], new_order) 47 | return encoder_out 48 | 49 | def max_positions(self): 50 | return min([self.encoders[key].max_positions() for key in self.encoders]) 51 | 52 | def upgrade_state_dict(self, state_dict): 53 | for key in self.encoders: 54 | self.encoders[key].upgrade_state_dict(state_dict) 55 | return state_dict 56 | -------------------------------------------------------------------------------- /fairseq/models/distributed_fairseq_model.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import inspect 7 | 8 | import torch.nn as nn 9 | 10 | from fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel 11 | from fairseq.models import BaseFairseqModel 12 | 13 | 14 | def DistributedFairseqModel(args, model): 15 | """ 16 | Wrap a *model* to support distributed data parallel training. 17 | 18 | This is similar to the built-in DistributedDataParallel, but allows 19 | additional configuration of the DistributedDataParallel class to 20 | use, and also provides easier access to the wrapped model by 21 | forwarding requests for missing attributes to the wrapped model. 
22 | 23 | Args: 24 | args (argparse.Namespace): fairseq args 25 | model (BaseFairseqModel): model to wrap 26 | """ 27 | # determine which DDP class to extend 28 | assert isinstance(model, nn.Module) 29 | if args.ddp_backend == 'c10d': 30 | ddp_class = nn.parallel.DistributedDataParallel 31 | init_kwargs = dict( 32 | module=model, 33 | device_ids=[args.device_id], 34 | output_device=args.device_id, 35 | broadcast_buffers=False, 36 | bucket_cap_mb=args.bucket_cap_mb, 37 | ) 38 | # Maintain backward compatibility 39 | if 'check_reduction' in inspect.getargspec(ddp_class)[0]: 40 | init_kwargs['check_reduction'] = True 41 | if 'find_unused_parameters' in inspect.getargspec(ddp_class)[0]: 42 | init_kwargs['find_unused_parameters'] = args.find_unused_parameters 43 | elif args.ddp_backend == 'no_c10d': 44 | ddp_class = LegacyDistributedDataParallel 45 | init_kwargs = dict( 46 | module=model, 47 | world_size=args.distributed_world_size, 48 | buffer_size=2**28, 49 | ) 50 | else: 51 | raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend) 52 | 53 | class _DistributedFairseqModel(ddp_class): 54 | """Extend DistributedDataParallel to check for missing 55 | attributes in the wrapped module.""" 56 | 57 | def __init__(self, *args, **kwargs): 58 | super().__init__(*args, **kwargs) 59 | 60 | def __getattr__(self, name): 61 | wrapped_module = super().__getattr__('module') 62 | if hasattr(wrapped_module, name): 63 | return getattr(wrapped_module, name) 64 | return super().__getattr__(name) 65 | 66 | return _DistributedFairseqModel(**init_kwargs) 67 | -------------------------------------------------------------------------------- /fairseq/models/fairseq_decoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | from fairseq import utils 9 | 10 | 11 | class FairseqDecoder(nn.Module): 12 | """Base class for decoders.""" 13 | 14 | def __init__(self, dictionary): 15 | super().__init__() 16 | self.dictionary = dictionary 17 | self.onnx_trace = False 18 | 19 | def forward(self, prev_output_tokens, encoder_out=None, **kwargs): 20 | """ 21 | Args: 22 | prev_output_tokens (LongTensor): shifted output tokens of shape 23 | `(batch, tgt_len)`, for teacher forcing 24 | encoder_out (dict, optional): output from the encoder, used for 25 | encoder-side attention 26 | 27 | Returns: 28 | tuple: 29 | - the decoder's output of shape `(batch, tgt_len, vocab)` 30 | - a dictionary with any model-specific outputs 31 | """ 32 | x, extra = self.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs) 33 | x = self.output_layer(x) 34 | return x, extra 35 | 36 | def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs): 37 | """ 38 | Returns: 39 | tuple: 40 | - the decoder's features of shape `(batch, tgt_len, embed_dim)` 41 | - a dictionary with any model-specific outputs 42 | """ 43 | raise NotImplementedError 44 | 45 | def output_layer(self, features, **kwargs): 46 | """ 47 | Project features to the default output size, e.g., vocabulary size. 48 | 49 | Args: 50 | features (Tensor): features returned by *extract_features*. 
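A typical implementation projects features to vocabulary logits with a
        shared or dedicated weight matrix; a minimal sketch (illustrative only,
        assuming the subclass defines an ``embed_out`` parameter and imports
        ``torch.nn.functional as F``)::

            >>> def output_layer(self, features, **kwargs):
            ...     return F.linear(features, self.embed_out)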
51 | """ 52 | raise NotImplementedError 53 | 54 | def get_normalized_probs(self, net_output, log_probs, sample): 55 | """Get normalized probabilities (or log probs) from a net's output.""" 56 | 57 | if hasattr(self, 'adaptive_softmax') and self.adaptive_softmax is not None: 58 | if sample is not None: 59 | assert 'target' in sample 60 | target = sample['target'] 61 | else: 62 | target = None 63 | out = self.adaptive_softmax.get_log_prob(net_output[0], target=target) 64 | return out.exp_() if not log_probs else out 65 | 66 | logits = net_output[0] 67 | if log_probs: 68 | return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace) 69 | else: 70 | return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace) 71 | 72 | def max_positions(self): 73 | """Maximum input length supported by the decoder.""" 74 | return 1e6 # an arbitrary large number 75 | 76 | def upgrade_state_dict(self, state_dict): 77 | """Upgrade a (possibly old) state dict for new versions of fairseq.""" 78 | return state_dict 79 | 80 | def prepare_for_onnx_export_(self): 81 | self.onnx_trace = True 82 | -------------------------------------------------------------------------------- /fairseq/models/fairseq_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | 9 | class FairseqEncoder(nn.Module): 10 | """Base class for encoders.""" 11 | 12 | def __init__(self, dictionary): 13 | super().__init__() 14 | self.dictionary = dictionary 15 | 16 | def forward(self, src_tokens, src_lengths=None, **kwargs): 17 | """ 18 | Args: 19 | src_tokens (LongTensor): tokens in the source language of shape 20 | `(batch, src_len)` 21 | src_lengths (LongTensor): lengths of each source sentence of shape 22 | `(batch)` 23 | """ 24 | raise NotImplementedError 25 | 26 | def reorder_encoder_out(self, encoder_out, new_order): 27 | """ 28 | Reorder encoder output according to `new_order`. 29 | 30 | Args: 31 | encoder_out: output from the ``forward()`` method 32 | new_order (LongTensor): desired order 33 | 34 | Returns: 35 | `encoder_out` rearranged according to `new_order` 36 | """ 37 | raise NotImplementedError 38 | 39 | def max_positions(self): 40 | """Maximum input length supported by the encoder.""" 41 | return 1e6 # an arbitrary large number 42 | 43 | def upgrade_state_dict(self, state_dict): 44 | """Upgrade a (possibly old) state dict for new versions of fairseq.""" 45 | return state_dict 46 | -------------------------------------------------------------------------------- /fairseq/models/model_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from typing import List, Optional 7 | 8 | import torch 9 | from torch import Tensor 10 | 11 | 12 | @torch.jit.script 13 | def script_skip_tensor_list(x: List[Tensor], mask): 14 | res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x] 15 | outputs = [] 16 | for i, t in enumerate(res): 17 | if t.numel() != 0: 18 | outputs.append(t) 19 | else: 20 | outputs.append(x[i]) 21 | return outputs 22 | 23 | 24 | @torch.jit.script 25 | def script_skip_tensor(x: Tensor, mask): 26 | # None case 27 | if x.size(0) == 0: 28 | return x 29 | res = x[mask] if x.size(0) == mask.size(0) else x[:, mask] 30 | if res.numel() == 0: 31 | return x 32 | else: 33 | return res 34 | 35 | 36 | @torch.jit.script 37 | def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int): 38 | """ 39 | Expand 2D/3D tensor on dim=1 40 | """ 41 | if x is None: 42 | return None 43 | 44 | assert x.dim() == 2 or x.dim() == 3 45 | assert trg_dim >= x.size(1), (trg_dim, x.size()) 46 | if trg_dim == x.size(1): 47 | return x 48 | 49 | dims = [x.size(0), trg_dim - x.size(1)] 50 | if x.dim() == 3: 51 | dims.append(x.size(2)) 52 | x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1) 53 | 54 | return x 55 | 56 | 57 | @torch.jit.script 58 | def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor: 59 | return x if x is not None else y 60 | 61 | 62 | @torch.jit.script 63 | def fill_tensors(x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int) -> Optional[Tensor]: 64 | """ 65 | Filling tensor x with y at masked positions (dim=0). 66 | """ 67 | if x is None or x.size()[0] == 0 or y is None: 68 | return x 69 | assert x.dim() == y.dim() and mask.size(0) == x.size(0) 70 | assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) 71 | 72 | n_selected = mask.sum() 73 | if n_selected == 0: 74 | return x 75 | assert n_selected == y.size(0) 76 | if n_selected == x.size(0): 77 | return y 78 | 79 | if x.size(1) < y.size(1): 80 | x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx) 81 | x[mask] = y 82 | elif x.size(1) > y.size(1): 83 | x[mask] = torch.tensor(padding_idx).type_as(x) 84 | if x.dim() == 2: 85 | x[mask, :y.size(1)] = y 86 | else: 87 | x[mask, :y.size(1), :] = y 88 | else: 89 | x[mask] = y 90 | return x 91 | -------------------------------------------------------------------------------- /fairseq/models/nat/__init__.py: -------------------------------------------------------------------------------- 1 | from .fairseq_nat_model import * 2 | from .nonautoregressive_transformer import * 3 | from .nat_crf_transformer import * 4 | from .iterative_nonautoregressive_transformer import * 5 | from .cmlm_transformer import * 6 | from .levenshtein_transformer import * 7 | from .insertion_transformer import * 8 | -------------------------------------------------------------------------------- /fairseq/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | -------------------------------------------------------------------------------- /fairseq/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .adaptive_input import AdaptiveInput 7 | from .adaptive_softmax import AdaptiveSoftmax 8 | from .beamable_mm import BeamableMM 9 | from .character_token_embedder import CharacterTokenEmbedder 10 | from .conv_tbc import ConvTBC 11 | from .downsampled_multihead_attention import DownsampledMultiHeadAttention 12 | from .dynamic_convolution import DynamicConv, DynamicConv1dTBC 13 | from .dynamic_crf_layer import DynamicCRF 14 | from .gelu import gelu, gelu_accurate 15 | from .grad_multiply import GradMultiply 16 | from .highway import Highway 17 | from .layer_norm import LayerNorm 18 | from .learned_positional_embedding import LearnedPositionalEmbedding 19 | from .lightweight_convolution import LightweightConv, LightweightConv1dTBC 20 | from .linearized_convolution import LinearizedConvolution 21 | from .logsumexp_moe import LogSumExpMoE 22 | from .mean_pool_gating_network import MeanPoolGatingNetwork 23 | from .multihead_attention import MultiheadAttention 24 | from .positional_embedding import PositionalEmbedding 25 | from .scalar_bias import ScalarBias 26 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding 27 | from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer 28 | from .transformer_sentence_encoder import TransformerSentenceEncoder 29 | from .unfold import unfold1d 30 | from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer 31 | from .vggblock import VGGBlock 32 | 33 | __all__ = [ 34 | 'AdaptiveInput', 35 | 'AdaptiveSoftmax', 36 | 'BeamableMM', 37 | 'CharacterTokenEmbedder', 38 | 'ConvTBC', 39 | 'DownsampledMultiHeadAttention', 40 | 'DynamicConv1dTBC', 41 | 'DynamicConv', 42 | 'DynamicCRF', 43 | 'gelu', 44 | 'gelu_accurate', 45 | 'GradMultiply', 46 | 'Highway', 47 | 'LayerNorm', 48 | 'LearnedPositionalEmbedding', 49 | 'LightweightConv1dTBC', 50 | 'LightweightConv', 51 | 'LinearizedConvolution', 52 | 'LogSumExpMoE', 53 | 'MeanPoolGatingNetwork', 54 | 'MultiheadAttention', 55 | 'PositionalEmbedding', 56 | 'ScalarBias', 57 | 'SinusoidalPositionalEmbedding', 58 | 'TransformerSentenceEncoderLayer', 59 | 'TransformerSentenceEncoder', 60 | 'TransformerDecoderLayer', 61 | 'TransformerEncoderLayer', 62 | 'VGGBlock', 63 | 'unfold1d', 64 | ] 65 | -------------------------------------------------------------------------------- /fairseq/modules/adaptive_input.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | 7 | import torch 8 | from torch import nn 9 | 10 | from typing import List 11 | 12 | 13 | class AdaptiveInput(nn.Module): 14 | 15 | def __init__( 16 | self, 17 | vocab_size: int, 18 | padding_idx: int, 19 | initial_dim: int, 20 | factor: float, 21 | output_dim: int, 22 | cutoff: List[int], 23 | ): 24 | super().__init__() 25 | 26 | if vocab_size > cutoff[-1]: 27 | cutoff = cutoff + [vocab_size] 28 | else: 29 | assert vocab_size == cutoff[ 30 | -1], 'cannot specify cutoff larger than vocab size' 31 | 32 | self.cutoff = cutoff 33 | self.embedding_dim = output_dim 34 | self.padding_idx = padding_idx 35 | 36 | self.embeddings = nn.ModuleList() 37 | for i in range(len(self.cutoff)): 38 | prev = self.cutoff[i - 1] if i > 0 else 0 39 | size = self.cutoff[i] - prev 40 | dim = int(initial_dim // (factor ** i)) 41 | seq = nn.Sequential( 42 | nn.Embedding(size, dim, padding_idx), 43 | nn.Linear(dim, output_dim, bias=False) 44 | ) 45 | self.embeddings.append(seq) 46 | 47 | def init_weights(m): 48 | if isinstance(m, nn.Embedding): 49 | nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5) 50 | nn.init.constant_(m.weight[padding_idx], 0) 51 | elif hasattr(m, 'weight'): 52 | nn.init.xavier_uniform_(m.weight) 53 | 54 | self.apply(init_weights) 55 | 56 | self.register_buffer('_float_tensor', torch.FloatTensor(1)) 57 | 58 | def weights_for_band(self, band: int): 59 | return self.embeddings[band][0].weight, self.embeddings[band][1].weight 60 | 61 | def forward(self, input: torch.Tensor): 62 | result = self._float_tensor.new(input.shape + (self.embedding_dim,)) 63 | for i in range(len(self.cutoff)): 64 | mask = input.lt(self.cutoff[i]) 65 | if i > 0: 66 | mask.mul_(input.ge(self.cutoff[i - 1])) 67 | chunk_input = input[mask] - self.cutoff[i - 1] 68 | else: 69 | chunk_input = input[mask] 70 | if mask.any(): 71 | result[mask] = self.embeddings[i](chunk_input) 72 | return result 73 | -------------------------------------------------------------------------------- /fairseq/modules/beamable_mm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn as nn 8 | 9 | 10 | class BeamableMM(nn.Module): 11 | """This module provides an optimized MM for beam decoding with attention. 12 | 13 | It leverage the fact that the source-side of the input is replicated beam 14 | times and the target-side of the input is of width one. This layer speeds up 15 | inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)} 16 | with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}. 
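Usage sketch (illustrative; the optimization only kicks in under
    ``eval()`` with a beam size set, otherwise it falls back to ``bmm``)::

        >>> bmm = BeamableMM(beam_size=5).eval()
        >>> out = bmm(attn_weights, values)  # shapes as described above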
17 | """ 18 | def __init__(self, beam_size=None): 19 | super(BeamableMM, self).__init__() 20 | self.beam_size = beam_size 21 | 22 | def forward(self, input1, input2): 23 | if ( 24 | not self.training and # test mode 25 | self.beam_size is not None and # beam size is set 26 | input1.dim() == 3 and # only support batched input 27 | input1.size(1) == 1 # single time step update 28 | ): 29 | bsz, beam = input1.size(0), self.beam_size 30 | 31 | # bsz x 1 x nhu --> bsz/beam x beam x nhu 32 | input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1) 33 | 34 | # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu 35 | input2 = input2.unfold(0, beam, beam)[:, :, :, 0] 36 | 37 | # use non batched operation if bsz = beam 38 | if input1.size(0) == 1: 39 | output = torch.mm(input1[0, :, :], input2[0, :, :]) 40 | else: 41 | output = input1.bmm(input2) 42 | return output.view(bsz, 1, -1) 43 | else: 44 | return input1.bmm(input2) 45 | 46 | def set_beam_size(self, beam_size): 47 | self.beam_size = beam_size 48 | -------------------------------------------------------------------------------- /fairseq/modules/conv_tbc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from torch.nn.modules.utils import _single 8 | 9 | 10 | class ConvTBC(torch.nn.Module): 11 | """1D convolution over an input of shape (time x batch x channel) 12 | 13 | The implementation uses gemm to perform the convolution. This implementation 14 | is faster than cuDNN for small kernel sizes. 15 | """ 16 | def __init__(self, in_channels, out_channels, kernel_size, padding=0): 17 | super(ConvTBC, self).__init__() 18 | self.in_channels = in_channels 19 | self.out_channels = out_channels 20 | self.kernel_size = _single(kernel_size) 21 | self.padding = _single(padding) 22 | 23 | self.weight = torch.nn.Parameter(torch.Tensor( 24 | self.kernel_size[0], in_channels, out_channels)) 25 | self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) 26 | 27 | def forward(self, input): 28 | return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0]) 29 | 30 | def __repr__(self): 31 | s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}' 32 | ', padding={padding}') 33 | if self.bias is None: 34 | s += ', bias=False' 35 | s += ')' 36 | return s.format(name=self.__class__.__name__, **self.__dict__) 37 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .dynamicconv_layer import DynamicconvLayer # noqa 7 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | #include <torch/extension.h> 9 | #include <vector> 10 | 11 | std::vector<at::Tensor> dynamicconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector<at::Tensor> dynamicconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector<at::Tensor> dynamicconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return dynamicconv_cuda_forward(input, filters, 36 | padding_l); 37 | } 38 | 39 | std::vector<at::Tensor> dynamicconv_backward( 40 | at::Tensor gradOutput, 41 | int padding_l, 42 | at::Tensor input, 43 | at::Tensor filters) { 44 | 45 | CHECK_INPUT(gradOutput); 46 | CHECK_INPUT(input); 47 | CHECK_INPUT(filters); 48 | 49 | return dynamicconv_cuda_backward(gradOutput, padding_l, 50 | input, filters); 51 | } 52 | 53 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 54 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)"); 55 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)"); 56 | } 57 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include 23 | #include 24 | #include 25 | 26 | #define SHFL_MASK 0xffffffff 27 | 28 | template <typename scalar_t> 29 | __global__ 30 | void dynamicconv_forward_kernel(const scalar_t* input, 31 | const scalar_t* weight, 32 | int minibatch, 33 | int sequenceLength, 34 | int numFeatures, 35 | int numFiltersInBlock, 36 | int numHeads, 37 | scalar_t* output); 38 | 39 | template <typename scalar_t> 40 | __global__ 41 | void dynamicconv_backward_kernel( 42 | const scalar_t* gradOutput, // B * C * T 43 | const scalar_t* input, // B * C * T 44 | const scalar_t* weight, 45 | int minibatch, 46 | int sequenceLength, 47 | int numFeatures, 48 | int numFiltersInBlock, 49 | int numHeads, 50 | scalar_t* gradWeight, 51 | scalar_t* gradInput); // B * H * k * T 52 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/torch.h> 2 | #include <vector> 3 | 4 | std::vector<float*> dynamicconv_cpu_forward( 5 | float* input, 6 | float* filters, 7 | int padding_l); 8 | 9 | std::vector<float*> dynamicconv_cpu_backward( 10 | float* gradOutput, 11 | int padding_l, 12 | float* input, 13 | float* filters); 14 | 15 | std::vector<float*> dynamicconv_forward( 16 | float* input, 17 | float* filters, 18 | int padding_l) { 19 | 20 | return dynamicconv_cpu_forward(input, filters, padding_l); 21 | } 22 | 23 | std::vector<float*> dynamicconv_backward( 24 | float* gradOutput, 25 | int padding_l, 26 | float* input, 27 | float* filters) { 28 | 29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); 30 | } 31 | 32 | 
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); 34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); 35 | } 36 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 9 | 10 | setup( 11 | name='dynamicconv_layer', 12 | ext_modules=[ 13 | CUDAExtension( 14 | name='dynamicconv_cuda', 15 | sources=[ 16 | 'dynamicconv_cuda.cpp', 17 | 'dynamicconv_cuda_kernel.cu', 18 | ], 19 | ), 20 | ], 21 | cmdclass={ 22 | 'build_ext': BuildExtension 23 | }) 24 | -------------------------------------------------------------------------------- /fairseq/modules/gelu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with 7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs 8 | """ 9 | 10 | import math 11 | 12 | import torch 13 | 14 | 15 | def gelu_accurate(x): 16 | if not hasattr(gelu_accurate, "_a"): 17 | gelu_accurate._a = math.sqrt(2 / math.pi) 18 | return 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) 19 | 20 | 21 | def gelu(x: torch.Tensor) -> torch.Tensor: 22 | if hasattr(torch.nn.functional, 'gelu'): 23 | return torch.nn.functional.gelu(x.float()).type_as(x) 24 | else: 25 | return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) 26 | -------------------------------------------------------------------------------- /fairseq/modules/grad_multiply.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class GradMultiply(torch.autograd.Function): 10 | @staticmethod 11 | def forward(ctx, x, scale): 12 | ctx.scale = scale 13 | res = x.new(x) 14 | return res 15 | 16 | @staticmethod 17 | def backward(ctx, grad): 18 | return grad * ctx.scale, None 19 | -------------------------------------------------------------------------------- /fairseq/modules/highway.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from torch import nn 9 | 10 | 11 | class Highway(torch.nn.Module): 12 | """ 13 | A `Highway layer <https://arxiv.org/abs/1505.00387>`_. 14 | Adopted from the AllenNLP implementation.
15 | """ 16 | 17 | def __init__( 18 | self, 19 | input_dim: int, 20 | num_layers: int = 1 21 | ): 22 | super(Highway, self).__init__() 23 | self.input_dim = input_dim 24 | self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2) 25 | for _ in range(num_layers)]) 26 | self.activation = nn.ReLU() 27 | 28 | self.reset_parameters() 29 | 30 | def reset_parameters(self): 31 | for layer in self.layers: 32 | # As per comment in AllenNLP: 33 | # We should bias the highway layer to just carry its input forward. We do that by 34 | # setting the bias on `B(x)` to be positive, because that means `g` will be biased to 35 | # be high, so we will carry the input forward. The bias on `B(x)` is the second half 36 | # of the bias vector in each Linear layer. 37 | nn.init.constant_(layer.bias[self.input_dim:], 1) 38 | 39 | nn.init.constant_(layer.bias[:self.input_dim], 0) 40 | nn.init.xavier_normal_(layer.weight) 41 | 42 | def forward( 43 | self, 44 | x: torch.Tensor 45 | ): 46 | for layer in self.layers: 47 | projection = layer(x) 48 | proj_x, gate = projection.chunk(2, dim=-1) 49 | proj_x = self.activation(proj_x) 50 | gate = torch.sigmoid(gate) 51 | x = gate * x + (gate.new_tensor([1]) - gate) * proj_x 52 | return x 53 | -------------------------------------------------------------------------------- /fairseq/modules/layer_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | 10 | 11 | try: 12 | from apex.normalization import FusedLayerNorm as _FusedLayerNorm 13 | 14 | has_fused_layernorm = True 15 | 16 | class FusedLayerNorm(_FusedLayerNorm): 17 | @torch.jit.unused 18 | def forward(self, x): 19 | return super().forward(x) 20 | 21 | 22 | except ImportError: 23 | has_fused_layernorm = False 24 | 25 | 26 | def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): 27 | if not export and torch.cuda.is_available() and has_fused_layernorm: 28 | return FusedLayerNorm(normalized_shape, eps, elementwise_affine) 29 | return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) 30 | 31 | 32 | class Fp32LayerNorm(nn.LayerNorm): 33 | def __init__(self, *args, **kwargs): 34 | super().__init__(*args, **kwargs) 35 | 36 | def forward(self, input): 37 | output = F.layer_norm( 38 | input.float(), 39 | self.normalized_shape, 40 | self.weight.float() if self.weight is not None else None, 41 | self.bias.float() if self.bias is not None else None, 42 | self.eps, 43 | ) 44 | return output.type_as(input) 45 | -------------------------------------------------------------------------------- /fairseq/modules/learned_positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | from fairseq import utils 9 | 10 | 11 | class LearnedPositionalEmbedding(nn.Embedding): 12 | """ 13 | This module learns positional embeddings up to a fixed maximum size. 
14 | Padding ids are ignored by either offsetting based on padding_idx 15 | or by setting padding_idx to None and ensuring that the appropriate 16 | position ids are passed to the forward function. 17 | """ 18 | 19 | def __init__( 20 | self, 21 | num_embeddings: int, 22 | embedding_dim: int, 23 | padding_idx: int, 24 | ): 25 | super().__init__(num_embeddings, embedding_dim, padding_idx) 26 | self.onnx_trace = False 27 | 28 | def forward(self, input, incremental_state=None, positions=None): 29 | """Input is expected to be of size [bsz x seqlen].""" 30 | assert ( 31 | (positions is None) or (self.padding_idx is None) 32 | ), "If positions is pre-computed then padding_idx should not be set." 33 | 34 | if positions is None: 35 | if incremental_state is not None: 36 | # positions is the same for every token when decoding a single step 37 | # Without the int() cast, it doesn't work in some cases when exporting to ONNX 38 | positions = input.data.new(1, 1).fill_(int(self.padding_idx + input.size(1))) 39 | else: 40 | positions = utils.make_positions( 41 | input, self.padding_idx, onnx_trace=self.onnx_trace, 42 | ) 43 | return super().forward(positions) 44 | 45 | def max_positions(self): 46 | """Maximum number of supported positions.""" 47 | if self.padding_idx is not None: 48 | return self.num_embeddings - self.padding_idx - 1 49 | else: 50 | return self.num_embeddings 51 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .lightconv_layer import LightconvLayer # noqa 7 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/lightconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | #include <torch/extension.h> 9 | #include <vector> 10 | 11 | std::vector<at::Tensor> lightconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector<at::Tensor> lightconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector<at::Tensor> lightconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return lightconv_cuda_forward(input, filters, padding_l); 36 | } 37 | 38 | std::vector<at::Tensor> lightconv_backward( 39 | at::Tensor gradOutput, 40 | int padding_l, 41 | at::Tensor input, 42 | at::Tensor filters) { 43 | 44 | CHECK_INPUT(gradOutput); 45 | CHECK_INPUT(input); 46 | CHECK_INPUT(filters); 47 | 48 | return lightconv_cuda_backward(gradOutput, padding_l, input, filters); 49 | } 50 | 51 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 52 | m.def("forward", &lightconv_forward, "lightconv forward (CUDA)"); 53 | m.def("backward", &lightconv_backward, "lightconv backward (CUDA)"); 54 | } 55 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/lightconv_cuda.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | #include <ATen/ATen.h> 9 | #include <c10/cuda/CUDAStream.h> 10 | 11 | #include <cuda.h> 12 | #include <cuda_runtime.h> 13 | 14 | #include <algorithm> 15 | #include <functional> 16 | #include <iterator> 17 | #include <limits> 18 | #include <numeric> 19 | #include <vector> 20 | 21 | #include <assert.h> 22 | #include <stdlib.h> 23 | 24 | #define SHFL_MASK 0xffffffff 25 | 26 | template<int FS, int SB, int padding_l, typename scalar_t> 27 | __global__ 28 | void lightconv_forward_kernel(const scalar_t* input, 29 | const scalar_t* filters, 30 | int minibatch, int sequenceLength, 31 | int numFeatures, int numFiltersInBlock, 32 | scalar_t* output); 33 | 34 | template<int FS, int SB, int padding_l, typename scalar_t> 35 | __global__ 36 | void lightconv_grad_wrt_input_kernel( 37 | const scalar_t* input, 38 | const scalar_t* filters, 39 | int minibatch, 40 | int sequenceLength, 41 | int numFeatures, 42 | int numFiltersInBlock, 43 | scalar_t* output); 44 | 45 | template<int FS, int SB, int padding_l, typename scalar_t> 46 | __global__ 47 | void lightconv_grad_wrt_weights_firstpass_short_kernel( 48 | const scalar_t* input, 49 | const scalar_t* gradInput, 50 | int minibatch, 51 | int sequenceLength, 52 | int numFeatures, 53 | int numFiltersInBlock, 54 | int numHeads, 55 | float* output); 56 | 57 | template<int FS, int SB, int padding_l, typename scalar_t> 58 | __global__ 59 | void lightconv_grad_wrt_weights_secondpass_short_kernel( 60 | const float* input, 61 | const int minibatch, 62 | const int numFiltersInBlock, 63 | scalar_t* output); 64 | 65 | template<int FS, int SB, int padding_l, typename scalar_t> 66 | __global__ 67 | void lightconv_grad_wrt_weights_firstpass_kernel( 68 | const scalar_t* input, 69 | const scalar_t* gradInput, 70 | int minibatch, 71 | int sequenceLength, 72 | int numFeatures, 73 | int numFiltersInBlock, 74 | float* output); 75 | 76 | template<int FS, int SB, int padding_l, typename scalar_t> 77 | __global__ 78 | void lightconv_grad_wrt_weights_secondpass_kernel( 79 | const float* input, 80 | const int minibatch, 81 | const int numFiltersInBlock, 82 | scalar_t* output); 83 | 84 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/setup.py:
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 9 | 10 | setup( 11 | name='lightconv_layer', 12 | ext_modules=[ 13 | CUDAExtension('lightconv_cuda', [ 14 | 'lightconv_cuda.cpp', 15 | 'lightconv_cuda_kernel.cu', 16 | ]), 17 | ], 18 | cmdclass={ 19 | 'build_ext': BuildExtension 20 | }) 21 | -------------------------------------------------------------------------------- /fairseq/modules/linearized_convolution.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn.functional as F 8 | 9 | from fairseq import utils 10 | 11 | from .conv_tbc import ConvTBC 12 | 13 | 14 | class LinearizedConvolution(ConvTBC): 15 | """An optimized version of nn.Conv1d. 16 | 17 | At training time, this module uses ConvTBC, which is an optimized version 18 | of Conv1d. At inference time, it optimizes incremental generation (i.e., 19 | one time step at a time) by replacing the convolutions with linear layers. 20 | Note that the input order changes from training to inference. 21 | """ 22 | 23 | def __init__(self, in_channels, out_channels, kernel_size, **kwargs): 24 | super().__init__(in_channels, out_channels, kernel_size, **kwargs) 25 | self._linearized_weight = None 26 | self.register_backward_hook(self._clear_linearized_weight) 27 | 28 | def forward(self, input, incremental_state=None): 29 | """ 30 | Args: 31 | incremental_state: Used to buffer signal; if not None, then input is 32 | expected to contain a single frame. If the input order changes 33 | between time steps, call reorder_incremental_state. 
34 | Input: 35 | Time x Batch x Channel during training 36 | Batch x Time x Channel during inference 37 | """ 38 | if incremental_state is None: 39 | output = super().forward(input) 40 | if self.kernel_size[0] > 1 and self.padding[0] > 0: 41 | # remove future timesteps added by padding 42 | output = output[:-self.padding[0], :, :] 43 | return output 44 | 45 | # reshape weight 46 | weight = self._get_linearized_weight() 47 | kw = self.kernel_size[0] 48 | 49 | bsz = input.size(0) # input: bsz x len x dim 50 | if kw > 1: 51 | input = input.data 52 | input_buffer = self._get_input_buffer(incremental_state) 53 | if input_buffer is None: 54 | input_buffer = input.new(bsz, kw, input.size(2)).zero_() 55 | self._set_input_buffer(incremental_state, input_buffer) 56 | else: 57 | # shift buffer 58 | input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() 59 | # append next input 60 | input_buffer[:, -1, :] = input[:, -1, :] 61 | input = input_buffer 62 | with torch.no_grad(): 63 | output = F.linear(input.view(bsz, -1), weight, self.bias) 64 | return output.view(bsz, 1, -1) 65 | 66 | def reorder_incremental_state(self, incremental_state, new_order): 67 | input_buffer = self._get_input_buffer(incremental_state) 68 | if input_buffer is not None: 69 | input_buffer = input_buffer.index_select(0, new_order) 70 | self._set_input_buffer(incremental_state, input_buffer) 71 | 72 | def _get_input_buffer(self, incremental_state): 73 | return utils.get_incremental_state(self, incremental_state, 'input_buffer') 74 | 75 | def _set_input_buffer(self, incremental_state, new_buffer): 76 | return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) 77 | 78 | def _get_linearized_weight(self): 79 | if self._linearized_weight is None: 80 | kw = self.kernel_size[0] 81 | weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() 82 | assert weight.size() == (self.out_channels, kw, self.in_channels) 83 | self._linearized_weight = torch.nn.Parameter(weight.view(self.out_channels, -1)) 84 | return self._linearized_weight 85 | 86 | def _clear_linearized_weight(self, *args): 87 | self._linearized_weight = None 88 | -------------------------------------------------------------------------------- /fairseq/modules/logsumexp_moe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class LogSumExpMoE(torch.autograd.Function): 10 | """Standard LogSumExp forward pass, but use *posterior* for the backward. 11 | 12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" 13 | (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, logp, posterior, dim=-1): 18 | ctx.save_for_backward(posterior) 19 | ctx.dim = dim 20 | return torch.logsumexp(logp, dim=dim) 21 | 22 | @staticmethod 23 | def backward(ctx, grad_output): 24 | posterior, = ctx.saved_tensors 25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior 26 | return grad_logp, None, None 27 | -------------------------------------------------------------------------------- /fairseq/modules/mean_pool_gating_network.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
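The custom backward above is easy to sanity-check: for out.sum(), the incoming gradient is all ones, so the gradient reaching logp should be exactly the supplied posterior rather than the softmax(logp) that plain autograd would produce. A small sketch, importing the class from the file above:

import torch
from fairseq.modules.logsumexp_moe import LogSumExpMoE

logp = torch.randn(2, 4, requires_grad=True)             # log-likelihood per expert
posterior = torch.softmax(torch.randn(2, 4), dim=-1)     # externally computed responsibilities

out = LogSumExpMoE.apply(logp, posterior, -1)            # forward matches plain logsumexp
out.sum().backward()

assert torch.allclose(out, torch.logsumexp(logp, dim=-1))
assert torch.allclose(logp.grad, posterior)              # backward routed through the posterior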
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn.functional as F 8 | 9 | 10 | class MeanPoolGatingNetwork(torch.nn.Module): 11 | """A simple mean-pooling gating network for selecting experts. 12 | 13 | This module applies mean pooling over an encoder's output and returns 14 | responsibilities for each expert. The encoder format is expected to match 15 | :class:`fairseq.models.transformer.TransformerEncoder`. 16 | """ 17 | 18 | def __init__(self, embed_dim, num_experts, dropout=None): 19 | super().__init__() 20 | self.embed_dim = embed_dim 21 | self.num_experts = num_experts 22 | 23 | self.fc1 = torch.nn.Linear(embed_dim, embed_dim) 24 | self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None 25 | self.fc2 = torch.nn.Linear(embed_dim, num_experts) 26 | 27 | def forward(self, encoder_out): 28 | if not ( 29 | hasattr(encoder_out, 'encoder_out') 30 | and hasattr(encoder_out, 'encoder_padding_mask') 31 | and encoder_out.encoder_out.size(2) == self.embed_dim 32 | ): 33 | raise ValueError('Unexpected format for encoder_out') 34 | 35 | # mean pooling over time 36 | encoder_padding_mask = encoder_out.encoder_padding_mask # B x T 37 | encoder_out = encoder_out.encoder_out.transpose(0, 1) # B x T x C 38 | if encoder_padding_mask is not None: 39 | encoder_out = encoder_out.clone() # required because of transpose above 40 | encoder_out[encoder_padding_mask] = 0 41 | ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True) 42 | x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out) 43 | else: 44 | x = torch.mean(encoder_out, dim=1) 45 | 46 | x = torch.tanh(self.fc1(x)) 47 | if self.dropout is not None: 48 | x = self.dropout(x) 49 | x = self.fc2(x) 50 | return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x) 51 | -------------------------------------------------------------------------------- /fairseq/modules/positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | from .learned_positional_embedding import LearnedPositionalEmbedding 9 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding 10 | 11 | 12 | def PositionalEmbedding( 13 | num_embeddings: int, 14 | embedding_dim: int, 15 | padding_idx: int, 16 | learned: bool = False, 17 | ): 18 | if learned: 19 | # if padding_idx is specified then offset the embedding ids by 20 | # this index and adjust num_embeddings appropriately 21 | # TODO: The right place for this offset would be inside 22 | # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
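# (Illustrative aside, not in the original file: utils.make_positions numbers
#  real tokens upward from padding_idx + 1, so a caller asking for
#  num_embeddings usable positions produces indices up to
#  padding_idx + num_embeddings. The table therefore needs
#  num_embeddings + padding_idx + 1 rows, which is exactly the adjustment
#  applied below.)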
23 | if padding_idx is not None: 24 | num_embeddings = num_embeddings + padding_idx + 1 25 | m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) 26 | nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) 27 | if padding_idx is not None: 28 | nn.init.constant_(m.weight[padding_idx], 0) 29 | else: 30 | m = SinusoidalPositionalEmbedding( 31 | embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1, 32 | ) 33 | return m 34 | -------------------------------------------------------------------------------- /fairseq/modules/scalar_bias.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | # 6 | 7 | import torch 8 | 9 | 10 | class ScalarBias(torch.autograd.Function): 11 | """ 12 | Adds a vector of scalars, used in self-attention mechanism to allow 13 | the model to optionally attend to this vector instead of the past 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, input, dim, bias_init): 18 | size = list(input.size()) 19 | size[dim] += 1 20 | output = input.new(*size).fill_(bias_init) 21 | output.narrow(dim, 1, size[dim] - 1).copy_(input) 22 | ctx.dim = dim 23 | return output 24 | 25 | @staticmethod 26 | def backward(ctx, grad): 27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None 28 | 29 | 30 | def scalar_bias(input, dim, bias_init=0): 31 | return ScalarBias.apply(input, dim, bias_init) 32 | -------------------------------------------------------------------------------- /fairseq/modules/sinusoidal_positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import math 7 | 8 | import torch 9 | import torch.nn as nn 10 | import torch.onnx.operators 11 | 12 | from fairseq import utils 13 | 14 | 15 | class SinusoidalPositionalEmbedding(nn.Module): 16 | """This module produces sinusoidal positional embeddings of any length. 17 | 18 | Padding symbols are ignored. 19 | """ 20 | 21 | def __init__(self, embedding_dim, padding_idx, init_size=1024): 22 | super().__init__() 23 | self.embedding_dim = embedding_dim 24 | self.padding_idx = padding_idx 25 | self.weights = SinusoidalPositionalEmbedding.get_embedding( 26 | init_size, 27 | embedding_dim, 28 | padding_idx, 29 | ) 30 | self.onnx_trace = False 31 | self.register_buffer('_float_tensor', torch.FloatTensor(1)) 32 | 33 | def prepare_for_onnx_export_(self): 34 | self.onnx_trace = True 35 | 36 | @staticmethod 37 | def get_embedding(num_embeddings, embedding_dim, padding_idx=None): 38 | """Build sinusoidal embeddings. 39 | 40 | This matches the implementation in tensor2tensor, but differs slightly 41 | from the description in Section 3.5 of "Attention Is All You Need". 
42 | """ 43 | half_dim = embedding_dim // 2 44 | emb = math.log(10000) / (half_dim - 1) 45 | emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) 46 | emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) 47 | emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) 48 | if embedding_dim % 2 == 1: 49 | # zero pad 50 | emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) 51 | if padding_idx is not None: 52 | emb[padding_idx, :] = 0 53 | return emb 54 | 55 | def forward(self, input, incremental_state=None, timestep=None, **kwargs): 56 | """Input is expected to be of size [bsz x seqlen].""" 57 | bsz, seq_len = torch.onnx.operators.shape_as_tensor(input) 58 | max_pos = self.padding_idx + 1 + seq_len 59 | if self.weights is None or max_pos > self.weights.size(0): 60 | # recompute/expand embeddings if needed 61 | self.weights = SinusoidalPositionalEmbedding.get_embedding( 62 | max_pos, 63 | self.embedding_dim, 64 | self.padding_idx, 65 | ) 66 | self.weights = self.weights.to(self._float_tensor) 67 | 68 | if incremental_state is not None: 69 | # positions is the same for every token when decoding a single step 70 | pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len 71 | if self.onnx_trace: 72 | return self.weights.index_select(index=self.padding_idx + pos, dim=0).unsqueeze(1).repeat(bsz, 1, 1) 73 | return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) 74 | 75 | positions = utils.make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace) 76 | if self.onnx_trace: 77 | flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) 78 | embedding_shape = torch.cat((bsz.view(1), seq_len.view(1), torch.LongTensor([-1]))) 79 | embeddings = torch.onnx.operators.reshape_from_tensor_shape(flat_embeddings, embedding_shape) 80 | return embeddings 81 | return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() 82 | 83 | def max_positions(self): 84 | """Maximum number of supported positions.""" 85 | return int(1e5) # an arbitrary large number 86 | -------------------------------------------------------------------------------- /fairseq/modules/sparse_transformer_sentence_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import torch.nn as nn 7 | from fairseq.modules import TransformerSentenceEncoder 8 | from fairseq.modules.sparse_transformer_sentence_encoder_layer import SparseTransformerSentenceEncoderLayer 9 | 10 | 11 | class SparseTransformerSentenceEncoder(TransformerSentenceEncoder): 12 | """ 13 | Sparse implementation of the TransformerSentenceEncoder 14 | - see SparseMultiheadAttention 15 | """ 16 | 17 | def __init__( 18 | self, 19 | padding_idx: int, 20 | vocab_size: int, 21 | num_encoder_layers: int = 6, 22 | embedding_dim: int = 768, 23 | ffn_embedding_dim: int = 3072, 24 | num_attention_heads: int = 8, 25 | dropout: float = 0.1, 26 | attention_dropout: float = 0.1, 27 | activation_dropout: float = 0.1, 28 | max_seq_len: int = 256, 29 | num_segments: int = 2, 30 | use_position_embeddings: bool = True, 31 | offset_positions_by_padding: bool = True, 32 | encoder_normalize_before: bool = False, 33 | apply_bert_init: bool = False, 34 | activation_fn: str = "relu", 35 | learned_pos_embedding: bool = True, 36 | add_bias_kv: bool = False, 37 | add_zero_attn: bool = False, 38 | embed_scale: float = None, 39 | freeze_embeddings: bool = False, 40 | n_trans_layers_to_freeze: int = 0, 41 | export: bool = False, 42 | is_bidirectional: bool = True, 43 | stride: int = 32, 44 | expressivity: int = 8, 45 | ) -> None: 46 | 47 | super().__init__( 48 | padding_idx, vocab_size, num_encoder_layers, embedding_dim, 49 | ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, 50 | activation_dropout, max_seq_len, num_segments, use_position_embeddings, 51 | offset_positions_by_padding, encoder_normalize_before, apply_bert_init, 52 | activation_fn, learned_pos_embedding, add_bias_kv, add_zero_attn, 53 | embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export 54 | ) 55 | 56 | self.layers = nn.ModuleList( 57 | [ 58 | SparseTransformerSentenceEncoderLayer( 59 | embedding_dim=self.embedding_dim, 60 | ffn_embedding_dim=ffn_embedding_dim, 61 | num_attention_heads=num_attention_heads, 62 | dropout=self.dropout, 63 | attention_dropout=attention_dropout, 64 | activation_dropout=activation_dropout, 65 | activation_fn=activation_fn, 66 | add_bias_kv=add_bias_kv, 67 | add_zero_attn=add_zero_attn, 68 | export=export, 69 | is_bidirectional=is_bidirectional, 70 | stride=stride, 71 | expressivity=expressivity, 72 | ) 73 | for _ in range(num_encoder_layers) 74 | ] 75 | ) 76 | 77 | def freeze_module_params(m): 78 | if m is not None: 79 | for p in m.parameters(): 80 | p.requires_grad = False 81 | 82 | for layer in range(n_trans_layers_to_freeze): 83 | freeze_module_params(self.layers[layer]) 84 | -------------------------------------------------------------------------------- /fairseq/modules/sparse_transformer_sentence_encoder_layer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
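The constructor above rebuilds self.layers with sparse layers and then re-applies the freezing loop. The freezing pattern itself is plain PyTorch; a self-contained sketch with a hypothetical stack of linear layers standing in for transformer layers:

import torch.nn as nn

layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(4)])
n_trans_layers_to_freeze = 2

def freeze_module_params(m):
    if m is not None:
        for p in m.parameters():
            p.requires_grad = False

for layer in range(n_trans_layers_to_freeze):
    freeze_module_params(layers[layer])

print([all(not p.requires_grad for p in l.parameters()) for l in layers])
# [True, True, False, False] -- only the first two layers are frozen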
5 | 6 | from fairseq.modules import TransformerSentenceEncoderLayer 7 | from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention 8 | 9 | 10 | class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer): 11 | """ 12 | Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention) 13 | """ 14 | 15 | def __init__( 16 | self, 17 | embedding_dim: int = 768, 18 | ffn_embedding_dim: int = 3072, 19 | num_attention_heads: int = 8, 20 | dropout: float = 0.1, 21 | attention_dropout: float = 0.1, 22 | activation_dropout: float = 0.1, 23 | activation_fn: str = 'relu', 24 | add_bias_kv: bool = False, 25 | add_zero_attn: bool = False, 26 | export: bool = False, 27 | is_bidirectional: bool = True, 28 | stride: int = 32, 29 | expressivity: int = 8, 30 | ) -> None: 31 | 32 | super().__init__( 33 | embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, 34 | attention_dropout, activation_dropout, activation_fn, add_bias_kv, 35 | add_zero_attn, export 36 | ) 37 | 38 | self.self_attn = SparseMultiheadAttention( 39 | self.embedding_dim, 40 | num_attention_heads, 41 | dropout=attention_dropout, 42 | add_bias_kv=add_bias_kv, 43 | add_zero_attn=add_zero_attn, 44 | self_attention=True, 45 | is_bidirectional=is_bidirectional, 46 | stride=stride, 47 | expressivity=expressivity, 48 | ) 49 | -------------------------------------------------------------------------------- /fairseq/modules/transformer_sentence_encoder_layer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | 10 | from fairseq import utils 11 | from fairseq.modules import ( 12 | LayerNorm, 13 | MultiheadAttention, 14 | ) 15 | 16 | 17 | class TransformerSentenceEncoderLayer(nn.Module): 18 | """ 19 | Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained 20 | models.
21 | """ 22 | 23 | def __init__( 24 | self, 25 | embedding_dim: int = 768, 26 | ffn_embedding_dim: int = 3072, 27 | num_attention_heads: int = 8, 28 | dropout: float = 0.1, 29 | attention_dropout: float = 0.1, 30 | activation_dropout: float = 0.1, 31 | activation_fn: str = 'relu', 32 | add_bias_kv: bool = False, 33 | add_zero_attn: bool = False, 34 | export: bool = False, 35 | ) -> None: 36 | 37 | super().__init__() 38 | # Initialize parameters 39 | self.embedding_dim = embedding_dim 40 | self.dropout = dropout 41 | self.activation_dropout = activation_dropout 42 | 43 | # Initialize blocks 44 | self.activation_fn = utils.get_activation_fn(activation_fn) 45 | self.self_attn = MultiheadAttention( 46 | self.embedding_dim, 47 | num_attention_heads, 48 | dropout=attention_dropout, 49 | add_bias_kv=add_bias_kv, 50 | add_zero_attn=add_zero_attn, 51 | self_attention=True 52 | ) 53 | 54 | # layer norm associated with the self attention layer 55 | self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export) 56 | self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim) 57 | self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim) 58 | 59 | # layer norm associated with the position wise feed-forward NN 60 | self.final_layer_norm = LayerNorm(self.embedding_dim, export=export) 61 | 62 | def forward( 63 | self, 64 | x: torch.Tensor, 65 | self_attn_mask: torch.Tensor = None, 66 | self_attn_padding_mask: torch.Tensor = None, 67 | ): 68 | """ 69 | LayerNorm is applied either before or after the self-attention/ffn 70 | modules similar to the original Transformer imlementation. 71 | """ 72 | residual = x 73 | x, attn = self.self_attn( 74 | query=x, 75 | key=x, 76 | value=x, 77 | key_padding_mask=self_attn_padding_mask, 78 | need_weights=False, 79 | attn_mask=self_attn_mask, 80 | ) 81 | x = F.dropout(x, p=self.dropout, training=self.training) 82 | x = residual + x 83 | x = self.self_attn_layer_norm(x) 84 | 85 | residual = x 86 | x = self.activation_fn(self.fc1(x)) 87 | x = F.dropout(x, p=self.activation_dropout, training=self.training) 88 | x = self.fc2(x) 89 | x = F.dropout(x, p=self.dropout, training=self.training) 90 | x = residual + x 91 | x = self.final_layer_norm(x) 92 | return x, attn 93 | -------------------------------------------------------------------------------- /fairseq/modules/unfold.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn.functional as F 7 | 8 | 9 | def unfold1d(x, kernel_size, padding_l, pad_value=0): 10 | '''unfold T x B x C to T x B x C x K''' 11 | if kernel_size > 1: 12 | T, B, C = x.size() 13 | x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value) 14 | x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C)) 15 | else: 16 | x = x.unsqueeze(3) 17 | return x 18 | -------------------------------------------------------------------------------- /fairseq/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | from fairseq.optim.fairseq_optimizer import FairseqOptimizer 11 | from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer 12 | from fairseq.optim.bmuf import FairseqBMUF # noqa 13 | 14 | 15 | __all__ = [ 16 | 'FairseqOptimizer', 17 | 'FP16Optimizer', 18 | 'MemoryEfficientFP16Optimizer', 19 | ] 20 | 21 | 22 | build_optimizer, register_optimizer, OPTIMIZER_REGISTRY = registry.setup_registry( 23 | '--optimizer', 24 | base_class=FairseqOptimizer, 25 | default='nag', 26 | ) 27 | 28 | 29 | # automatically import any Python files in the optim/ directory 30 | for file in os.listdir(os.path.dirname(__file__)): 31 | if file.endswith('.py') and not file.startswith('_'): 32 | module = file[:file.find('.py')] 33 | importlib.import_module('fairseq.optim.' + module) 34 | -------------------------------------------------------------------------------- /fairseq/optim/adadelta.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('adadelta') 12 | class Adadelta(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', 22 | help='coefficient used for computing a running average of squared gradients') 23 | parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', 24 | help='term added to the denominator to improve numerical stability') 25 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 26 | help='weight decay') 27 | parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') 28 | # fmt: on 29 | 30 | @property 31 | def optimizer_config(self): 32 | """ 33 | Return a kwarg dictionary that will be used to override optimizer 34 | args stored in checkpoints. This allows us to load a checkpoint and 35 | resume training using a different set of optimizer args, e.g., with a 36 | different learning rate. 37 | """ 38 | return { 39 | 'lr': self.args.lr[0], 40 | 'rho': self.args.adadelta_rho, 41 | 'eps': self.args.adadelta_eps, 42 | 'weight_decay': self.args.weight_decay, 43 | } 44 | -------------------------------------------------------------------------------- /fairseq/optim/adagrad.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . 
import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('adagrad') 12 | class Adagrad(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 22 | help='weight decay') 23 | # fmt: on 24 | 25 | @property 26 | def optimizer_config(self): 27 | """ 28 | Return a kwarg dictionary that will be used to override optimizer 29 | args stored in checkpoints. This allows us to load a checkpoint and 30 | resume training using a different set of optimizer args, e.g., with a 31 | different learning rate. 32 | """ 33 | return { 34 | 'lr': self.args.lr[0], 35 | 'weight_decay': self.args.weight_decay, 36 | } 37 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import FairseqLRScheduler 11 | 12 | 13 | build_lr_scheduler, register_lr_scheduler, LR_SCHEDULER_REGISTRY = registry.setup_registry( 14 | '--lr-scheduler', 15 | base_class=FairseqLRScheduler, 16 | default='fixed', 17 | ) 18 | 19 | # automatically import any Python files in the optim/lr_scheduler/ directory 20 | for file in os.listdir(os.path.dirname(__file__)): 21 | if file.endswith('.py') and not file.startswith('_'): 22 | module = file[:file.find('.py')] 23 | importlib.import_module('fairseq.optim.lr_scheduler.' + module) 24 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .. 
import FairseqOptimizer 7 | 8 | 9 | class FairseqLRScheduler(object): 10 | 11 | def __init__(self, args, optimizer): 12 | super().__init__() 13 | if not isinstance(optimizer, FairseqOptimizer): 14 | raise ValueError('optimizer must be an instance of FairseqOptimizer') 15 | self.args = args 16 | self.optimizer = optimizer 17 | self.best = None 18 | 19 | @staticmethod 20 | def add_args(parser): 21 | """Add arguments to the parser for this LR scheduler.""" 22 | pass 23 | 24 | def state_dict(self): 25 | """Return the LR scheduler state dict.""" 26 | return {'best': self.best} 27 | 28 | def load_state_dict(self, state_dict): 29 | """Load an LR scheduler state dict.""" 30 | self.best = state_dict['best'] 31 | 32 | def step(self, epoch, val_loss=None): 33 | """Update the learning rate at the end of the given epoch.""" 34 | if val_loss is not None: 35 | if self.best is None: 36 | self.best = val_loss 37 | else: 38 | self.best = min(self.best, val_loss) 39 | 40 | def step_update(self, num_updates): 41 | """Update the learning rate after each update.""" 42 | return self.optimizer.get_lr() 43 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/fixed_schedule.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import FairseqLRScheduler, register_lr_scheduler 7 | 8 | 9 | @register_lr_scheduler('fixed') 10 | class FixedSchedule(FairseqLRScheduler): 11 | """Decay the LR on a fixed schedule.""" 12 | 13 | def __init__(self, args, optimizer): 14 | super().__init__(args, optimizer) 15 | 16 | # set defaults 17 | args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0 18 | 19 | self.lr = args.lr[0] 20 | if args.warmup_updates > 0: 21 | self.warmup_factor = 1. 
/ args.warmup_updates 22 | else: 23 | self.warmup_factor = 1 24 | 25 | @staticmethod 26 | def add_args(parser): 27 | """Add arguments to the parser for this LR scheduler.""" 28 | # fmt: off 29 | parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', 30 | help='force annealing at specified epoch') 31 | parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', 32 | help='shrink factor for annealing, lr_new = (lr * lr_shrink)') 33 | parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', 34 | help='warmup the learning rate linearly for the first N updates') 35 | # fmt: on 36 | 37 | def get_next_lr(self, epoch): 38 | lrs = self.args.lr 39 | if self.args.force_anneal is None or epoch < self.args.force_anneal: 40 | # use fixed LR schedule 41 | next_lr = lrs[min(epoch, len(lrs) - 1)] 42 | else: 43 | # anneal based on lr_shrink 44 | next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal) 45 | return next_lr 46 | 47 | def step(self, epoch, val_loss=None): 48 | """Update the learning rate at the end of the given epoch.""" 49 | super().step(epoch, val_loss) 50 | self.lr = self.get_next_lr(epoch) 51 | self.optimizer.set_lr(self.warmup_factor * self.lr) 52 | return self.optimizer.get_lr() 53 | 54 | def step_update(self, num_updates): 55 | """Update the learning rate after each update.""" 56 | if self.args.warmup_updates > 0 and num_updates < self.args.warmup_updates: 57 | self.warmup_factor = (num_updates + 1) / float(self.args.warmup_updates) 58 | self.optimizer.set_lr(self.warmup_factor * self.lr) 59 | return self.optimizer.get_lr() 60 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/inverse_square_root_schedule.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import FairseqLRScheduler, register_lr_scheduler 7 | 8 | 9 | @register_lr_scheduler('inverse_sqrt') 10 | class InverseSquareRootSchedule(FairseqLRScheduler): 11 | """Decay the LR based on the inverse square root of the update number. 12 | 13 | We also support a warmup phase where we linearly increase the learning rate 14 | from some initial learning rate (``--warmup-init-lr``) until the configured 15 | learning rate (``--lr``). Thereafter we decay proportional to the number of 16 | updates, with a decay factor set to align with the configured learning rate. 17 | 18 | During warmup:: 19 | 20 | lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates) 21 | lr = lrs[update_num] 22 | 23 | After warmup:: 24 | 25 | decay_factor = args.lr * sqrt(args.warmup_updates) 26 | lr = decay_factor / sqrt(update_num) 27 | """ 28 | 29 | def __init__(self, args, optimizer): 30 | super().__init__(args, optimizer) 31 | if len(args.lr) > 1: 32 | raise ValueError( 33 | 'Cannot use a fixed learning rate schedule with inverse_sqrt.' 34 | ' Consider --lr-scheduler=fixed instead.' 35 | ) 36 | warmup_end_lr = args.lr[0] 37 | if args.warmup_init_lr < 0: 38 | args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr 39 | 40 | # linearly warmup for the first args.warmup_updates 41 | self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates 42 | 43 | # then, decay prop.
to the inverse square root of the update number 44 | self.decay_factor = warmup_end_lr * args.warmup_updates**0.5 45 | 46 | # initial learning rate 47 | self.lr = args.warmup_init_lr 48 | self.optimizer.set_lr(self.lr) 49 | 50 | @staticmethod 51 | def add_args(parser): 52 | """Add arguments to the parser for this LR scheduler.""" 53 | # fmt: off 54 | parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N', 55 | help='warmup the learning rate linearly for the first N updates') 56 | parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR', 57 | help='initial learning rate during warmup phase; default is args.lr') 58 | # fmt: on 59 | 60 | def step(self, epoch, val_loss=None): 61 | """Update the learning rate at the end of the given epoch.""" 62 | super().step(epoch, val_loss) 63 | # we don't change the learning rate at epoch boundaries 64 | return self.optimizer.get_lr() 65 | 66 | def step_update(self, num_updates): 67 | """Update the learning rate after each update.""" 68 | if num_updates < self.args.warmup_updates: 69 | self.lr = self.args.warmup_init_lr + num_updates*self.lr_step 70 | else: 71 | self.lr = self.decay_factor * num_updates**-0.5 72 | self.optimizer.set_lr(self.lr) 73 | return self.lr 74 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/polynomial_decay_schedule.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import FairseqLRScheduler, register_lr_scheduler 7 | 8 | 9 | @register_lr_scheduler('polynomial_decay') 10 | class PolynomialDecaySchedule(FairseqLRScheduler): 11 | """Decay the LR on a polynomial decay schedule, with optional warmup.""" 12 | 13 | def __init__(self, args, optimizer): 14 | super().__init__(args, optimizer) 15 | 16 | # set defaults 17 | args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0 18 | 19 | self.lr = args.lr[0] 20 | if args.warmup_updates > 0: 21 | self.warmup_factor = 1.
/ args.warmup_updates 22 | else: 23 | self.warmup_factor = 1 24 | self.end_learning_rate = args.end_learning_rate 25 | self.total_num_update = args.total_num_update 26 | self.power = args.power 27 | self.optimizer.set_lr(self.warmup_factor * self.lr) 28 | 29 | @staticmethod 30 | def add_args(parser): 31 | """Add arguments to the parser for this LR scheduler.""" 32 | parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', 33 | help='force annealing at specified epoch') 34 | parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', 35 | help='warmup the learning rate linearly for the first N updates') 36 | parser.add_argument('--end-learning-rate', default=0.0, type=float) 37 | parser.add_argument('--power', default=1.0, type=float) 38 | parser.add_argument('--total-num-update', default=1000000, type=int) 39 | 40 | def get_next_lr(self, epoch): 41 | lrs = self.args.lr 42 | if self.args.force_anneal is None or epoch < self.args.force_anneal: 43 | # use fixed LR schedule 44 | next_lr = lrs[min(epoch, len(lrs) - 1)] 45 | else: 46 | # anneal based on lr_shrink 47 | next_lr = self.optimizer.get_lr() 48 | return next_lr 49 | 50 | def step(self, epoch, val_loss=None): 51 | """Update the learning rate at the end of the given epoch.""" 52 | super().step(epoch, val_loss) 53 | self.lr = self.get_next_lr(epoch) 54 | self.optimizer.set_lr(self.warmup_factor * self.lr) 55 | return self.optimizer.get_lr() 56 | 57 | def step_update(self, num_updates): 58 | """Update the learning rate after each update.""" 59 | if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates: 60 | self.warmup_factor = num_updates / float(self.args.warmup_updates) 61 | lr = self.warmup_factor * self.lr 62 | elif num_updates >= self.total_num_update: 63 | lr = self.end_learning_rate 64 | else: 65 | warmup = self.args.warmup_updates 66 | lr_range = self.lr - self.end_learning_rate 67 | pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup) 68 | lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate 69 | self.optimizer.set_lr(lr) 70 | return self.optimizer.get_lr() 71 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/triangular_lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import math 7 | 8 | from . import FairseqLRScheduler, register_lr_scheduler 9 | 10 | 11 | @register_lr_scheduler('triangular') 12 | class TriangularSchedule(FairseqLRScheduler): 13 | """Assign LR based on a triangular cyclical schedule. 14 | 15 | See https://arxiv.org/pdf/1506.01186.pdf for details. 16 | """ 17 | 18 | def __init__(self, args, optimizer): 19 | super().__init__(args, optimizer) 20 | if len(args.lr) > 1: 21 | raise ValueError( 22 | 'Cannot use a fixed learning rate schedule with triangular.' 23 | ' Consider --lr-scheduler=fixed instead.'
24 | ) 25 | 26 | lr = args.lr[0] 27 | 28 | assert args.max_lr > lr, 'max_lr must be more than lr' 29 | self.min_lr = lr 30 | self.max_lr = args.max_lr 31 | self.stepsize = args.lr_period_updates // 2 32 | self.lr_shrink = args.lr_shrink 33 | self.shrink_min = args.shrink_min 34 | 35 | # initial learning rate 36 | self.lr = self.min_lr 37 | self.optimizer.set_lr(self.lr) 38 | 39 | @staticmethod 40 | def add_args(parser): 41 | """Add arguments to the parser for this LR scheduler.""" 42 | # fmt: off 43 | parser.add_argument('--max-lr', required=True, type=float, metavar='LR', 44 | help='max learning rate, must be more than args.lr') 45 | parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR', 46 | help='initial number of updates per period (cycle length)') 47 | parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', 48 | help='shrink factor for annealing') 49 | parser.add_argument('--shrink-min', action='store_true', 50 | help='if set, also shrinks min lr') 51 | # fmt: on 52 | 53 | def step(self, epoch, val_loss=None): 54 | """Update the learning rate at the end of the given epoch.""" 55 | super().step(epoch, val_loss) 56 | # we don't change the learning rate at epoch boundaries 57 | return self.optimizer.get_lr() 58 | 59 | def step_update(self, num_updates): 60 | """Update the learning rate after each update.""" 61 | cycle = math.floor(num_updates / (2 * self.stepsize)) 62 | 63 | lr_shrink = self.lr_shrink ** cycle 64 | max_lr = self.max_lr * lr_shrink 65 | if self.shrink_min: 66 | min_lr = self.min_lr * lr_shrink 67 | else: 68 | min_lr = self.min_lr 69 | 70 | x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1) 71 | self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x)) 72 | 73 | self.optimizer.set_lr(self.lr) 74 | return self.lr 75 | -------------------------------------------------------------------------------- /fairseq/optim/nag.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from torch.optim.optimizer import Optimizer, required 8 | 9 | from . import FairseqOptimizer, register_optimizer 10 | 11 | 12 | @register_optimizer('nag') 13 | class FairseqNAG(FairseqOptimizer): 14 | def __init__(self, args, params): 15 | super().__init__(args) 16 | self._optimizer = NAG(params, **self.optimizer_config) 17 | 18 | @staticmethod 19 | def add_args(parser): 20 | """Add optimizer-specific arguments to the parser.""" 21 | # fmt: off 22 | parser.add_argument('--momentum', default=0.99, type=float, metavar='M', 23 | help='momentum factor') 24 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 25 | help='weight decay') 26 | # fmt: on 27 | 28 | @property 29 | def optimizer_config(self): 30 | """ 31 | Return a kwarg dictionary that will be used to override optimizer 32 | args stored in checkpoints. This allows us to load a checkpoint and 33 | resume training using a different set of optimizer args, e.g., with a 34 | different learning rate. 
35 | """ 36 | return { 37 | 'lr': self.args.lr[0], 38 | 'momentum': self.args.momentum, 39 | 'weight_decay': self.args.weight_decay, 40 | } 41 | 42 | 43 | class NAG(Optimizer): 44 | def __init__(self, params, lr=required, momentum=0, weight_decay=0): 45 | defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay) 46 | super(NAG, self).__init__(params, defaults) 47 | 48 | @property 49 | def supports_memory_efficient_fp16(self): 50 | return True 51 | 52 | def step(self, closure=None): 53 | """Performs a single optimization step. 54 | 55 | Arguments: 56 | closure (callable, optional): A closure that reevaluates the model 57 | and returns the loss. 58 | """ 59 | loss = None 60 | if closure is not None: 61 | loss = closure() 62 | 63 | for group in self.param_groups: 64 | weight_decay = group['weight_decay'] 65 | momentum = group['momentum'] 66 | lr = group['lr'] 67 | lr_old = group.get('lr_old', lr) 68 | lr_correct = lr / lr_old 69 | 70 | for p in group['params']: 71 | if p.grad is None: 72 | continue 73 | 74 | p_data_fp32 = p.data.float() 75 | 76 | d_p = p.grad.data.float() 77 | param_state = self.state[p] 78 | if 'momentum_buffer' not in param_state: 79 | param_state['momentum_buffer'] = torch.zeros_like(d_p) 80 | else: 81 | param_state['momentum_buffer'] = param_state['momentum_buffer'].type_as(d_p) 82 | 83 | buf = param_state['momentum_buffer'] 84 | 85 | if weight_decay != 0: 86 | p_data_fp32.mul_(1 - lr * weight_decay) 87 | p_data_fp32.add_(momentum * momentum * lr_correct, buf) 88 | p_data_fp32.add_(-(1 + momentum) * lr, d_p) 89 | 90 | buf.mul_(momentum * lr_correct).add_(-lr, d_p) 91 | 92 | p.data.copy_(p_data_fp32) 93 | 94 | group['lr_old'] = lr 95 | 96 | return loss 97 | -------------------------------------------------------------------------------- /fairseq/optim/sgd.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('sgd') 12 | class SGD(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.SGD(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--momentum', default=0.0, type=float, metavar='M', 22 | help='momentum factor') 23 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 24 | help='weight decay') 25 | # fmt: on 26 | 27 | @property 28 | def optimizer_config(self): 29 | """ 30 | Return a kwarg dictionary that will be used to override optimizer 31 | args stored in checkpoints. This allows us to load a checkpoint and 32 | resume training using a different set of optimizer args, e.g., with a 33 | different learning rate. 34 | """ 35 | return { 36 | 'lr': self.args.lr[0], 37 | 'momentum': self.args.momentum, 38 | 'weight_decay': self.args.weight_decay, 39 | } 40 | -------------------------------------------------------------------------------- /fairseq/pdb.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import multiprocessing 7 | import os 8 | import pdb 9 | import sys 10 | 11 | 12 | __all__ = ['set_trace'] 13 | 14 | 15 | _stdin = [None] 16 | _stdin_lock = multiprocessing.Lock() 17 | try: 18 | _stdin_fd = sys.stdin.fileno() 19 | except Exception: 20 | _stdin_fd = None 21 | 22 | 23 | class MultiprocessingPdb(pdb.Pdb): 24 | """A Pdb wrapper that works in a multiprocessing environment. 25 | 26 | Usage: `from fairseq import pdb; pdb.set_trace()` 27 | """ 28 | 29 | def __init__(self): 30 | pdb.Pdb.__init__(self, nosigint=True) 31 | 32 | def _cmdloop(self): 33 | stdin_bak = sys.stdin 34 | with _stdin_lock: 35 | try: 36 | if _stdin_fd is not None: 37 | if not _stdin[0]: 38 | _stdin[0] = os.fdopen(_stdin_fd) 39 | sys.stdin = _stdin[0] 40 | self.cmdloop() 41 | finally: 42 | sys.stdin = stdin_bak 43 | 44 | 45 | def set_trace(): 46 | pdb = MultiprocessingPdb() 47 | pdb.set_trace(sys._getframe().f_back) 48 | -------------------------------------------------------------------------------- /fairseq/registry.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import argparse 7 | 8 | 9 | REGISTRIES = {} 10 | 11 | 12 | def setup_registry( 13 | registry_name: str, 14 | base_class=None, 15 | default=None, 16 | ): 17 | assert registry_name.startswith('--') 18 | registry_name = registry_name[2:].replace('-', '_') 19 | 20 | REGISTRY = {} 21 | REGISTRY_CLASS_NAMES = set() 22 | 23 | # maintain a registry of all registries 24 | if registry_name in REGISTRIES: 25 | return # registry already exists 26 | REGISTRIES[registry_name] = { 27 | 'registry': REGISTRY, 28 | 'default': default, 29 | } 30 | 31 | def build_x(args, *extra_args, **extra_kwargs): 32 | choice = getattr(args, registry_name, None) 33 | if choice is None: 34 | return None 35 | cls = REGISTRY[choice] 36 | if hasattr(cls, 'build_' + registry_name): 37 | builder = getattr(cls, 'build_' + registry_name) 38 | else: 39 | builder = cls 40 | set_defaults(args, cls) 41 | return builder(args, *extra_args, **extra_kwargs) 42 | 43 | def register_x(name): 44 | 45 | def register_x_cls(cls): 46 | if name in REGISTRY: 47 | raise ValueError('Cannot register duplicate {} ({})'.format(registry_name, name)) 48 | if cls.__name__ in REGISTRY_CLASS_NAMES: 49 | raise ValueError( 50 | 'Cannot register {} with duplicate class name ({})'.format( 51 | registry_name, cls.__name__, 52 | ) 53 | ) 54 | if base_class is not None and not issubclass(cls, base_class): 55 | raise ValueError('{} must extend {}'.format(cls.__name__, base_class.__name__)) 56 | REGISTRY[name] = cls 57 | REGISTRY_CLASS_NAMES.add(cls.__name__) 58 | return cls 59 | 60 | return register_x_cls 61 | 62 | return build_x, register_x, REGISTRY 63 | 64 | 65 | def set_defaults(args, cls): 66 | """Helper to set default arguments based on *add_args*.""" 67 | if not hasattr(cls, 'add_args'): 68 | return 69 | parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, allow_abbrev=False) 70 | cls.add_args(parser) 71 | # copied from argparse.py: 72 | defaults = argparse.Namespace() 73 | for action in parser._actions: 74 | if action.dest is not argparse.SUPPRESS: 75 | if not hasattr(defaults, action.dest): 76 | if action.default is not 
argparse.SUPPRESS: 77 | setattr(defaults, action.dest, action.default) 78 | for key, default_value in vars(defaults).items(): 79 | if not hasattr(args, key): 80 | setattr(args, key, default_value) 81 | -------------------------------------------------------------------------------- /fairseq/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import argparse 7 | import importlib 8 | import os 9 | 10 | from .fairseq_task import FairseqTask 11 | 12 | TASK_REGISTRY = {} 13 | TASK_CLASS_NAMES = set() 14 | 15 | 16 | def setup_task(args, **kwargs): 17 | return TASK_REGISTRY[args.task].setup_task(args, **kwargs) 18 | 19 | 20 | def register_task(name): 21 | """ 22 | New tasks can be added to fairseq with the 23 | :func:`~fairseq.tasks.register_task` function decorator. 24 | 25 | For example:: 26 | 27 | @register_task('classification') 28 | class ClassificationTask(FairseqTask): 29 | (...) 30 | 31 | .. note:: 32 | 33 | All Tasks must implement the :class:`~fairseq.tasks.FairseqTask` 34 | interface. 35 | 36 | Please see the existing tasks in this directory for further usage examples. 37 | 38 | Args: 39 | name (str): the name of the task 40 | """ 41 | 42 | def register_task_cls(cls): 43 | if name in TASK_REGISTRY: 44 | raise ValueError('Cannot register duplicate task ({})'.format(name)) 45 | if not issubclass(cls, FairseqTask): 46 | raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__)) 47 | if cls.__name__ in TASK_CLASS_NAMES: 48 | raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__)) 49 | TASK_REGISTRY[name] = cls 50 | TASK_CLASS_NAMES.add(cls.__name__) 51 | return cls 52 | 53 | return register_task_cls 54 | 55 | 56 | # automatically import any Python files in the tasks/ directory 57 | for file in os.listdir(os.path.dirname(__file__)): 58 | if file.endswith('.py') and not file.startswith('_'): 59 | task_name = file[:file.find('.py')] 60 | importlib.import_module('fairseq.tasks.' + task_name) 61 | 62 | # expose `task_parser` for sphinx 63 | if task_name in TASK_REGISTRY: 64 | parser = argparse.ArgumentParser(add_help=False) 65 | group_task = parser.add_argument_group('Task name') 66 | # fmt: off 67 | group_task.add_argument('--task', metavar=task_name, 68 | help='Enable this task with: ``--task=' + task_name + '``') 69 | # fmt: on 70 | group_args = parser.add_argument_group('Additional command-line arguments') 71 | TASK_REGISTRY[task_name].add_args(group_args) 72 | globals()[task_name + '_parser'] = parser 73 | 74 | 75 | def get_task(name): 76 | return TASK_REGISTRY[name] 77 | -------------------------------------------------------------------------------- /fairseq/tasks/audio_pretraining.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import os 7 | 8 | from fairseq.data import FileAudioDataset 9 | from .
import FairseqTask, register_task 10 | 11 | 12 | @register_task('audio_pretraining') 13 | class AudioPretrainingTask(FairseqTask): 14 | """ 15 | Task for pre-training models (e.g., wav2vec) on raw, unlabeled audio. 16 | """ 17 | 18 | @staticmethod 19 | def add_args(parser): 20 | """Add task-specific arguments to the parser.""" 21 | parser.add_argument('data', help='path to data directory') 22 | parser.add_argument('--sample-rate', default=16000, type=int, 23 | help='target sample rate. audio files will be up/down sampled to this rate') 24 | parser.add_argument('--max-sample-size', default=None, type=int, 25 | help='max sample size to crop to for batching. default = min sample length') 26 | parser.add_argument('--min-sample-size', default=None, type=int, 27 | help='min sample size to crop to for batching. default = same as --max-sample-size') 28 | 29 | def __init__(self, args): 30 | super().__init__(args) 31 | 32 | @classmethod 33 | def setup_task(cls, args, **kwargs): 34 | """Setup the task (e.g., load dictionaries). 35 | 36 | Args: 37 | args (argparse.Namespace): parsed command-line arguments 38 | """ 39 | return cls(args) 40 | 41 | def load_dataset(self, split, **kwargs): 42 | """Load a given dataset split. 43 | 44 | Args: 45 | split (str): name of the split (e.g., train, valid, test) 46 | """ 47 | 48 | manifest = os.path.join(self.args.data, '{}.tsv'.format(split)) 49 | self.datasets[split] = FileAudioDataset(manifest, 50 | sample_rate=self.args.sample_rate, 51 | max_sample_size=self.args.max_sample_size, 52 | min_sample_size=self.args.min_sample_size) 53 | 54 | @property 55 | def target_dictionary(self): 56 | """Return the :class:`~fairseq.data.Dictionary` for the language 57 | model.""" 58 | return None 59 | -------------------------------------------------------------------------------- /fairseq/tasks/translation_from_pretrained_xlm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary 7 | from fairseq.tasks.translation import TranslationTask 8 | 9 | from . import register_task 10 | 11 | 12 | @register_task("translation_from_pretrained_xlm") 13 | class TranslationFromPretrainedXLMTask(TranslationTask): 14 | """ 15 | Same as TranslationTask except use the MaskedLMDictionary class so that 16 | we can load data that was binarized with the MaskedLMDictionary class. 17 | 18 | This task should be used for the entire training pipeline when we want to 19 | train an NMT model from a pretrained XLM checkpoint: binarizing NMT data, 20 | training NMT with the pretrained XLM checkpoint, and subsequent evaluation 21 | of that trained model. 22 | """ 23 | 24 | @classmethod 25 | def load_dictionary(cls, filename): 26 | """Load the masked LM dictionary from the filename 27 | 28 | Args: 29 | filename (str): the filename 30 | """ 31 | return MaskedLMDictionary.load(filename) 32 | -------------------------------------------------------------------------------- /fairseq/tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | import re 7 | 8 | SPACE_NORMALIZER = re.compile(r"\s+") 9 | 10 | 11 | def tokenize_line(line): 12 | line = SPACE_NORMALIZER.sub(" ", line) 13 | line = line.strip() 14 | return line.split() 15 | -------------------------------------------------------------------------------- /fairseq_cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yongchanghao/multi-task-nat/b9bf3bd82caa96c75d1291900cf2d6ad5f08a2ac/fairseq_cli/__init__.py -------------------------------------------------------------------------------- /fairseq_cli/score.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | BLEU scoring of generated translations against reference translations. 8 | """ 9 | 10 | import argparse 11 | import os 12 | import sys 13 | 14 | from fairseq import bleu 15 | from fairseq.data import dictionary 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.') 20 | # fmt: off 21 | parser.add_argument('-s', '--sys', default='-', help='system output') 22 | parser.add_argument('-r', '--ref', required=True, help='references') 23 | parser.add_argument('-o', '--order', default=4, metavar='N', 24 | type=int, help='consider ngrams up to this order') 25 | parser.add_argument('--ignore-case', action='store_true', 26 | help='case-insensitive scoring') 27 | parser.add_argument('--sacrebleu', action='store_true', 28 | help='score with sacrebleu') 29 | parser.add_argument('--sentence-bleu', action='store_true', 30 | help='report sentence-level BLEUs (i.e., with +1 smoothing)') 31 | # fmt: on 32 | return parser 33 | 34 | 35 | def main(): 36 | parser = get_parser() 37 | args = parser.parse_args() 38 | print(args) 39 | 40 | assert args.sys == '-' or os.path.exists(args.sys), \ 41 | "System output file {} does not exist".format(args.sys) 42 | assert os.path.exists(args.ref), \ 43 | "Reference file {} does not exist".format(args.ref) 44 | 45 | dict = dictionary.Dictionary() 46 | 47 | def readlines(fd): 48 | for line in fd.readlines(): 49 | if args.ignore_case: 50 | yield line.lower() 51 | else: 52 | yield line 53 | 54 | if args.sacrebleu: 55 | import sacrebleu 56 | 57 | def score(fdsys): 58 | with open(args.ref, encoding="utf8") as fdref: 59 | print(sacrebleu.corpus_bleu(fdsys, [fdref])) 60 | elif args.sentence_bleu: 61 | def score(fdsys): 62 | with open(args.ref, encoding="utf8") as fdref: 63 | scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) 64 | for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))): 65 | scorer.reset(one_init=True) 66 | sys_tok = dict.encode_line(sys_tok) 67 | ref_tok = dict.encode_line(ref_tok) 68 | scorer.add(ref_tok, sys_tok) 69 | print(i, scorer.result_string(args.order)) 70 | else: 71 | def score(fdsys): 72 | with open(args.ref, encoding="utf8") as fdref: 73 | scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) 74 | for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)): 75 | sys_tok = dict.encode_line(sys_tok) 76 | ref_tok = dict.encode_line(ref_tok) 77 | scorer.add(ref_tok, sys_tok) 78 | print(scorer.result_string(args.order)) 79 | 80 | if args.sys == '-': 81 | score(sys.stdin) 82 | else: 83 | with open(args.sys, 'r', encoding="utf8") as f: 84 | 
score(f) 85 | 86 | 87 | if __name__ == '__main__': 88 | main() 89 | -------------------------------------------------------------------------------- /fairseq_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yongchanghao/multi-task-nat/b9bf3bd82caa96c75d1291900cf2d6ad5f08a2ac/fairseq_logo.png -------------------------------------------------------------------------------- /hubconf.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import functools 7 | 8 | from fairseq.hub_utils import BPEHubInterface as bpe  # noqa 9 | from fairseq.hub_utils import TokenizerHubInterface as tokenizer  # noqa 10 | from fairseq.models import MODEL_REGISTRY 11 | 12 | 13 | dependencies = [ 14 | 'numpy', 15 | 'regex', 16 | 'requests', 17 | 'torch', 18 | ] 19 | 20 | 21 | # torch.hub doesn't build Cython components, so if they are not found then try 22 | # to build them here 23 | try: 24 | import fairseq.data.token_block_utils_fast 25 | except (ImportError, ModuleNotFoundError): 26 | try: 27 | import cython 28 | import os 29 | from setuptools import sandbox 30 | sandbox.run_setup( 31 | os.path.join(os.path.dirname(__file__), 'setup.py'), 32 | ['build_ext', '--inplace'], 33 | ) 34 | except (ImportError, ModuleNotFoundError): 35 | print( 36 | 'Unable to build Cython components. Please make sure Cython is ' 37 | 'installed if the torch.hub model you are loading depends on it.' 38 | ) 39 | 40 | 41 | for _model_type, _cls in MODEL_REGISTRY.items(): 42 | for model_name in _cls.hub_models().keys(): 43 | globals()[model_name] = functools.partial( 44 | _cls.from_pretrained, 45 | model_name, 46 | ) 47 | # to simplify the interface we only expose named models 48 | # globals()[_model_type] = _cls.from_pretrained 49 | -------------------------------------------------------------------------------- /run/test.sh: -------------------------------------------------------------------------------- 1 | # -------- manual configuration --------- 2 | # see run/train.sh for the specific meaning of each field 3 | NAME=# 4 | WRITE=# 5 | DATA=# 6 | DATA_BIN=# 7 | SRC=# 8 | TGT=# 9 | 10 | # -------- normally no need to change --------- 11 | CHECKPOINT=$WRITE/$NAME/ckpts 12 | DECODING=$WRITE/$NAME/decoding/test 13 | mkdir -p $DECODING 14 | # Note that you have to obtain the averaged model 15 | # following the instructions in the README file. 16 | ckpt=averaged.pt 17 | 18 | # -------- specific arguments --------- 19 | CUDA_VISIBLE_DEVICES=0 python generate.py \ 20 | $DATA_BIN \ 21 | --fp16 \ 22 | -s $SRC -t $TGT \ 23 | --gen-subset test \ 24 | --max-tokens 2048 \ 25 | --task translation_mt \ 26 | --generator nat \ 27 | --path $CHECKPOINT/$ckpt \ 28 | --iter-decode-max-iter 10 \ 29 | --iter-decode-force-max-iter \ 30 | --remove-bpe \ 31 | --iter-decode-with-beam 5 \ 32 | --valid-decoding-path $DECODING \ 33 | --decoding-path $DECODING \ 34 | --multi-bleu-path ./scripts \ 35 | --num-ref $DATA=1 \ 36 | |& tee $DECODING/$ckpt.gen 37 | -------------------------------------------------------------------------------- /run/train.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 2 | 3 | 4 | # -------- manual configuration --------- 5 | NAME=# {{whatever, a fancy name for a single experiment, e.g.
mtnat, bert-xxxxxl, GPT-5, ...}} 6 | WRITE=# {{root directory for all experiments, e.g. $HOME/my_experiments_dir }} 7 | DATA=# {{your original text data, for evaluating bleu during training}} 8 | DATA_BIN=# {{preprocessed data here}} 9 | SRC=# {{suffix of the source language}} 10 | TGT=# {{suffix of the target language}} 11 | 12 | 13 | # -------- normally no need to change --------- 14 | CHECKPOINT="$WRITE/$NAME/ckpts" 15 | TB="$WRITE/$NAME/tensorboard" 16 | VALID_PATH="$WRITE/$NAME/decoding" 17 | mkdir -p $VALID_PATH 18 | sed -r 's/(@@ )|(@@ ?$)//g' < $DATA/valid.$SRC > $VALID_PATH/valid.$SRC 19 | sed -r 's/(@@ )|(@@ ?$)//g' < $DATA/valid.$TGT > $VALID_PATH/valid.$TGT 20 | 21 | 22 | # -------- specific arguments --------- 23 | python train.py \ 24 | $DATA_BIN \ 25 | --fp16 \ 26 | -s $SRC -t $TGT \ 27 | --save-dir $CHECKPOINT \ 28 | --ddp-backend=no_c10d \ 29 | --task translation_mt \ 30 | --criterion mt_loss \ 31 | --arch mt_transformer \ 32 | --noise random_mask \ 33 | --optimizer adam --adam-betas '(0.9,0.98)' \ 34 | --lr 0.0005 --lr-scheduler inverse_sqrt \ 35 | --min-lr '1e-09' --warmup-updates 10000 \ 36 | --warmup-init-lr '1e-07' --label-smoothing 0.1 \ 37 | --dropout 0.3 --weight-decay 0.01 \ 38 | --decoder-learned-pos \ 39 | --encoder-learned-pos \ 40 | --share-all-embeddings \ 41 | --at-weight 0.5 \ 42 | --nat-weight 0.5 \ 43 | --at-drop-rate 0 \ 44 | --nat-drop-rate 0 \ 45 | --log-format 'simple' --log-interval 100 \ 46 | --max-tokens 16000 \ 47 | --update-freq 1 \ 48 | --save-interval-updates 2000 \ 49 | --apply-bert-init \ 50 | --max-update 300000 \ 51 | --no-epoch-checkpoints \ 52 | --quiet \ 53 | --max-sentences-valid 128 \ 54 | --all-gather-list-size 522240 \ 55 | --num-ref $DATA=1 \ 56 | --valid-decoding-path $VALID_PATH \ 57 | --share-encoder \ 58 | --remove-bpe \ 59 | --multi-bleu-path ./scripts \ 60 | --selection-criterion nat \ 61 | --tensorboard-logdir $TB -------------------------------------------------------------------------------- /score.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | BLEU scoring of generated translations against reference translations.
8 | """ 9 | 10 | import argparse 11 | import os 12 | import sys 13 | 14 | from fairseq import bleu 15 | from fairseq.data import dictionary 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.') 20 | # fmt: off 21 | parser.add_argument('-s', '--sys', default='-', help='system output') 22 | parser.add_argument('-r', '--ref', required=True, help='references') 23 | parser.add_argument('-o', '--order', default=4, metavar='N', 24 | type=int, help='consider ngrams up to this order') 25 | parser.add_argument('--ignore-case', action='store_true', 26 | help='case-insensitive scoring') 27 | parser.add_argument('--sacrebleu', action='store_true', 28 | help='score with sacrebleu') 29 | parser.add_argument('--sentence-bleu', action='store_true', 30 | help='report sentence-level BLEUs (i.e., with +1 smoothing)') 31 | # fmt: on 32 | return parser 33 | 34 | 35 | def main(): 36 | parser = get_parser() 37 | args = parser.parse_args() 38 | print(args) 39 | 40 | assert args.sys == '-' or os.path.exists(args.sys), \ 41 | "System output file {} does not exist".format(args.sys) 42 | assert os.path.exists(args.ref), \ 43 | "Reference file {} does not exist".format(args.ref) 44 | 45 | dict = dictionary.Dictionary() 46 | 47 | def readlines(fd): 48 | for line in fd.readlines(): 49 | if args.ignore_case: 50 | yield line.lower() 51 | else: 52 | yield line 53 | 54 | if args.sacrebleu: 55 | import sacrebleu 56 | 57 | def score(fdsys): 58 | with open(args.ref, encoding="utf8") as fdref: 59 | print(sacrebleu.corpus_bleu(fdsys, [fdref])) 60 | elif args.sentence_bleu: 61 | def score(fdsys): 62 | with open(args.ref, encoding="utf8") as fdref: 63 | scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) 64 | for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))): 65 | scorer.reset(one_init=True) 66 | sys_tok = dict.encode_line(sys_tok) 67 | ref_tok = dict.encode_line(ref_tok) 68 | scorer.add(ref_tok, sys_tok) 69 | print(i, scorer.result_string(args.order)) 70 | else: 71 | def score(fdsys): 72 | with open(args.ref, encoding="utf8") as fdref: 73 | scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) 74 | for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)): 75 | sys_tok = dict.encode_line(sys_tok) 76 | ref_tok = dict.encode_line(ref_tok) 77 | scorer.add(ref_tok, sys_tok) 78 | print(scorer.result_string(args.order)) 79 | 80 | if args.sys == '-': 81 | score(sys.stdin) 82 | else: 83 | with open(args.sys, 'r', encoding="utf8") as f: 84 | score(f) 85 | 86 | 87 | if __name__ == '__main__': 88 | main() 89 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yongchanghao/multi-task-nat/b9bf3bd82caa96c75d1291900cf2d6ad5f08a2ac/scripts/__init__.py -------------------------------------------------------------------------------- /scripts/compare_namespaces.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Helper script to compare two argparse.Namespace objects.""" 3 | 4 | from argparse import Namespace # noqa 5 | 6 | 7 | def main(): 8 | 9 | ns1 = eval(input('Namespace 1: ')) 10 | ns2 = eval(input('Namespace 2: ')) 11 | 12 | def keys(ns): 13 | ks = set() 14 | for k in dir(ns): 15 | if not k.startswith('_'): 16 | ks.add(k) 17 | return ks 18 | 19 | k1 = keys(ns1) 20 | k2 = keys(ns2) 21 | 22 | def 
print_keys(ks, ns1, ns2=None): 23 | for k in ks: 24 | if ns2 is None: 25 | print('{}\t{}'.format(k, getattr(ns1, k, None))) 26 | else: 27 | print('{}\t{}\t{}'.format(k, getattr(ns1, k, None), getattr(ns2, k, None))) 28 | 29 | print('Keys unique to namespace 1:') 30 | print_keys(k1 - k2, ns1) 31 | print() 32 | 33 | print('Keys unique to namespace 2:') 34 | print_keys(k2 - k1, ns2) 35 | print() 36 | 37 | print('Overlapping keys with different values:') 38 | ks = [k for k in k1 & k2 if getattr(ns1, k, 'None') != getattr(ns2, k, 'None')] 39 | print_keys(ks, ns1, ns2) 40 | print() 41 | 42 | 43 | if __name__ == '__main__': 44 | main() 45 | -------------------------------------------------------------------------------- /scripts/compound_split_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ]; then 4 | echo "usage: $0 GENERATE_PY_OUTPUT" 5 | exit 1 6 | fi 7 | 8 | GEN=$1 9 | 10 | SYS=$GEN.sys 11 | REF=$GEN.ref 12 | 13 | if [ $(tail -n 1 $GEN | grep BLEU | wc -l) -ne 1 ]; then 14 | echo "not done generating" 15 | exit 1 16 | fi 17 | 18 | grep ^H $GEN | cut -f3- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $SYS 19 | grep ^T $GEN | cut -f2- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $REF 20 | fairseq-score --sys $SYS --ref $REF 21 | -------------------------------------------------------------------------------- /scripts/convert_dictionary.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (c) Facebook, Inc. and its affiliates. 2 | -- 3 | -- This source code is licensed under the MIT license found in the 4 | -- LICENSE file in the root directory of this source tree. 5 | -- 6 | -- Usage: convert_dictionary.lua <dict.th7> 7 | require 'fairseq' 8 | require 'torch' 9 | require 'paths' 10 | 11 | if #arg < 1 then 12 | print('usage: convert_dictionary.lua <dict.th7>') 13 | os.exit(1) 14 | end 15 | if not paths.filep(arg[1]) then 16 | print('error: file does not exist: ' .. arg[1]) 17 | os.exit(1) 18 | end 19 | 20 | dict = torch.load(arg[1]) 21 | dst = paths.basename(arg[1]):gsub('.th7', '.txt') 22 | assert(dst:match('.txt$')) 23 | 24 | f = io.open(dst, 'w') 25 | for idx, symbol in ipairs(dict.index_to_symbol) do 26 | if idx > dict.cutoff then 27 | break 28 | end 29 | f:write(symbol) 30 | f:write(' ') 31 | f:write(dict.index_to_freq[idx]) 32 | f:write('\n') 33 | end 34 | f:close() 35 | -------------------------------------------------------------------------------- /scripts/count_docs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Count the number of documents and average number of lines and tokens per 8 | document in a large file. Documents should be separated by a single empty line.
9 | """ 10 | 11 | import argparse 12 | import gzip 13 | import sys 14 | 15 | import numpy as np 16 | 17 | 18 | def main(): 19 | parser = argparse.ArgumentParser() 20 | parser.add_argument('input') 21 | parser.add_argument('--gzip', action='store_true') 22 | args = parser.parse_args() 23 | 24 | def gopen(): 25 | if args.gzip: 26 | return gzip.open(args.input, 'r') 27 | else: 28 | return open(args.input, 'r', encoding='utf-8') 29 | 30 | num_lines = [] 31 | num_toks = [] 32 | with gopen() as h: 33 | num_docs = 1 34 | num_lines_in_doc = 0 35 | num_toks_in_doc = 0 36 | for i, line in enumerate(h): 37 | if len(line.strip()) == 0: # empty line indicates new document 38 | num_docs += 1 39 | num_lines.append(num_lines_in_doc) 40 | num_toks.append(num_toks_in_doc) 41 | num_lines_in_doc = 0 42 | num_toks_in_doc = 0 43 | else: 44 | num_lines_in_doc += 1 45 | num_toks_in_doc += len(line.rstrip().split()) 46 | if i % 1000000 == 0: 47 | print(i, file=sys.stderr, end="", flush=True) 48 | elif i % 100000 == 0: 49 | print(".", file=sys.stderr, end="", flush=True) 50 | print(file=sys.stderr, flush=True) 51 | 52 | print("found {} docs".format(num_docs)) 53 | print("average num lines per doc: {}".format(np.mean(num_lines))) 54 | print("average num toks per doc: {}".format(np.mean(num_toks))) 55 | 56 | 57 | if __name__ == '__main__': 58 | main() 59 | -------------------------------------------------------------------------------- /scripts/sacrebleu_pregen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 4 ]; then 4 | echo "usage: $0 TESTSET SRCLANG TGTLANG GEN" 5 | exit 1 6 | fi 7 | 8 | TESTSET=$1 9 | SRCLANG=$2 10 | TGTLANG=$3 11 | 12 | GEN=$4 13 | 14 | echo 'Cloning Moses github repository (for tokenization scripts)...' 15 | git clone https://github.com/moses-smt/mosesdecoder.git 16 | 17 | SCRIPTS=mosesdecoder/scripts 18 | DETOKENIZER=$SCRIPTS/tokenizer/detokenizer.perl 19 | 20 | grep ^H $GEN \ 21 | | sed 's/^H\-//' \ 22 | | sort -n -k 1 \ 23 | | cut -f 3 \ 24 | | perl $DETOKENIZER -l $TGTLANG \ 25 | | sed "s/ - /-/g" \ 26 | > $GEN.sorted.detok 27 | 28 | sacrebleu --test-set $TESTSET --language-pair "${SRCLANG}-${TGTLANG}" < $GEN.sorted.detok 29 | -------------------------------------------------------------------------------- /scripts/shard_docs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Split a large file into shards while respecting document boundaries. Documents 8 | should be separated by a single empty line. 
9 | """ 10 | 11 | import argparse 12 | import contextlib 13 | 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('input') 18 | parser.add_argument('--num-shards', type=int) 19 | args = parser.parse_args() 20 | 21 | assert args.num_shards is not None and args.num_shards > 1 22 | 23 | with open(args.input, 'r', encoding='utf-8') as h: 24 | with contextlib.ExitStack() as stack: 25 | outputs = [ 26 | stack.enter_context(open(args.input + ".shard" + str(i), "w", encoding="utf-8")) 27 | for i in range(args.num_shards) 28 | ] 29 | 30 | doc = [] 31 | first_doc = [True]*args.num_shards 32 | def output_doc(i): 33 | if not first_doc[i]: 34 | outputs[i].write("\n") 35 | first_doc[i] = False 36 | for line in doc: 37 | outputs[i].write(line) 38 | doc.clear() 39 | 40 | num_docs = 0 41 | for line in h: 42 | if line.strip() == "": # empty line indicates new document 43 | output_doc(num_docs % args.num_shards) 44 | num_docs += 1 45 | else: 46 | doc.append(line) 47 | output_doc(num_docs % args.num_shards) 48 | 49 | 50 | if __name__ == '__main__': 51 | main() 52 | -------------------------------------------------------------------------------- /scripts/split_train_valid_docs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Split a large file into a train and valid set while respecting document 8 | boundaries. Documents should be separated by a single empty line. 9 | """ 10 | 11 | import argparse 12 | import random 13 | import sys 14 | 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser() 18 | parser.add_argument('input') 19 | parser.add_argument('sample_output', help='train output file') 20 | parser.add_argument('remainder_output', help='valid output file') 21 | parser.add_argument('-k', type=int, help="remainder size") 22 | parser.add_argument('--lines', action='store_true', 23 | help='split lines instead of docs') 24 | args = parser.parse_args() 25 | 26 | assert args.k is not None 27 | 28 | sample = [] 29 | remainder = [] 30 | num_docs = [0] 31 | 32 | def update_sample(doc): 33 | if len(sample) < args.k: 34 | sample.append(doc.copy()) 35 | else: 36 | i = num_docs[0] 37 | j = random.randrange(i + 1) 38 | if j < args.k: 39 | remainder.append(sample[j]) 40 | sample[j] = doc.copy() 41 | else: 42 | remainder.append(doc.copy()) 43 | num_docs[0] += 1 44 | doc.clear() 45 | 46 | with open(args.input, 'r', encoding='utf-8') as h: 47 | doc = [] 48 | for i, line in enumerate(h): 49 | if line.strip() == "": # empty line indicates new document 50 | update_sample(doc) 51 | else: 52 | doc.append(line) 53 | if args.lines: 54 | update_sample(doc) 55 | if i % 1000000 == 0: 56 | print(i, file=sys.stderr, end="", flush=True) 57 | elif i % 100000 == 0: 58 | print(".", file=sys.stderr, end="", flush=True) 59 | if len(doc) > 0: 60 | update_sample(doc) 61 | print(file=sys.stderr, flush=True) 62 | 63 | assert len(sample) == args.k 64 | 65 | with open(args.sample_output, 'w', encoding='utf-8') as out: 66 | first = True 67 | for doc in sample: 68 | if not first and not args.lines: 69 | out.write("\n") 70 | first = False 71 | for line in doc: 72 | out.write(line) 73 | 74 | with open(args.remainder_output, 'w', encoding='utf-8') as out: 75 | first = True 76 | for doc in remainder: 77 | if not first and not args.lines: 78 | 
out.write("\n") 79 | first = False 80 | for line in doc: 81 | out.write(line) 82 | 83 | 84 | if __name__ == '__main__': 85 | main() 86 | -------------------------------------------------------------------------------- /scripts/spm_decode.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | from __future__ import absolute_import, division, print_function, unicode_literals 9 | 10 | import argparse 11 | 12 | import sentencepiece as spm 13 | 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument("--model", required=True, 18 | help="sentencepiece model to use for decoding") 19 | parser.add_argument("--input", required=True, help="input file to decode") 20 | parser.add_argument("--input_format", choices=["piece", "id"], default="piece") 21 | args = parser.parse_args() 22 | 23 | sp = spm.SentencePieceProcessor() 24 | sp.Load(args.model) 25 | 26 | if args.input_format == "piece": 27 | def decode(l): 28 | return "".join(sp.DecodePieces(l)) 29 | elif args.input_format == "id": 30 | def decode(l): 31 | return "".join(sp.DecodeIds(l)) 32 | else: 33 | raise NotImplementedError 34 | 35 | def tok2int(tok): 36 | # remap reference-side (represented as <>) to 0 37 | return int(tok) if tok != "<>" else 0 38 | 39 | with open(args.input, "r", encoding="utf-8") as h: 40 | for line in h: 41 | if args.input_format == "id": 42 | print(decode(list(map(tok2int, line.rstrip().split())))) 43 | elif args.input_format == "piece": 44 | print(decode(line.rstrip().split())) 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /scripts/spm_encode.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
7 | 8 | from __future__ import absolute_import, division, print_function, unicode_literals 9 | 10 | import argparse 11 | import contextlib 12 | import sys 13 | 14 | import sentencepiece as spm 15 | 16 | 17 | def main(): 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument("--model", required=True, 20 | help="sentencepiece model to use for encoding") 21 | parser.add_argument("--inputs", nargs="+", default=['-'], 22 | help="input files to filter/encode") 23 | parser.add_argument("--outputs", nargs="+", default=['-'], 24 | help="path to save encoded outputs") 25 | parser.add_argument("--output_format", choices=["piece", "id"], default="piece") 26 | parser.add_argument("--min-len", type=int, metavar="N", 27 | help="filter sentence pairs with fewer than N tokens") 28 | parser.add_argument("--max-len", type=int, metavar="N", 29 | help="filter sentence pairs with more than N tokens") 30 | args = parser.parse_args() 31 | 32 | assert len(args.inputs) == len(args.outputs), \ 33 | "number of input and output paths should match" 34 | 35 | sp = spm.SentencePieceProcessor() 36 | sp.Load(args.model) 37 | 38 | if args.output_format == "piece": 39 | def encode(l): 40 | return sp.EncodeAsPieces(l) 41 | elif args.output_format == "id": 42 | def encode(l): 43 | return list(map(str, sp.EncodeAsIds(l))) 44 | else: 45 | raise NotImplementedError 46 | 47 | if args.min_len is not None or args.max_len is not None: 48 | def valid(line): 49 | return ( 50 | (args.min_len is None or len(line) >= args.min_len) 51 | and (args.max_len is None or len(line) <= args.max_len) 52 | ) 53 | else: 54 | def valid(lines): 55 | return True 56 | 57 | with contextlib.ExitStack() as stack: 58 | inputs = [ 59 | stack.enter_context(open(input, "r", encoding="utf-8")) \ 60 | if input != "-" else sys.stdin 61 | for input in args.inputs 62 | ] 63 | outputs = [ 64 | stack.enter_context(open(output, "w", encoding="utf-8")) \ 65 | if output != "-" else sys.stdout 66 | for output in args.outputs 67 | ] 68 | 69 | stats = { 70 | "num_empty": 0, 71 | "num_filtered": 0, 72 | } 73 | 74 | def encode_line(line): 75 | line = line.strip() 76 | if len(line) > 0: 77 | line = encode(line) 78 | if valid(line): 79 | return line 80 | else: 81 | stats["num_filtered"] += 1 82 | else: 83 | stats["num_empty"] += 1 84 | return None 85 | 86 | for i, lines in enumerate(zip(*inputs), start=1): 87 | enc_lines = list(map(encode_line, lines)) 88 | if not any(enc_line is None for enc_line in enc_lines): 89 | for enc_line, output_h in zip(enc_lines, outputs): 90 | print(" ".join(enc_line), file=output_h) 91 | if i % 10000 == 0: 92 | print("processed {} lines".format(i), file=sys.stderr) 93 | 94 | print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr) 95 | print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr) 96 | 97 | 98 | if __name__ == "__main__": 99 | main() 100 | -------------------------------------------------------------------------------- /scripts/spm_train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
7 | 8 | from __future__ import absolute_import, division, print_function, unicode_literals 9 | 10 | import sys 11 | 12 | import sentencepiece as spm 13 | 14 | 15 | if __name__ == "__main__": 16 | spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:])) 17 | -------------------------------------------------------------------------------- /scripts/wav2vec_manifest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Data pre-processing: build train/valid .tsv manifests of raw audio files for wav2vec pre-training. 8 | """ 9 | 10 | import argparse 11 | import glob 12 | import os 13 | import soundfile 14 | import random 15 | 16 | 17 | def get_parser(): 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('root', metavar='DIR', help='root directory containing flac files to index') 20 | parser.add_argument('--valid-percent', default=0.01, type=float, metavar='D', 21 | help='percentage of data to use as validation set (between 0 and 1)') 22 | parser.add_argument('--dest', default='.', type=str, metavar='DIR', help='output directory') 23 | parser.add_argument('--ext', default='flac', type=str, metavar='EXT', help='extension to look for') 24 | parser.add_argument('--seed', default=42, type=int, metavar='N', help='random seed') 25 | parser.add_argument('--path-must-contain', default=None, type=str, metavar='FRAG', 26 | help='if set, path must contain this substring for a file to be included in the manifest') 27 | return parser 28 | 29 | 30 | def main(args): 31 | assert args.valid_percent >= 0 and args.valid_percent <= 1. 32 | 33 | dir_path = os.path.realpath(args.root) 34 | search_path = os.path.join(dir_path, '**/*.' + args.ext) 35 | rand = random.Random(args.seed) 36 | 37 | with open(os.path.join(args.dest, 'train.tsv'), 'w') as train_f, open( 38 | os.path.join(args.dest, 'valid.tsv'), 'w') as valid_f: 39 | print(dir_path, file=train_f) 40 | print(dir_path, file=valid_f) 41 | 42 | for fname in glob.iglob(search_path, recursive=True): 43 | file_path = os.path.realpath(fname) 44 | 45 | if args.path_must_contain and args.path_must_contain not in file_path: 46 | continue 47 | 48 | frames = soundfile.info(fname).frames 49 | dest = train_f if rand.random() > args.valid_percent else valid_f 50 | print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=dest) 51 | 52 | 53 | if __name__ == '__main__': 54 | parser = get_parser() 55 | args = parser.parse_args() 56 | main(args) 57 | -------------------------------------------------------------------------------- /validate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree.
7 | 8 | import torch 9 | 10 | from fairseq import checkpoint_utils, options, progress_bar, utils 11 | 12 | 13 | def main(args, override_args=None): 14 | utils.import_user_module(args) 15 | 16 | use_fp16 = args.fp16 17 | use_cuda = torch.cuda.is_available() and not args.cpu 18 | 19 | if override_args is not None: 20 | overrides = vars(override_args) 21 | overrides.update(eval(getattr(override_args, 'model_overrides', '{}'))) 22 | else: 23 | overrides = None 24 | 25 | # Load ensemble 26 | print('| loading model(s) from {}'.format(args.path)) 27 | models, model_args, task = checkpoint_utils.load_model_ensemble_and_task( 28 | [args.path], 29 | arg_overrides=overrides, 30 | ) 31 | model = models[0] 32 | 33 | # Move models to GPU 34 | for model in models: 35 | if use_fp16: 36 | model.half() 37 | if use_cuda: 38 | model.cuda() 39 | 40 | # Print args 41 | print(model_args) 42 | 43 | # Build criterion 44 | criterion = task.build_criterion(model_args) 45 | criterion.eval() 46 | 47 | # Load valid dataset (we load training data below, based on the latest checkpoint) 48 | for subset in args.valid_subset.split(','): 49 | try: 50 | task.load_dataset(subset, combine=False, epoch=0) 51 | dataset = task.dataset(subset) 52 | except KeyError: 53 | raise Exception('Cannot find dataset: ' + subset) 54 | 55 | # Initialize data iterator 56 | itr = task.get_batch_iterator( 57 | dataset=dataset, 58 | max_tokens=args.max_tokens, 59 | max_sentences=args.max_sentences, 60 | max_positions=utils.resolve_max_positions( 61 | task.max_positions(), 62 | *[m.max_positions() for m in models], 63 | ), 64 | ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, 65 | required_batch_size_multiple=args.required_batch_size_multiple, 66 | seed=args.seed, 67 | num_workers=args.num_workers, 68 | ).next_epoch_itr(shuffle=False) 69 | progress = progress_bar.build_progress_bar( 70 | args, itr, 71 | prefix='valid on \'{}\' subset'.format(subset), 72 | no_progress_bar='simple' 73 | ) 74 | 75 | log_outputs = [] 76 | for i, sample in enumerate(progress): 77 | sample = utils.move_to_cuda(sample) if use_cuda else sample 78 | _loss, _sample_size, log_output = task.valid_step(sample, model, criterion) 79 | progress.log(log_output, step=i) 80 | log_outputs.append(log_output) 81 | 82 | log_output = task.aggregate_logging_outputs(log_outputs, criterion) 83 | utils.get_perplexity(log_output['nll_loss']) 84 | progress.print(log_output, tag=subset, step=i) 85 | 86 | 87 | def cli_main(): 88 | parser = options.get_validation_parser() 89 | args = options.parse_args_and_arch(parser) 90 | 91 | # only override args that are explicitly given on the command line 92 | override_parser = options.get_validation_parser() 93 | override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True) 94 | 95 | main(args, override_args) 96 | 97 | 98 | if __name__ == '__main__': 99 | cli_main() 100 | --------------------------------------------------------------------------------
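
One piece of the code above that rewards a closer look is the cyclical (triangular) learning-rate scheduler whose step_update() opens this section, i.e. the scheduler that registers --max-lr, --lr-period-updates, --lr-shrink and --shrink-min. Its schedule is pure arithmetic, so it can be sanity-checked without constructing an optimizer. The sketch below is a minimal standalone re-implementation of that arithmetic, not part of the repository: min_lr=1e-4 and max_lr=1e-3 are made-up illustrative values (in the scheduler they come from --lr and the required --max-lr), while period and lr_shrink mirror the --lr-period-updates and --lr-shrink defaults.

import math

def triangular_lr(num_updates, min_lr=1e-4, max_lr=1e-3,
                  period=5000, lr_shrink=0.1, shrink_min=False):
    # A full cycle spans `period` updates: the rate ramps linearly from
    # min_lr to max_lr over the first half and back down over the second;
    # each subsequent cycle's peak is scaled by lr_shrink.
    stepsize = period // 2
    cycle = math.floor(num_updates / (2 * stepsize))
    shrink = lr_shrink ** cycle
    hi = max_lr * shrink
    lo = min_lr * shrink if shrink_min else min_lr
    x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)  # 1 -> 0 -> 1 per cycle
    return lo + (hi - lo) * max(0, 1 - x)

for step in (0, 1250, 2500, 5000, 7500):
    print(step, triangular_lr(step))

The rate sits at min_lr at update 0, reaches max_lr at update 2500 (half of the 5000-update cycle), returns to min_lr by update 5000, and peaks again at update 7500 with the peak scaled by the 0.1 shrink factor; passing shrink_min=True shrinks the floor as well, matching the --shrink-min flag. Note that the run scripts above do not use this scheduler (train.sh selects --lr-scheduler inverse_sqrt), so the sketch is purely illustrative.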