├── .gitignore ├── README.md ├── bm25_make_batches.py ├── bm25_search.py ├── build_dstore.py ├── eval_lm-trime.py ├── eval_lm.py ├── fairseq ├── __init__.py ├── benchmark │ ├── dummy_lm.py │ ├── dummy_masked_lm.py │ └── dummy_model.py ├── binarizer.py ├── bleu.py ├── checkpoint_utils.py ├── clib │ ├── libbleu │ │ ├── libbleu.cpp │ │ └── module.cpp │ ├── libnat │ │ └── edit_dist.cpp │ └── libnat_cuda │ │ ├── binding.cpp │ │ ├── edit_dist.cu │ │ └── edit_dist.h ├── criterions │ ├── __init__.py │ ├── adaptive_loss.py │ ├── binary_cross_entropy.py │ ├── composite_loss.py │ ├── cross_entropy.py │ ├── fairseq_criterion.py │ ├── label_smoothed_cross_entropy.py │ ├── label_smoothed_cross_entropy_with_alignment.py │ ├── legacy_masked_lm.py │ ├── masked_lm.py │ ├── nat_loss.py │ ├── sentence_prediction.py │ ├── sentence_ranking.py │ ├── trime_ext_loss.py │ ├── trime_long_loss.py │ ├── trime_long_loss_same_device.py │ └── trime_loss.py ├── data │ ├── __init__.py │ ├── append_token_dataset.py │ ├── audio │ │ ├── __init__.py │ │ └── raw_audio_dataset.py │ ├── backtranslation_dataset.py │ ├── base_wrapper_dataset.py │ ├── colorize_dataset.py │ ├── concat_dataset.py │ ├── concat_sentences_dataset.py │ ├── data_utils.py │ ├── data_utils_fast.pyx │ ├── denoising_dataset.py │ ├── dictionary.py │ ├── encoders │ │ ├── __init__.py │ │ ├── fastbpe.py │ │ ├── gpt2_bpe.py │ │ ├── gpt2_bpe_utils.py │ │ ├── hf_bert_bpe.py │ │ ├── moses_tokenizer.py │ │ ├── nltk_tokenizer.py │ │ ├── sentencepiece_bpe.py │ │ ├── space_tokenizer.py │ │ ├── subword_nmt_bpe.py │ │ └── utils.py │ ├── fairseq_dataset.py │ ├── id_dataset.py │ ├── indexed_dataset.py │ ├── iterators.py │ ├── language_pair_dataset.py │ ├── legacy │ │ ├── __init__.py │ │ ├── block_pair_dataset.py │ │ ├── masked_lm_dataset.py │ │ └── masked_lm_dictionary.py │ ├── list_dataset.py │ ├── lm_context_window_dataset.py │ ├── lru_cache_dataset.py │ ├── mask_tokens_dataset.py │ ├── monolingual_dataset.py │ ├── multi_corpus_sampled_dataset.py │ ├── nested_dictionary_dataset.py │ ├── noising.py │ ├── num_samples_dataset.py │ ├── numel_dataset.py │ ├── offset_tokens_dataset.py │ ├── pad_dataset.py │ ├── plasma_utils.py │ ├── prepend_dataset.py │ ├── prepend_token_dataset.py │ ├── raw_label_dataset.py │ ├── replace_dataset.py │ ├── resampling_dataset.py │ ├── roll_dataset.py │ ├── round_robin_zip_datasets.py │ ├── sharded_dataset.py │ ├── sort_dataset.py │ ├── strip_token_dataset.py │ ├── subsample_dataset.py │ ├── token_block_dataset.py │ ├── token_block_utils_fast.pyx │ ├── transform_eos_dataset.py │ ├── transform_eos_lang_pair_dataset.py │ └── truncate_dataset.py ├── distributed_utils.py ├── file_io.py ├── file_utils.py ├── hub_utils.py ├── incremental_decoding_utils.py ├── iterative_refinement_generator.py ├── knnlm.py ├── legacy_distributed_data_parallel.py ├── meters.py ├── metrics.py ├── models │ ├── __init__.py │ ├── bart │ │ ├── __init__.py │ │ ├── hub_interface.py │ │ └── model.py │ ├── composite_encoder.py │ ├── distributed_fairseq_model.py │ ├── fairseq_decoder.py │ ├── fairseq_encoder.py │ ├── fairseq_incremental_decoder.py │ ├── fairseq_model.py │ ├── fconv.py │ ├── fconv_lm.py │ ├── fconv_self_att.py │ ├── lightconv.py │ ├── lightconv_lm.py │ ├── lstm.py │ ├── lstm_lm.py │ ├── masked_lm.py │ ├── model_utils.py │ ├── multilingual_transformer.py │ ├── nat │ │ ├── __init__.py │ │ ├── cmlm_transformer.py │ │ ├── fairseq_nat_model.py │ │ ├── insertion_transformer.py │ │ ├── iterative_nonautoregressive_transformer.py │ │ ├── levenshtein_transformer.py │ │ ├── 
levenshtein_utils.py │ │ ├── nat_crf_transformer.py │ │ ├── nonautoregressive_ensembles.py │ │ └── nonautoregressive_transformer.py │ ├── roberta │ │ ├── __init__.py │ │ ├── alignment_utils.py │ │ ├── hub_interface.py │ │ ├── model.py │ │ ├── model_camembert.py │ │ └── model_xlmr.py │ ├── transformer.py │ ├── transformer_from_pretrained_xlm.py │ ├── transformer_lm.py │ └── wav2vec.py ├── modules │ ├── __init__.py │ ├── adaptive_input.py │ ├── adaptive_softmax.py │ ├── beamable_mm.py │ ├── character_token_embedder.py │ ├── conv_tbc.py │ ├── cuda_utils.cu │ ├── downsampled_multihead_attention.py │ ├── dynamic_convolution.py │ ├── dynamic_crf_layer.py │ ├── dynamicconv_layer │ │ ├── __init__.py │ │ ├── cuda_function_gen.py │ │ ├── dynamicconv_cuda.cpp │ │ ├── dynamicconv_cuda.cuh │ │ ├── dynamicconv_cuda_kernel.cu │ │ ├── dynamicconv_layer.py │ │ ├── dynamiconv_cpu.cpp │ │ └── setup.py │ ├── gelu.py │ ├── grad_multiply.py │ ├── highway.py │ ├── layer_norm.py │ ├── learned_positional_embedding.py │ ├── lightconv_layer │ │ ├── __init__.py │ │ ├── cuda_function_gen.py │ │ ├── lightconv_cuda.cpp │ │ ├── lightconv_cuda.cuh │ │ ├── lightconv_cuda_kernel.cu │ │ ├── lightconv_layer.py │ │ └── setup.py │ ├── lightweight_convolution.py │ ├── linearized_convolution.py │ ├── logsumexp_moe.py │ ├── mean_pool_gating_network.py │ ├── multihead_attention.py │ ├── positional_embedding.py │ ├── scalar_bias.py │ ├── sinusoidal_positional_embedding.py │ ├── sparse_multihead_attention.py │ ├── sparse_transformer_sentence_encoder.py │ ├── sparse_transformer_sentence_encoder_layer.py │ ├── transformer_layer.py │ ├── transformer_sentence_encoder.py │ ├── transformer_sentence_encoder_layer.py │ ├── unfold.py │ └── vggblock.py ├── optim │ ├── __init__.py │ ├── adadelta.py │ ├── adafactor.py │ ├── adagrad.py │ ├── adam.py │ ├── adamax.py │ ├── bmuf.py │ ├── fairseq_optimizer.py │ ├── fp16_optimizer.py │ ├── fused_adam.py │ ├── fused_lamb.py │ ├── lr_scheduler │ │ ├── __init__.py │ │ ├── cosine_lr_scheduler.py │ │ ├── fairseq_lr_scheduler.py │ │ ├── fixed_schedule.py │ │ ├── inverse_square_root_schedule.py │ │ ├── linear_schedule_with_warmup.py │ │ ├── polynomial_decay_schedule.py │ │ ├── reduce_lr_on_plateau.py │ │ ├── tri_stage_lr_scheduler.py │ │ └── triangular_lr_scheduler.py │ ├── nag.py │ └── sgd.py ├── options.py ├── pdb.py ├── progress_bar.py ├── registry.py ├── search.py ├── sequence_generator.py ├── sequence_scorer.py ├── tasks │ ├── __init__.py │ ├── audio_pretraining.py │ ├── cross_lingual_lm.py │ ├── denoising.py │ ├── fairseq_task.py │ ├── language_modeling.py │ ├── legacy_masked_lm.py │ ├── masked_lm.py │ ├── multilingual_masked_lm.py │ ├── multilingual_translation.py │ ├── semisupervised_translation.py │ ├── sentence_prediction.py │ ├── sentence_ranking.py │ ├── translation.py │ ├── translation_from_pretrained_xlm.py │ ├── translation_lev.py │ └── translation_moe.py ├── tokenizer.py ├── trainer.py └── utils.py ├── fairseq_cli ├── __init__.py ├── eval_lm.py ├── generate.py ├── interactive.py ├── preprocess.py ├── score.py ├── train.py └── validate.py ├── get_data.sh ├── images ├── batching.png └── method.png ├── machine_translation ├── .gitignore ├── README.md ├── bm25_make_batches.py ├── bm25_search.py ├── examples │ └── translation │ │ ├── README.md │ │ ├── prepare-iwslt14.sh │ │ ├── prepare-iwslt17-multilingual.sh │ │ ├── prepare-wmt14en2de.sh │ │ └── prepare-wmt14en2fr.sh ├── experimental_generate.py ├── fairseq │ ├── __init__.py │ ├── benchmark │ │ ├── __init__.py │ │ ├── dummy_lm.py │ │ ├── 
dummy_masked_lm.py │ │ ├── dummy_model.py │ │ └── dummy_mt.py │ ├── binarizer.py │ ├── checkpoint_utils.py │ ├── clib │ │ ├── libbleu │ │ │ ├── libbleu.cpp │ │ │ └── module.cpp │ │ ├── libnat │ │ │ └── edit_dist.cpp │ │ └── libnat_cuda │ │ │ ├── binding.cpp │ │ │ ├── edit_dist.cu │ │ │ └── edit_dist.h │ ├── criterions │ │ ├── __init__.py │ │ ├── adaptive_loss.py │ │ ├── composite_loss.py │ │ ├── cross_entropy.py │ │ ├── ctc.py │ │ ├── fairseq_criterion.py │ │ ├── label_smoothed_cross_entropy.py │ │ ├── label_smoothed_cross_entropy_with_alignment.py │ │ ├── legacy_masked_lm.py │ │ ├── masked_lm.py │ │ ├── nat_loss.py │ │ ├── sentence_prediction.py │ │ ├── sentence_ranking.py │ │ ├── trime_mt_loss.py │ │ └── wav2vec_criterion.py │ ├── data │ │ ├── __init__.py │ │ ├── add_target_dataset.py │ │ ├── append_token_dataset.py │ │ ├── audio │ │ │ ├── __init__.py │ │ │ ├── audio_utils.py │ │ │ ├── feature_transforms │ │ │ │ ├── __init__.py │ │ │ │ ├── global_cmvn.py │ │ │ │ ├── specaugment.py │ │ │ │ └── utterance_cmvn.py │ │ │ ├── raw_audio_dataset.py │ │ │ └── speech_to_text_dataset.py │ │ ├── backtranslation_dataset.py │ │ ├── base_wrapper_dataset.py │ │ ├── bucket_pad_length_dataset.py │ │ ├── colorize_dataset.py │ │ ├── concat_dataset.py │ │ ├── concat_sentences_dataset.py │ │ ├── data_utils.py │ │ ├── data_utils_fast.pyx │ │ ├── denoising_dataset.py │ │ ├── dictionary.py │ │ ├── encoders │ │ │ ├── __init__.py │ │ │ ├── byte_bpe.py │ │ │ ├── byte_utils.py │ │ │ ├── bytes.py │ │ │ ├── characters.py │ │ │ ├── fastbpe.py │ │ │ ├── gpt2_bpe.py │ │ │ ├── gpt2_bpe_utils.py │ │ │ ├── hf_bert_bpe.py │ │ │ ├── hf_byte_bpe.py │ │ │ ├── moses_tokenizer.py │ │ │ ├── nltk_tokenizer.py │ │ │ ├── sentencepiece_bpe.py │ │ │ ├── space_tokenizer.py │ │ │ ├── subword_nmt_bpe.py │ │ │ └── utils.py │ │ ├── fairseq_dataset.py │ │ ├── fasta_dataset.py │ │ ├── id_dataset.py │ │ ├── indexed_dataset.py │ │ ├── iterators.py │ │ ├── language_pair_dataset.py │ │ ├── legacy │ │ │ ├── __init__.py │ │ │ ├── block_pair_dataset.py │ │ │ ├── masked_lm_dataset.py │ │ │ └── masked_lm_dictionary.py │ │ ├── list_dataset.py │ │ ├── lm_context_window_dataset.py │ │ ├── lru_cache_dataset.py │ │ ├── mask_tokens_dataset.py │ │ ├── monolingual_dataset.py │ │ ├── multi_corpus_dataset.py │ │ ├── multi_corpus_sampled_dataset.py │ │ ├── multilingual │ │ │ ├── __init__.py │ │ │ ├── multilingual_data_manager.py │ │ │ ├── multilingual_utils.py │ │ │ ├── sampled_multi_dataset.py │ │ │ ├── sampled_multi_epoch_dataset.py │ │ │ └── sampling_method.py │ │ ├── nested_dictionary_dataset.py │ │ ├── noising.py │ │ ├── num_samples_dataset.py │ │ ├── numel_dataset.py │ │ ├── offset_tokens_dataset.py │ │ ├── pad_dataset.py │ │ ├── plasma_utils.py │ │ ├── prepend_dataset.py │ │ ├── prepend_token_dataset.py │ │ ├── raw_label_dataset.py │ │ ├── replace_dataset.py │ │ ├── resampling_dataset.py │ │ ├── roll_dataset.py │ │ ├── round_robin_zip_datasets.py │ │ ├── shorten_dataset.py │ │ ├── sort_dataset.py │ │ ├── strip_token_dataset.py │ │ ├── subsample_dataset.py │ │ ├── token_block_dataset.py │ │ ├── token_block_utils_fast.pyx │ │ ├── transform_eos_dataset.py │ │ └── transform_eos_lang_pair_dataset.py │ ├── dataclass │ │ ├── __init__.py │ │ ├── constants.py │ │ ├── data_class.py │ │ └── utils.py │ ├── distributed_utils.py │ ├── file_io.py │ ├── file_utils.py │ ├── hub_utils.py │ ├── incremental_decoding_utils.py │ ├── iterative_refinement_generator.py │ ├── legacy_distributed_data_parallel.py │ ├── logging │ │ ├── __init__.py │ │ ├── meters.py │ │ ├── metrics.py 
│ │ └── progress_bar.py │ ├── model_parallel │ │ ├── __init__.py │ │ ├── criterions │ │ │ ├── __init__.py │ │ │ └── vocab_parallel_cross_entropy.py │ │ ├── megatron_trainer.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── pipeline_parallel_transformer │ │ │ │ ├── __init__.py │ │ │ │ ├── layers.py │ │ │ │ └── model.py │ │ │ ├── roberta │ │ │ │ ├── __init__.py │ │ │ │ └── model.py │ │ │ ├── transformer.py │ │ │ └── transformer_lm.py │ │ └── modules │ │ │ ├── __init__.py │ │ │ ├── multihead_attention.py │ │ │ ├── transformer_layer.py │ │ │ ├── transformer_sentence_encoder.py │ │ │ └── transformer_sentence_encoder_layer.py │ ├── models │ │ ├── __init__.py │ │ ├── bart │ │ │ ├── __init__.py │ │ │ ├── hub_interface.py │ │ │ └── model.py │ │ ├── composite_encoder.py │ │ ├── distributed_fairseq_model.py │ │ ├── fairseq_decoder.py │ │ ├── fairseq_encoder.py │ │ ├── fairseq_incremental_decoder.py │ │ ├── fairseq_model.py │ │ ├── fconv.py │ │ ├── fconv_lm.py │ │ ├── fconv_self_att.py │ │ ├── huggingface │ │ │ ├── __init__.py │ │ │ └── hf_gpt2.py │ │ ├── lightconv.py │ │ ├── lightconv_lm.py │ │ ├── lstm.py │ │ ├── lstm_lm.py │ │ ├── masked_lm.py │ │ ├── model_utils.py │ │ ├── multilingual_transformer.py │ │ ├── nat │ │ │ ├── __init__.py │ │ │ ├── cmlm_transformer.py │ │ │ ├── fairseq_nat_model.py │ │ │ ├── insertion_transformer.py │ │ │ ├── iterative_nonautoregressive_transformer.py │ │ │ ├── levenshtein_transformer.py │ │ │ ├── levenshtein_utils.py │ │ │ ├── nat_crf_transformer.py │ │ │ ├── nonautoregressive_ensembles.py │ │ │ └── nonautoregressive_transformer.py │ │ ├── roberta │ │ │ ├── __init__.py │ │ │ ├── alignment_utils.py │ │ │ ├── hub_interface.py │ │ │ ├── model.py │ │ │ ├── model_camembert.py │ │ │ └── model_xlmr.py │ │ ├── speech_to_text │ │ │ ├── __init__.py │ │ │ ├── berard.py │ │ │ └── s2t_transformer.py │ │ ├── transformer.py │ │ ├── transformer_align.py │ │ ├── transformer_from_pretrained_xlm.py │ │ ├── transformer_lm.py │ │ └── wav2vec │ │ │ ├── __init__.py │ │ │ ├── wav2vec.py │ │ │ ├── wav2vec2.py │ │ │ └── wav2vec2_asr.py │ ├── modules │ │ ├── __init__.py │ │ ├── adaptive_input.py │ │ ├── adaptive_softmax.py │ │ ├── beamable_mm.py │ │ ├── character_token_embedder.py │ │ ├── conv_tbc.py │ │ ├── cross_entropy.py │ │ ├── cuda_utils.cu │ │ ├── downsampled_multihead_attention.py │ │ ├── dynamic_convolution.py │ │ ├── dynamic_crf_layer.py │ │ ├── dynamicconv_layer │ │ │ ├── __init__.py │ │ │ ├── cuda_function_gen.py │ │ │ ├── dynamicconv_cuda.cpp │ │ │ ├── dynamicconv_cuda.cuh │ │ │ ├── dynamicconv_cuda_kernel.cu │ │ │ ├── dynamicconv_layer.py │ │ │ ├── dynamiconv_cpu.cpp │ │ │ └── setup.py │ │ ├── fairseq_dropout.py │ │ ├── fp32_group_norm.py │ │ ├── gelu.py │ │ ├── grad_multiply.py │ │ ├── gumbel_vector_quantizer.py │ │ ├── kmeans_vector_quantizer.py │ │ ├── knn_datastore.py │ │ ├── layer_drop.py │ │ ├── layer_norm.py │ │ ├── learned_positional_embedding.py │ │ ├── lightconv_layer │ │ │ ├── __init__.py │ │ │ ├── cuda_function_gen.py │ │ │ ├── lightconv_cuda.cpp │ │ │ ├── lightconv_cuda.cuh │ │ │ ├── lightconv_cuda_kernel.cu │ │ │ ├── lightconv_layer.py │ │ │ └── setup.py │ │ ├── lightweight_convolution.py │ │ ├── linearized_convolution.py │ │ ├── multihead_attention.py │ │ ├── positional_embedding.py │ │ ├── quant_noise.py │ │ ├── quantization │ │ │ ├── __init__.py │ │ │ ├── pq │ │ │ │ ├── __init__.py │ │ │ │ ├── em.py │ │ │ │ ├── modules │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── qconv.py │ │ │ │ │ ├── qemb.py │ │ │ │ │ └── qlinear.py │ │ │ │ ├── pq.py │ │ │ │ └── utils.py │ │ │ ├── 
quantization_options.py │ │ │ └── scalar │ │ │ │ ├── __init__.py │ │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ ├── qact.py │ │ │ │ ├── qconv.py │ │ │ │ ├── qemb.py │ │ │ │ └── qlinear.py │ │ │ │ ├── ops.py │ │ │ │ └── utils.py │ │ ├── same_pad.py │ │ ├── scalar_bias.py │ │ ├── sinusoidal_positional_embedding.py │ │ ├── sparse_multihead_attention.py │ │ ├── sparse_transformer_sentence_encoder.py │ │ ├── sparse_transformer_sentence_encoder_layer.py │ │ ├── transformer_layer.py │ │ ├── transformer_sentence_encoder.py │ │ ├── transformer_sentence_encoder_layer.py │ │ ├── transpose_last.py │ │ ├── unfold.py │ │ └── vggblock.py │ ├── nan_detector.py │ ├── optim │ │ ├── __init__.py │ │ ├── adadelta.py │ │ ├── adafactor.py │ │ ├── adagrad.py │ │ ├── adam.py │ │ ├── adamax.py │ │ ├── bmuf.py │ │ ├── dynamic_loss_scaler.py │ │ ├── fairseq_optimizer.py │ │ ├── fp16_optimizer.py │ │ ├── fused_adam.py │ │ ├── fused_lamb.py │ │ ├── lr_scheduler │ │ │ ├── __init__.py │ │ │ ├── cosine_lr_scheduler.py │ │ │ ├── fairseq_lr_scheduler.py │ │ │ ├── fixed_schedule.py │ │ │ ├── inverse_square_root_schedule.py │ │ │ ├── polynomial_decay_schedule.py │ │ │ ├── reduce_lr_on_plateau.py │ │ │ ├── tri_stage_lr_scheduler.py │ │ │ └── triangular_lr_scheduler.py │ │ ├── nag.py │ │ ├── sgd.py │ │ └── shard.py │ ├── options.py │ ├── pdb.py │ ├── quantization_utils.py │ ├── registry.py │ ├── scoring │ │ ├── __init__.py │ │ ├── bleu.py │ │ ├── chrf.py │ │ ├── tokenizer.py │ │ └── wer.py │ ├── search.py │ ├── sequence_generator.py │ ├── sequence_scorer.py │ ├── tasks │ │ ├── __init__.py │ │ ├── audio_pretraining.py │ │ ├── cross_lingual_lm.py │ │ ├── denoising.py │ │ ├── fairseq_task.py │ │ ├── language_modeling.py │ │ ├── legacy_masked_lm.py │ │ ├── masked_lm.py │ │ ├── multilingual_denoising.py │ │ ├── multilingual_masked_lm.py │ │ ├── multilingual_translation.py │ │ ├── semisupervised_translation.py │ │ ├── sentence_prediction.py │ │ ├── sentence_ranking.py │ │ ├── speech_to_text.py │ │ ├── translation.py │ │ ├── translation_from_pretrained_bart.py │ │ ├── translation_from_pretrained_xlm.py │ │ ├── translation_lev.py │ │ └── translation_multi_simple_epoch.py │ ├── token_generation_constraints.py │ ├── tokenizer.py │ ├── trainer.py │ └── utils.py ├── fairseq_cli │ ├── __init__.py │ ├── eval_lm.py │ ├── generate.py │ ├── interactive.py │ ├── preprocess.py │ ├── score.py │ ├── train.py │ └── validate.py ├── save_datastore.py ├── setup.py ├── train.py ├── train_datastore.py └── train_datastore_gpu.py ├── poster.pdf ├── preprocess.py ├── run_pretrained_models.md ├── setup.py ├── slides.pdf ├── train.py └── train_scripts ├── enwik8-38M-trime.sh ├── enwik8-38M-trime_long.sh ├── wiki103-150M-trime.sh ├── wiki103-150M-trime_long.sh ├── wiki103-247M-trime.sh ├── wiki103-247M-trime_ext.sh └── wiki103-247M-trime_long.sh /bm25_make_batches.py: -------------------------------------------------------------------------------- 1 | import json 2 | from tqdm import tqdm 3 | import sys 4 | import os 5 | 6 | import argparse 7 | 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument('--results_path', type=str, help='the path where to save the retrieval results') 10 | parser.add_argument('--batch_file', type=str, help='the path of the output batch file') 11 | 12 | parser.add_argument('--num_shards', type=int, default=1) 13 | args = parser.parse_args() 14 | 15 | all_res = [] 16 | for i in range(args.num_shards): 17 | with open(os.path.join(args.results_path, 'shard%d.json'%i), 'r') as f: 18 | data = json.load(f) 19 | all_res += data 
import random
random.seed(1)

# Greedily order the segments so that consecutive indices are BM25
# neighbours whenever possible: follow the retrieval chain from the
# current segment, and fall back to the next unused id otherwise.
S = set(range(len(all_res)))
ids = list(range(len(all_res)))
random.shuffle(ids)
p = 1

x = ids[0]
S.remove(x)
indices = [x]
for i in range(len(all_res) - 1):
    found = False
    # skip the first hit, which is typically the segment itself
    for y in all_res[x]['retrieval'][1:]:
        if y in S:
            found = True
            x = y
            break
    if not found:
        # no unused neighbour is left; restart the chain from a fresh id
        while ids[p] not in S:
            p += 1
        x = ids[p]
    S.remove(x)
    indices.append(x)

print('total indices', len(indices))

with open(args.batch_file, 'w') as f:
    json.dump(indices, f)
-------------------------------------------------------------------------------- /bm25_search.py: --------------------------------------------------------------------------------
from pyserini.search.lucene import LuceneSearcher
import json
import sys
import os

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--index_path', type=str, help='the path of BM25 index')
parser.add_argument('--segments_path', type=str, help='the path of the file storing all the segments')
parser.add_argument('--results_path', type=str, help='the path where to save the retrieval results')

parser.add_argument('--num_shards', type=int, default=1)
parser.add_argument('--shard_id', type=int, default=0)

args = parser.parse_args()

print('num shards: {}; shard id: {}'.format(args.num_shards, args.shard_id))
with open(args.segments_path, 'r') as f:
    segments = json.load(f)

searcher = LuceneSearcher(args.index_path)

N = len(segments)
N_per_S = (N + args.num_shards - 1) // args.num_shards  # ceil(N / num_shards)

ret_results = []
for b in segments[N_per_S * args.shard_id : N_per_S * (args.shard_id + 1)]:
    try:
        hits = searcher.search(b['contents'], 20)
        ret = {'id': b['id'], 'retrieval': [h.docid for h in hits]}
    except Exception as e:
        print('error!', e)
        ret = {'id': b['id'], 'retrieval': [b['id']]}
    ret_results.append(ret)

if not os.path.exists(args.results_path):
    os.mkdir(args.results_path)

with open(os.path.join(args.results_path, 'shard%d.json' % args.shard_id), 'w') as f:
    json.dump(ret_results, f)
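The two scripts above communicate through small JSON files. The following sketch shows the assumed shapes: the 'id', 'contents' and 'retrieval' keys are exactly the ones the code reads, while the concrete values and the tiny three-segment corpus are hypothetical.

# segments_path: a JSON list with one entry per training segment (hypothetical values).
segments = [
    {'id': 0, 'contents': 'first training segment ...'},
    {'id': 1, 'contents': 'second training segment ...'},
    {'id': 2, 'contents': 'third training segment ...'},
]

# results_path/shard0.json: one record per segment in the shard, holding the
# ids of its top BM25 hits; the first hit is usually the segment itself,
# which is why bm25_make_batches.py skips retrieval[0].
shard0 = [
    {'id': 0, 'retrieval': [0, 2, 1]},
    {'id': 1, 'retrieval': [1, 0, 2]},
    {'id': 2, 'retrieval': [2, 0, 1]},
]

# batch_file: a permutation of all segment ids in which consecutive entries
# are BM25 neighbours wherever the greedy chaining above could manage it.
batch_order = [0, 2, 1]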
-------------------------------------------------------------------------------- /eval_lm.py: --------------------------------------------------------------------------------
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq_cli.eval_lm import cli_main


if __name__ == '__main__':
    cli_main()
-------------------------------------------------------------------------------- /fairseq/__init__.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

__all__ = ['pdb']
__version__ = '0.9.0'

import fairseq.criterions  # noqa
import fairseq.models  # noqa
import fairseq.modules  # noqa
import fairseq.optim  # noqa
import fairseq.optim.lr_scheduler  # noqa
import fairseq.pdb  # noqa
import fairseq.tasks  # noqa

import fairseq.benchmark  # noqa
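The imports above are executed purely for their side effects: loading each subpackage runs the @register_* decorators that populate fairseq's registries. A minimal sketch of how a new component plugs into that mechanism, using the criterion registry defined in fairseq/criterions/__init__.py below — the criterion name and class here are hypothetical:

from fairseq.criterions import FairseqCriterion, register_criterion


@register_criterion('toy_criterion')  # hypothetical name
class ToyCriterion(FairseqCriterion):

    def forward(self, model, sample, reduce=True):
        # Compute and return (loss, sample_size, logging_output) here;
        # the registration alone is what makes `--criterion toy_criterion`
        # resolvable from the command line.
        raise NotImplementedError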
-------------------------------------------------------------------------------- /fairseq/clib/libbleu/module.cpp: --------------------------------------------------------------------------------
/**
 * Copyright 2017-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <Python.h>


static PyMethodDef method_def[] = {
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef module_def = {
    PyModuleDef_HEAD_INIT,
    "libbleu",  /* name of module */
    NULL,       /* module documentation, may be NULL */
    -1,         /* size of per-interpreter state of the module,
                   or -1 if the module keeps state in global variables. */
    method_def
};


#if PY_MAJOR_VERSION == 2
PyMODINIT_FUNC init_libbleu()
#else
PyMODINIT_FUNC PyInit_libbleu()
#endif
{
    PyObject *m = PyModule_Create(&module_def);
    if (!m) {
        return NULL;
    }
    return m;
}
-------------------------------------------------------------------------------- /fairseq/clib/libnat_cuda/binding.cpp: --------------------------------------------------------------------------------
/**
 * Copyright 2017-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the license found in the
 * LICENSE file in the root directory of this source tree.
 */

/*
This code is partially adapted from https://github.com/1ytic/pytorch-edit-distance
*/

#include "edit_dist.h"
#include <torch/types.h>

#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif

#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)


torch::Tensor LevenshteinDistance(
        torch::Tensor source,
        torch::Tensor target,
        torch::Tensor source_length,
        torch::Tensor target_length) {

    CHECK_INPUT(source);
    CHECK_INPUT(target);
    CHECK_INPUT(source_length);
    CHECK_INPUT(target_length);
    return LevenshteinDistanceCuda(source, target, source_length, target_length);
}

torch::Tensor GenerateDeletionLabel(
        torch::Tensor source,
        torch::Tensor operations) {

    CHECK_INPUT(source);
    CHECK_INPUT(operations);
    return GenerateDeletionLabelCuda(source, operations);
}

std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabel(
        torch::Tensor target,
        torch::Tensor operations) {

    CHECK_INPUT(target);
    CHECK_INPUT(operations);
    return GenerateInsertionLabelCuda(target, operations);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("levenshtein_distance", &LevenshteinDistance, "Levenshtein distance");
    m.def("generate_deletion_labels", &GenerateDeletionLabel, "Generate Deletion Label");
    m.def("generate_insertion_labels", &GenerateInsertionLabel, "Generate Insertion Label");
}
-------------------------------------------------------------------------------- /fairseq/clib/libnat_cuda/edit_dist.h: --------------------------------------------------------------------------------
/**
 * Copyright 2017-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#include <torch/extension.h>

torch::Tensor LevenshteinDistanceCuda(
        torch::Tensor source,
        torch::Tensor target,
        torch::Tensor source_length,
        torch::Tensor target_length);

torch::Tensor GenerateDeletionLabelCuda(
        torch::Tensor source,
        torch::Tensor operations);

std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda(
        torch::Tensor source,
        torch::Tensor operations);
-------------------------------------------------------------------------------- /fairseq/criterions/__init__.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from fairseq import registry
from fairseq.criterions.fairseq_criterion import FairseqCriterion


build_criterion, register_criterion, CRITERION_REGISTRY = registry.setup_registry(
    '--criterion',
    base_class=FairseqCriterion,
    default='cross_entropy',
)


# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('fairseq.criterions.'
+ module) 25 | -------------------------------------------------------------------------------- /fairseq/data/append_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class AppendTokenDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, token=None): 15 | super().__init__(dataset) 16 | self.token = token 17 | if token is not None: 18 | self._sizes = np.array(dataset.sizes) + 1 19 | else: 20 | self._sizes = dataset.sizes 21 | 22 | def __getitem__(self, idx): 23 | item = self.dataset[idx] 24 | if self.token is not None: 25 | item = torch.cat([item, item.new([self.token])]) 26 | return item 27 | 28 | @property 29 | def sizes(self): 30 | return self._sizes 31 | 32 | def num_tokens(self, index): 33 | n = self.dataset.num_tokens(index) 34 | if self.token is not None: 35 | n += 1 36 | return n 37 | 38 | def size(self, index): 39 | n = self.dataset.size(index) 40 | if self.token is not None: 41 | n += 1 42 | return n 43 | -------------------------------------------------------------------------------- /fairseq/data/audio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/fairseq/data/audio/__init__.py -------------------------------------------------------------------------------- /fairseq/data/base_wrapper_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from torch.utils.data.dataloader import default_collate 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class BaseWrapperDataset(FairseqDataset): 12 | 13 | def __init__(self, dataset): 14 | super().__init__() 15 | self.dataset = dataset 16 | 17 | def __getitem__(self, index): 18 | return self.dataset[index] 19 | 20 | def __len__(self): 21 | return len(self.dataset) 22 | 23 | def collater(self, samples): 24 | if hasattr(self.dataset, 'collater'): 25 | return self.dataset.collater(samples) 26 | else: 27 | return default_collate(samples) 28 | 29 | @property 30 | def sizes(self): 31 | return self.dataset.sizes 32 | 33 | def num_tokens(self, index): 34 | return self.dataset.num_tokens(index) 35 | 36 | def size(self, index): 37 | return self.dataset.size(index) 38 | 39 | def ordered_indices(self): 40 | return self.dataset.ordered_indices() 41 | 42 | @property 43 | def supports_prefetch(self): 44 | return getattr(self.dataset, 'supports_prefetch', False) 45 | 46 | def prefetch(self, indices): 47 | self.dataset.prefetch(indices) 48 | 49 | def set_epoch(self, epoch): 50 | super().set_epoch(epoch) 51 | if hasattr(self.dataset, 'set_epoch'): 52 | self.dataset.set_epoch(epoch) 53 | -------------------------------------------------------------------------------- /fairseq/data/colorize_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class ColorizeDataset(BaseWrapperDataset): 12 | """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ 13 | def __init__(self, dataset, color_getter): 14 | super().__init__(dataset) 15 | self.color_getter = color_getter 16 | 17 | def collater(self, samples): 18 | base_collate = super().collater(samples) 19 | if len(base_collate) > 0: 20 | base_collate["net_input"]["colors"] = torch.tensor( 21 | list(self.color_getter(self.dataset, s["id"]) for s in samples), 22 | dtype=torch.long, 23 | ) 24 | return base_collate 25 | -------------------------------------------------------------------------------- /fairseq/data/concat_sentences_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class ConcatSentencesDataset(FairseqDataset): 12 | 13 | def __init__(self, *datasets): 14 | super().__init__() 15 | self.datasets = datasets 16 | assert all(len(ds) == len(datasets[0]) for ds in datasets), \ 17 | 'datasets must have the same length' 18 | 19 | def __getitem__(self, index): 20 | return torch.cat([ds[index] for ds in self.datasets]) 21 | 22 | def __len__(self): 23 | return len(self.datasets[0]) 24 | 25 | def collater(self, samples): 26 | return self.datasets[0].collater(samples) 27 | 28 | @property 29 | def sizes(self): 30 | return sum(ds.sizes for ds in self.datasets) 31 | 32 | def num_tokens(self, index): 33 | return sum(ds.num_tokens(index) for ds in self.datasets) 34 | 35 | def size(self, index): 36 | return sum(ds.size(index) for ds in self.datasets) 37 | 38 | def ordered_indices(self): 39 | return self.datasets[0].ordered_indices() 40 | 41 | @property 42 | def supports_prefetch(self): 43 | return any( 44 | getattr(ds, 'supports_prefetch', False) for ds in self.datasets 45 | ) 46 | 47 | def prefetch(self, indices): 48 | for ds in self.datasets: 49 | if getattr(ds, 'supports_prefetch', False): 50 | ds.prefetch(indices) 51 | 52 | def set_epoch(self, epoch): 53 | super().set_epoch(epoch) 54 | for ds in self.datasets: 55 | if hasattr(ds, 'set_epoch'): 56 | ds.set_epoch(epoch) 57 | -------------------------------------------------------------------------------- /fairseq/data/encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
import importlib
import os

from fairseq import registry


build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY = registry.setup_registry(
    '--tokenizer',
    default=None,
)


build_bpe, register_bpe, BPE_REGISTRY = registry.setup_registry(
    '--bpe',
    default=None,
)


# automatically import any Python files in the encoders/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('fairseq.data.encoders.' + module)
-------------------------------------------------------------------------------- /fairseq/data/encoders/fastbpe.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq import file_utils
from fairseq.data.encoders import register_bpe


@register_bpe('fastbpe')
class fastBPE(object):

    @staticmethod
    def add_args(parser):
        # fmt: off
        parser.add_argument('--bpe-codes', type=str,
                            help='path to fastBPE BPE')
        # fmt: on

    def __init__(self, args):
        if args.bpe_codes is None:
            raise ValueError('--bpe-codes is required for --bpe=fastbpe')
        codes = file_utils.cached_path(args.bpe_codes)
        try:
            import fastBPE
            self.bpe = fastBPE.fastBPE(codes)
            self.bpe_symbol = "@@ "
        except ImportError:
            raise ImportError('Please install fastBPE with: pip install fastBPE')

    def encode(self, x: str) -> str:
        return self.bpe.apply([x])[0]

    def decode(self, x: str) -> str:
        return (x + ' ').replace(self.bpe_symbol, '').rstrip()
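setup_registry above wires the --tokenizer and --bpe flags to whatever classes have been registered. A small sketch of resolving a BPE implementation by name — the codes path is hypothetical and must point at real fastBPE codes:

from argparse import Namespace

from fairseq.data.encoders import build_bpe

args = Namespace(bpe='fastbpe', bpe_codes='/path/to/bpe.codes')  # hypothetical path
bpe = build_bpe(args)  # resolves to the fastBPE class registered above
print(bpe.decode(bpe.encode('hello world')))  # BPE round-trips back to the input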
-------------------------------------------------------------------------------- /fairseq/data/encoders/gpt2_bpe.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq import file_utils
from fairseq.data.encoders import register_bpe

from .gpt2_bpe_utils import get_encoder


DEFAULT_ENCODER_JSON = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json'
DEFAULT_VOCAB_BPE = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe'


@register_bpe('gpt2')
class GPT2BPE(object):

    @staticmethod
    def add_args(parser):
        # fmt: off
        parser.add_argument('--gpt2-encoder-json', type=str,
                            default=DEFAULT_ENCODER_JSON,
                            help='path to encoder.json')
        parser.add_argument('--gpt2-vocab-bpe', type=str,
                            default=DEFAULT_VOCAB_BPE,
                            help='path to vocab.bpe')
        # fmt: on

    def __init__(self, args):
        encoder_json = file_utils.cached_path(
            getattr(args, 'gpt2_encoder_json', DEFAULT_ENCODER_JSON)
        )
        vocab_bpe = file_utils.cached_path(
            getattr(args, 'gpt2_vocab_bpe', DEFAULT_VOCAB_BPE)
        )
        self.bpe = get_encoder(encoder_json, vocab_bpe)

    def encode(self, x: str) -> str:
        return ' '.join(map(str, self.bpe.encode(x)))

    def decode(self, x: str) -> str:
        return self.bpe.decode([
            int(tok) if tok not in {'<unk>', '<mask>'} else tok
            for tok in x.split()
        ])

    def is_beginning_of_word(self, x: str) -> bool:
        return self.decode(x).startswith(' ')
-------------------------------------------------------------------------------- /fairseq/data/encoders/hf_bert_bpe.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data.encoders import register_bpe


@register_bpe('bert')
class BertBPE(object):

    @staticmethod
    def add_args(parser):
        # fmt: off
        parser.add_argument('--bpe-cased', action='store_true',
                            help='set for cased BPE',
                            default=False)
        parser.add_argument('--bpe-vocab-file', type=str,
                            help='bpe vocab file.')
        # fmt: on

    def __init__(self, args):
        try:
            from pytorch_transformers import BertTokenizer
            from pytorch_transformers.tokenization_utils import clean_up_tokenization
        except ImportError:
            raise ImportError(
                'Please install 1.0.0 version of pytorch_transformers '
                'with: pip install pytorch-transformers'
            )

        if 'bpe_vocab_file' in args:
            self.bert_tokenizer = BertTokenizer(
                args.bpe_vocab_file,
                do_lower_case=not args.bpe_cased
            )
        else:
            vocab_file_name = 'bert-base-cased' if args.bpe_cased else 'bert-base-uncased'
            self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name)
        self.clean_up_tokenization = clean_up_tokenization

    def encode(self, x: str) -> str:
        return ' '.join(self.bert_tokenizer.tokenize(x))

    def decode(self, x: str) -> str:
        return self.clean_up_tokenization(
            self.bert_tokenizer.convert_tokens_to_string(x.split(' '))
        )

    def is_beginning_of_word(self, x: str) -> bool:
        return not x.startswith('##')
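A quick round-trip sketch for the GPT-2 encoder above. An empty Namespace makes __init__ fall back to the DEFAULT_* URLs through getattr, so this assumes network access to download and cache the vocabulary files; the example token ids are illustrative:

from argparse import Namespace

from fairseq.data.encoders.gpt2_bpe import GPT2BPE

bpe = GPT2BPE(Namespace())       # falls back to DEFAULT_ENCODER_JSON / DEFAULT_VOCAB_BPE
ids = bpe.encode('Hello world')  # a space-joined id string, e.g. '15496 995'
assert bpe.decode(ids) == 'Hello world'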
-------------------------------------------------------------------------------- /fairseq/data/encoders/nltk_tokenizer.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data.encoders import register_tokenizer


@register_tokenizer('nltk')
class NLTKTokenizer(object):

    def __init__(self, source_lang=None, target_lang=None):
        try:
            from nltk.tokenize import word_tokenize
            self.word_tokenize = word_tokenize
        except ImportError:
            raise ImportError('Please install nltk with: pip install nltk')

    def encode(self, x: str) -> str:
        return ' '.join(self.word_tokenize(x))

    def decode(self, x: str) -> str:
        return x
-------------------------------------------------------------------------------- /fairseq/data/encoders/sentencepiece_bpe.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq import file_utils
from fairseq.data.encoders import register_bpe


@register_bpe('sentencepiece')
class SentencepieceBPE(object):

    @staticmethod
    def add_args(parser):
        # fmt: off
        parser.add_argument('--sentencepiece-vocab', type=str,
                            help='path to sentencepiece vocab')
        # fmt: on

    def __init__(self, args):
        vocab = file_utils.cached_path(args.sentencepiece_vocab)
        try:
            import sentencepiece as spm
            self.sp = spm.SentencePieceProcessor()
            self.sp.Load(vocab)
        except ImportError:
            raise ImportError('Please install sentencepiece with: pip install sentencepiece')

    def encode(self, x: str) -> str:
        return ' '.join(self.sp.EncodeAsPieces(x))

    def decode(self, x: str) -> str:
        return x.replace(' ', '').replace('\u2581', ' ').strip()

    def is_beginning_of_word(self, x: str) -> bool:
        if x in ['<unk>', '<s>', '</s>', '<pad>']:
            # special elements are always considered beginnings
            # HACK: this logic is already present in fairseq/tasks/masked_lm.py
            # but these special tokens are also contained in the sentencepiece
            # vocabulary which causes duplicate special tokens. This hack makes
            # sure that they are all taken into account.
            return True
        return x.startswith('\u2581')
-------------------------------------------------------------------------------- /fairseq/data/encoders/space_tokenizer.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import re

from fairseq.data.encoders import register_tokenizer


@register_tokenizer('space')
class SpaceTokenizer(object):

    def __init__(self, source_lang=None, target_lang=None):
        self.space_tok = re.compile(r"\s+")

    def encode(self, x: str) -> str:
        return self.space_tok.sub(' ', x)

    def decode(self, x: str) -> str:
        return x
-------------------------------------------------------------------------------- /fairseq/data/encoders/subword_nmt_bpe.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe('subword_nmt') 11 | class SubwordNMTBPE(object): 12 | 13 | @staticmethod 14 | def add_args(parser): 15 | # fmt: off 16 | parser.add_argument('--bpe-codes', type=str, 17 | help='path to subword NMT BPE') 18 | parser.add_argument('--bpe-separator', default='@@', 19 | help='BPE separator') 20 | # fmt: on 21 | 22 | def __init__(self, args): 23 | if args.bpe_codes is None: 24 | raise ValueError('--bpe-codes is required for --bpe=subword_nmt') 25 | codes = file_utils.cached_path(args.bpe_codes) 26 | try: 27 | from subword_nmt import apply_bpe 28 | bpe_parser = apply_bpe.create_parser() 29 | bpe_args = bpe_parser.parse_args([ 30 | '--codes', codes, 31 | '--separator', args.bpe_separator, 32 | ]) 33 | self.bpe = apply_bpe.BPE( 34 | bpe_args.codes, 35 | bpe_args.merges, 36 | bpe_args.separator, 37 | None, 38 | bpe_args.glossaries, 39 | ) 40 | self.bpe_symbol = bpe_args.separator + ' ' 41 | except ImportError: 42 | raise ImportError('Please install subword_nmt with: pip install subword-nmt') 43 | 44 | def encode(self, x: str) -> str: 45 | return self.bpe.process_line(x) 46 | 47 | def decode(self, x: str) -> str: 48 | return (x + ' ').replace(self.bpe_symbol, '').rstrip() 49 | -------------------------------------------------------------------------------- /fairseq/data/encoders/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from fairseq.data import encoders 8 | 9 | 10 | def get_whole_word_mask(args, dictionary): 11 | bpe = encoders.build_bpe(args) 12 | if bpe is not None: 13 | def is_beginning_of_word(i): 14 | if i < dictionary.nspecial: 15 | # special elements are always considered beginnings 16 | return True 17 | tok = dictionary[i] 18 | if tok.startswith('madeupword'): 19 | return True 20 | try: 21 | return bpe.is_beginning_of_word(tok) 22 | except ValueError: 23 | return True 24 | mask_whole_words = torch.ByteTensor(list( 25 | map(is_beginning_of_word, range(len(dictionary))) 26 | )) 27 | return mask_whole_words 28 | return None 29 | -------------------------------------------------------------------------------- /fairseq/data/id_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class IdDataset(FairseqDataset): 12 | 13 | def __getitem__(self, index): 14 | return index 15 | 16 | def __len__(self): 17 | return 0 18 | 19 | def collater(self, samples): 20 | return torch.tensor(samples) 21 | -------------------------------------------------------------------------------- /fairseq/data/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary
from .block_pair_dataset import BlockPairDataset
from .masked_lm_dataset import MaskedLMDataset

__all__ = [
    'BertDictionary',
    'BlockPairDataset',
    'MaskedLMDataset',
    'MaskedLMDictionary',
]
-------------------------------------------------------------------------------- /fairseq/data/legacy/masked_lm_dictionary.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data import Dictionary


class MaskedLMDictionary(Dictionary):
    """
    Dictionary for Masked Language Modelling tasks. This extends Dictionary by
    adding the mask symbol.
    """
    def __init__(
        self,
        pad='<pad>',
        eos='</s>',
        unk='<unk>',
        mask='<mask>',
    ):
        super().__init__(pad, eos, unk)
        self.mask_word = mask
        self.mask_index = self.add_symbol(mask)
        self.nspecial = len(self.symbols)

    def mask(self):
        """Helper to get index of mask symbol"""
        return self.mask_index


class BertDictionary(MaskedLMDictionary):
    """
    Dictionary for BERT task. This extends MaskedLMDictionary by adding support
    for cls and sep symbols.
    """
    def __init__(
        self,
        pad='<pad>',
        eos='</s>',
        unk='<unk>',
        mask='<mask>',
        cls='<cls>',
        sep='<sep>'
    ):
        super().__init__(pad, eos, unk, mask)
        self.cls_word = cls
        self.sep_word = sep
        self.cls_index = self.add_symbol(cls)
        self.sep_index = self.add_symbol(sep)
        self.nspecial = len(self.symbols)

    def cls(self):
        """Helper to get index of cls symbol"""
        return self.cls_index

    def sep(self):
        """Helper to get index of sep symbol"""
        return self.sep_index
-------------------------------------------------------------------------------- /fairseq/data/list_dataset.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import BaseWrapperDataset


class ListDataset(BaseWrapperDataset):

    def __init__(self, dataset, sizes=None):
        super().__init__(dataset)
        self._sizes = sizes

    def __iter__(self):
        for x in self.dataset:
            yield x

    def collater(self, samples):
        return samples

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def set_epoch(self, epoch):
        pass
-------------------------------------------------------------------------------- /fairseq/data/lru_cache_dataset.py: --------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from functools import lru_cache

from .
import BaseWrapperDataset 9 | 10 | 11 | class LRUCacheDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, token=None): 14 | super().__init__(dataset) 15 | 16 | @lru_cache(maxsize=8) 17 | def __getitem__(self, index): 18 | return self.dataset[index] 19 | 20 | @lru_cache(maxsize=8) 21 | def collater(self, samples): 22 | return self.dataset.collater(samples) 23 | -------------------------------------------------------------------------------- /fairseq/data/num_samples_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import FairseqDataset 7 | 8 | 9 | class NumSamplesDataset(FairseqDataset): 10 | 11 | def __getitem__(self, index): 12 | return 1 13 | 14 | def __len__(self): 15 | return 0 16 | 17 | def collater(self, samples): 18 | return sum(samples) 19 | -------------------------------------------------------------------------------- /fairseq/data/numel_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class NumelDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, reduce=False): 15 | super().__init__(dataset) 16 | self.reduce = reduce 17 | 18 | def __getitem__(self, index): 19 | item = self.dataset[index] 20 | if torch.is_tensor(item): 21 | return torch.numel(item) 22 | else: 23 | return np.size(item) 24 | 25 | def __len__(self): 26 | return len(self.dataset) 27 | 28 | def collater(self, samples): 29 | if self.reduce: 30 | return sum(samples) 31 | else: 32 | return torch.tensor(samples) 33 | -------------------------------------------------------------------------------- /fairseq/data/offset_tokens_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class OffsetTokensDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, offset): 12 | super().__init__(dataset) 13 | self.offset = offset 14 | 15 | def __getitem__(self, idx): 16 | return self.dataset[idx] + self.offset 17 | -------------------------------------------------------------------------------- /fairseq/data/pad_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import data_utils 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class PadDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, pad_idx, left_pad): 14 | super().__init__(dataset) 15 | self.pad_idx = pad_idx 16 | self.left_pad = left_pad 17 | 18 | def collater(self, samples): 19 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) 20 | 21 | 22 | class LeftPadDataset(PadDataset): 23 | 24 | def __init__(self, dataset, pad_idx): 25 | super().__init__(dataset, pad_idx, left_pad=True) 26 | 27 | 28 | class RightPadDataset(PadDataset): 29 | 30 | def __init__(self, dataset, pad_idx): 31 | super().__init__(dataset, pad_idx, left_pad=False) 32 | -------------------------------------------------------------------------------- /fairseq/data/prepend_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class PrependDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, prepend_getter, ensure_first_token_is=None): 14 | super().__init__(dataset) 15 | self.prepend_getter = prepend_getter 16 | self.ensure_first_token = ensure_first_token_is 17 | 18 | def __getitem__(self, idx): 19 | item = self.dataset[idx] 20 | is_tuple = isinstance(item, tuple) 21 | src = item[0] if is_tuple else item 22 | 23 | assert self.ensure_first_token is None or src[0] == self.ensure_first_token 24 | prepend_idx = self.prepend_getter(self.dataset, idx) 25 | assert isinstance(prepend_idx, int) 26 | src[0] = prepend_idx 27 | item = tuple((src,) + item[1:]) if is_tuple else src 28 | return item 29 | -------------------------------------------------------------------------------- /fairseq/data/prepend_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class PrependTokenDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, token=None): 15 | super().__init__(dataset) 16 | self.token = token 17 | if token is not None: 18 | self._sizes = np.array(dataset.sizes) + 1 19 | else: 20 | self._sizes = dataset.sizes 21 | 22 | def __getitem__(self, idx): 23 | item = self.dataset[idx] 24 | if self.token is not None: 25 | item = torch.cat([item.new([self.token]), item]) 26 | return item 27 | 28 | @property 29 | def sizes(self): 30 | return self._sizes 31 | 32 | def num_tokens(self, index): 33 | n = self.dataset.num_tokens(index) 34 | if self.token is not None: 35 | n += 1 36 | return n 37 | 38 | def size(self, index): 39 | n = self.dataset.size(index) 40 | if self.token is not None: 41 | n += 1 42 | return n 43 | -------------------------------------------------------------------------------- /fairseq/data/raw_label_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . 
import FairseqDataset 9 | 10 | 11 | class RawLabelDataset(FairseqDataset): 12 | 13 | def __init__(self, labels): 14 | super().__init__() 15 | self.labels = labels 16 | 17 | def __getitem__(self, index): 18 | return self.labels[index] 19 | 20 | def __len__(self): 21 | return len(self.labels) 22 | 23 | def collater(self, samples): 24 | return torch.tensor(samples) 25 | -------------------------------------------------------------------------------- /fairseq/data/replace_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class ReplaceDataset(BaseWrapperDataset): 10 | """Replaces tokens found in the dataset by a specified replacement token 11 | 12 | Args: 13 | dataset (~torch.utils.data.Dataset): dataset to replace tokens in 14 | replace_map(Dictionary[int,int]): map of token to replace -> replacement token 15 | offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be 16 | as many as the number of objects returned by the underlying dataset __getitem__ method. 17 | """ 18 | 19 | def __init__(self, dataset, replace_map, offsets): 20 | super().__init__(dataset) 21 | assert len(replace_map) > 0 22 | self.replace_map = replace_map 23 | self.offsets = offsets 24 | 25 | def __getitem__(self, index): 26 | item = self.dataset[index] 27 | is_tuple = isinstance(item, tuple) 28 | srcs = item if is_tuple else [item] 29 | 30 | for offset, src in zip(self.offsets, srcs): 31 | for k, v in self.replace_map.items(): 32 | src_off = src[offset:] if offset >= 0 else src[:offset] 33 | src_off.masked_fill_(src_off == k, v) 34 | 35 | item = srcs if is_tuple else srcs[0] 36 | return item 37 | -------------------------------------------------------------------------------- /fairseq/data/roll_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class RollDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, shifts): 14 | super().__init__(dataset) 15 | self.shifts = shifts 16 | 17 | def __getitem__(self, index): 18 | item = self.dataset[index] 19 | return torch.roll(item, self.shifts) 20 | -------------------------------------------------------------------------------- /fairseq/data/sharded_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import itertools 7 | import os 8 | import random 9 | 10 | from . import BaseWrapperDataset 11 | from fairseq.data import data_utils 12 | 13 | 14 | class ShardedDataset(BaseWrapperDataset): 15 | """A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS. 16 | 17 | Loads a dataset which has been sharded into multiple files. 
only one shard is loaded for any given epoch. 18 | 19 | """ 20 | 21 | def __init__( 22 | self, 23 | dictionary, 24 | dataset_impl: str, 25 | path: str, 26 | split: str, 27 | epoch: int, 28 | name: str = None, 29 | combine: bool = False, 30 | seed: int = 0, 31 | ): 32 | self._name = name if name is not None else os.path.basename(path) 33 | num_shards = 0 34 | for i in itertools.count(): 35 | if not os.path.exists(os.path.join(path, "shard" + str(i))): 36 | break 37 | num_shards += 1 38 | 39 | if num_shards > 0 and split == "train": 40 | random.seed(seed ^ epoch) 41 | shard = random.randint(0, num_shards - 1) 42 | split_path = os.path.join(path, "shard" + str(shard), split) 43 | else: 44 | split_path = os.path.join(path, split) 45 | if os.path.isdir(split_path): 46 | split_path = os.path.join(split_path, split) 47 | 48 | dataset = data_utils.load_indexed_dataset( 49 | split_path, dictionary, dataset_impl, combine=combine 50 | ) 51 | if dataset is None: 52 | raise FileNotFoundError( 53 | "Dataset not found: {} ({})".format(split, split_path) 54 | ) 55 | 56 | super().__init__(dataset) 57 | 58 | @property 59 | def name(self): 60 | return self._name 61 | -------------------------------------------------------------------------------- /fairseq/data/sort_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class SortDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, sort_order): 14 | super().__init__(dataset) 15 | if not isinstance(sort_order, (list, tuple)): 16 | sort_order = [sort_order] 17 | self.sort_order = sort_order 18 | 19 | assert all(len(so) == len(dataset) for so in sort_order) 20 | 21 | def ordered_indices(self): 22 | return np.lexsort(self.sort_order) 23 | -------------------------------------------------------------------------------- /fairseq/data/strip_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class StripTokenDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, id_to_strip): 12 | super().__init__(dataset) 13 | self.id_to_strip = id_to_strip 14 | 15 | def __getitem__(self, index): 16 | item = self.dataset[index] 17 | return item[item.ne(self.id_to_strip)] 18 | -------------------------------------------------------------------------------- /fairseq/data/truncate_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class TruncateDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, truncation_length): 14 | super().__init__(dataset) 15 | assert truncation_length is not None 16 | self.truncation_length = truncation_length 17 | self.dataset = dataset 18 | 19 | def __getitem__(self, index): 20 | item = self.dataset[index] 21 | item_len = item.size(0) 22 | if item_len > self.truncation_length: 23 | item = item[:self.truncation_length] 24 | return item 25 | 26 | @property 27 | def sizes(self): 28 | return np.minimum(self.dataset.sizes, self.truncation_length) 29 | 30 | def __len__(self): 31 | return len(self.dataset) 32 | -------------------------------------------------------------------------------- /fairseq/incremental_decoding_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from typing import Dict, Optional 7 | import uuid 8 | 9 | from torch import Tensor 10 | 11 | 12 | class FairseqIncrementalState(object): 13 | 14 | def __init__(self, *args, **kwargs): 15 | super().__init__(*args, **kwargs) 16 | self.init_incremental_state() 17 | 18 | def init_incremental_state(self): 19 | self._incremental_state_id = str(uuid.uuid4()) 20 | 21 | def _get_full_incremental_state_key(self, key: str) -> str: 22 | return "{}.{}".format(self._incremental_state_id, key) 23 | 24 | def get_incremental_state( 25 | self, 26 | incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], 27 | key: str, 28 | ) -> Optional[Dict[str, Optional[Tensor]]]: 29 | """Helper for getting incremental state for an nn.Module.""" 30 | full_key = self._get_full_incremental_state_key(key) 31 | if incremental_state is None or full_key not in incremental_state: 32 | return None 33 | return incremental_state[full_key] 34 | 35 | def set_incremental_state( 36 | self, 37 | incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], 38 | key: str, 39 | value: Dict[str, Optional[Tensor]], 40 | ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: 41 | """Helper for setting incremental state for an nn.Module.""" 42 | if incremental_state is not None: 43 | full_key = self._get_full_incremental_state_key(key) 44 | incremental_state[full_key] = value 45 | return incremental_state 46 | 47 | 48 | def with_incremental_state(cls): 49 | cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.__bases__ if b != FairseqIncrementalState) 50 | return cls 51 | -------------------------------------------------------------------------------- /fairseq/models/bart/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | -------------------------------------------------------------------------------- /fairseq/models/fairseq_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
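# A minimal usage sketch for the incremental-state helpers defined in
# fairseq/incremental_decoding_utils.py above; `CachingLayer` and its "prev_x"
# buffer are illustrative assumptions, not part of fairseq.
import torch
import torch.nn as nn

from fairseq.incremental_decoding_utils import with_incremental_state


@with_incremental_state
class CachingLayer(nn.Module):
    def forward(self, x, incremental_state=None):
        if incremental_state is not None:
            # Keys are namespaced by a per-instance UUID, so two modules sharing
            # the same state dict can never clobber each other's entries.
            saved = self.get_incremental_state(incremental_state, "cache")
            if saved is not None and saved.get("prev_x") is not None:
                x = torch.cat([saved["prev_x"], x], dim=0)  # reuse past timesteps
            self.set_incremental_state(incremental_state, "cache", {"prev_x": x})
        return x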
5 | 6 | import torch.nn as nn 7 | 8 | 9 | class FairseqEncoder(nn.Module): 10 | """Base class for encoders.""" 11 | 12 | def __init__(self, dictionary): 13 | super().__init__() 14 | self.dictionary = dictionary 15 | 16 | def forward(self, src_tokens, src_lengths=None, **kwargs): 17 | """ 18 | Args: 19 | src_tokens (LongTensor): tokens in the source language of shape 20 | `(batch, src_len)` 21 | src_lengths (LongTensor): lengths of each source sentence of shape 22 | `(batch)` 23 | """ 24 | raise NotImplementedError 25 | 26 | def reorder_encoder_out(self, encoder_out, new_order): 27 | """ 28 | Reorder encoder output according to `new_order`. 29 | 30 | Args: 31 | encoder_out: output from the ``forward()`` method 32 | new_order (LongTensor): desired order 33 | 34 | Returns: 35 | `encoder_out` rearranged according to `new_order` 36 | """ 37 | raise NotImplementedError 38 | 39 | def max_positions(self): 40 | """Maximum input length supported by the encoder.""" 41 | return 1e6 # an arbitrary large number 42 | 43 | def upgrade_state_dict(self, state_dict): 44 | """Upgrade a (possibly old) state dict for new versions of fairseq.""" 45 | return state_dict 46 | -------------------------------------------------------------------------------- /fairseq/models/nat/__init__.py: -------------------------------------------------------------------------------- 1 | from .fairseq_nat_model import * 2 | from .nonautoregressive_transformer import * 3 | from .nat_crf_transformer import * 4 | from .iterative_nonautoregressive_transformer import * 5 | from .cmlm_transformer import * 6 | from .levenshtein_transformer import * 7 | from .insertion_transformer import * 8 | -------------------------------------------------------------------------------- /fairseq/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | from .model_camembert import * # noqa 9 | from .model_xlmr import * # noqa 10 | -------------------------------------------------------------------------------- /fairseq/models/roberta/model_camembert.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
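# A hedged usage sketch for the hub checkpoints registered below (and in
# model_xlmr.py further down); it assumes the standard RobertaHubInterface API
# and downloads the archive named in hub_models() on first use.
from fairseq.models.roberta import CamembertModel

camembert = CamembertModel.from_pretrained('camembert.v0')
camembert.eval()  # disable dropout so mask filling is deterministic
print(camembert.fill_mask('Le camembert est <mask> :)', topk=3))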
5 | """ 6 | CamemBERT: a Tasty French Language Model 7 | """ 8 | 9 | from fairseq.models import register_model 10 | 11 | from .hub_interface import RobertaHubInterface 12 | from .model import RobertaModel 13 | 14 | 15 | @register_model('camembert') 16 | class CamembertModel(RobertaModel): 17 | 18 | @classmethod 19 | def hub_models(cls): 20 | return { 21 | 'camembert.v0': 'http://dl.fbaipublicfiles.com/fairseq/models/camembert.v0.tar.gz', 22 | } 23 | 24 | @classmethod 25 | def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs): 26 | from fairseq import hub_utils 27 | x = hub_utils.from_pretrained( 28 | model_name_or_path, 29 | checkpoint_file, 30 | data_name_or_path, 31 | archive_map=cls.hub_models(), 32 | bpe=bpe, 33 | load_checkpoint_heads=True, 34 | **kwargs, 35 | ) 36 | return RobertaHubInterface(x['args'], x['task'], x['models'][0]) 37 | -------------------------------------------------------------------------------- /fairseq/models/roberta/model_xlmr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | Unsupervised Cross-lingual Representation Learning at Scale 7 | """ 8 | 9 | from fairseq.models import register_model 10 | 11 | from .hub_interface import RobertaHubInterface 12 | from .model import RobertaModel 13 | 14 | 15 | @register_model('xlmr') 16 | class XLMRModel(RobertaModel): 17 | 18 | @classmethod 19 | def hub_models(cls): 20 | return { 21 | 'xlmr.base': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz', 22 | 'xlmr.large': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz', 23 | } 24 | 25 | @classmethod 26 | def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs): 27 | from fairseq import hub_utils 28 | x = hub_utils.from_pretrained( 29 | model_name_or_path, 30 | checkpoint_file, 31 | data_name_or_path, 32 | archive_map=cls.hub_models(), 33 | bpe=bpe, 34 | load_checkpoint_heads=True, 35 | **kwargs, 36 | ) 37 | return RobertaHubInterface(x['args'], x['task'], x['models'][0]) 38 | -------------------------------------------------------------------------------- /fairseq/modules/beamable_mm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn as nn 8 | 9 | 10 | class BeamableMM(nn.Module): 11 | """This module provides an optimized MM for beam decoding with attention. 12 | 13 | It leverage the fact that the source-side of the input is replicated beam 14 | times and the target-side of the input is of width one. This layer speeds up 15 | inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)} 16 | with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}. 
17 | """ 18 | def __init__(self, beam_size=None): 19 | super(BeamableMM, self).__init__() 20 | self.beam_size = beam_size 21 | 22 | def forward(self, input1, input2): 23 | if ( 24 | not self.training and # test mode 25 | self.beam_size is not None and # beam size is set 26 | input1.dim() == 3 and # only support batched input 27 | input1.size(1) == 1 # single time step update 28 | ): 29 | bsz, beam = input1.size(0), self.beam_size 30 | 31 | # bsz x 1 x nhu --> bsz/beam x beam x nhu 32 | input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1) 33 | 34 | # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu 35 | input2 = input2.unfold(0, beam, beam)[:, :, :, 0] 36 | 37 | # use non batched operation if bsz = beam 38 | if input1.size(0) == 1: 39 | output = torch.mm(input1[0, :, :], input2[0, :, :]) 40 | else: 41 | output = input1.bmm(input2) 42 | return output.view(bsz, 1, -1) 43 | else: 44 | return input1.bmm(input2) 45 | 46 | def set_beam_size(self, beam_size): 47 | self.beam_size = beam_size 48 | -------------------------------------------------------------------------------- /fairseq/modules/conv_tbc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from torch.nn.modules.utils import _single 8 | 9 | 10 | class ConvTBC(torch.nn.Module): 11 | """1D convolution over an input of shape (time x batch x channel) 12 | 13 | The implementation uses gemm to perform the convolution. This implementation 14 | is faster than cuDNN for small kernel sizes. 15 | """ 16 | def __init__(self, in_channels, out_channels, kernel_size, padding=0): 17 | super(ConvTBC, self).__init__() 18 | self.in_channels = in_channels 19 | self.out_channels = out_channels 20 | self.kernel_size = _single(kernel_size) 21 | self.padding = _single(padding) 22 | 23 | self.weight = torch.nn.Parameter(torch.Tensor( 24 | self.kernel_size[0], in_channels, out_channels)) 25 | self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) 26 | 27 | def forward(self, input): 28 | return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0]) 29 | 30 | def __repr__(self): 31 | s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}' 32 | ', padding={padding}') 33 | if self.bias is None: 34 | s += ', bias=False' 35 | s += ')' 36 | return s.format(name=self.__class__.__name__, **self.__dict__) 37 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .dynamicconv_layer import DynamicconvLayer # noqa 7 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | #include <torch/extension.h> 9 | #include <vector> 10 | 11 | std::vector<at::Tensor> dynamicconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector<at::Tensor> dynamicconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector<at::Tensor> dynamicconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return dynamicconv_cuda_forward(input, filters, 36 | padding_l); 37 | } 38 | 39 | std::vector<at::Tensor> dynamicconv_backward( 40 | at::Tensor gradOutput, 41 | int padding_l, 42 | at::Tensor input, 43 | at::Tensor filters) { 44 | 45 | CHECK_INPUT(gradOutput); 46 | CHECK_INPUT(input); 47 | CHECK_INPUT(filters); 48 | 49 | return dynamicconv_cuda_backward(gradOutput, padding_l, 50 | input, filters); 51 | } 52 | 53 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 54 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)"); 55 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)"); 56 | } 57 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include 23 | #include 24 | #include 25 | 26 | #define SHFL_MASK 0xffffffff 27 | 28 | template <typename scalar_t> 29 | __global__ 30 | void dynamicconv_forward_kernel(const scalar_t* input, 31 | const scalar_t* weight, 32 | int minibatch, 33 | int sequenceLength, 34 | int numFeatures, 35 | int numFiltersInBlock, 36 | int numHeads, 37 | scalar_t* output); 38 | 39 | template <typename scalar_t> 40 | __global__ 41 | void dynamicconv_backward_kernel( 42 | const scalar_t* gradOutput, // B * C * T 43 | const scalar_t* input, // B * C * T 44 | const scalar_t* weight, 45 | int minibatch, 46 | int sequenceLength, 47 | int numFeatures, 48 | int numFiltersInBlock, 49 | int numHeads, 50 | scalar_t* gradWeight, 51 | scalar_t* gradInput); // B * H * k * T 52 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | #include <vector> 3 | 4 | std::vector<float*> dynamicconv_cpu_forward( 5 | float* input, 6 | float* filters, 7 | int padding_l); 8 | 9 | std::vector<float*> dynamicconv_cpu_backward( 10 | float* gradOutput, 11 | int padding_l, 12 | float* input, 13 | float* filters); 14 | 15 | std::vector<float*> dynamicconv_forward( 16 | float* input, 17 | float* filters, 18 | int padding_l) { 19 | 20 | return dynamicconv_cpu_forward(input, filters, padding_l); 21 | } 22 | 23 | std::vector<float*> dynamicconv_backward( 24 | float* gradOutput, 25 | int padding_l, 26 | float* input, 27 | float* filters) { 28 | 29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); 30 | } 31 | 32 | 
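// The PYBIND11_MODULE block below exposes these functions to Python under the
// module name chosen at build time (TORCH_EXTENSION_NAME); for the CUDA sibling
// built by setup.py in this directory that name is "dynamicconv_cuda". A hedged
// Python-side sketch, assuming that extension has been compiled:
//
//   import dynamicconv_cuda
//   outputs = dynamicconv_cuda.forward(x, filters, padding_l)
//   grads = dynamicconv_cuda.backward(grad_out, padding_l, x, filters)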
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); 34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); 35 | } 36 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 9 | 10 | setup( 11 | name='dynamicconv_layer', 12 | ext_modules=[ 13 | CUDAExtension( 14 | name='dynamicconv_cuda', 15 | sources=[ 16 | 'dynamicconv_cuda.cpp', 17 | 'dynamicconv_cuda_kernel.cu', 18 | ], 19 | ), 20 | ], 21 | cmdclass={ 22 | 'build_ext': BuildExtension 23 | }) 24 | -------------------------------------------------------------------------------- /fairseq/modules/gelu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with 7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs 8 | """ 9 | 10 | import math 11 | 12 | import torch 13 | 14 | 15 | def gelu_accurate(x): 16 | if not hasattr(gelu_accurate, "_a"): 17 | gelu_accurate._a = math.sqrt(2 / math.pi) 18 | return 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) 19 | 20 | 21 | def gelu(x: torch.Tensor) -> torch.Tensor: 22 | if hasattr(torch.nn.functional, 'gelu'): 23 | return torch.nn.functional.gelu(x.float()).type_as(x) 24 | else: 25 | return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) 26 | -------------------------------------------------------------------------------- /fairseq/modules/grad_multiply.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class GradMultiply(torch.autograd.Function): 10 | @staticmethod 11 | def forward(ctx, x, scale): 12 | ctx.scale = scale 13 | res = x.new(x) 14 | return res 15 | 16 | @staticmethod 17 | def backward(ctx, grad): 18 | return grad * ctx.scale, None 19 | -------------------------------------------------------------------------------- /fairseq/modules/highway.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from torch import nn 9 | 10 | 11 | class Highway(torch.nn.Module): 12 | """ 13 | A `Highway layer <https://arxiv.org/abs/1505.00387>`_. 14 | Adapted from the AllenNLP implementation. 
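    For clarity (added note): each layer computes y = g * x + (1 - g) * f(W x + b),
    where the gate g = sigmoid(W_g x + b_g); here both projections are fused into a
    single Linear layer of output size 2 * input_dim and split apart in forward().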
15 | """ 16 | 17 | def __init__( 18 | self, 19 | input_dim: int, 20 | num_layers: int = 1 21 | ): 22 | super(Highway, self).__init__() 23 | self.input_dim = input_dim 24 | self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2) 25 | for _ in range(num_layers)]) 26 | self.activation = nn.ReLU() 27 | 28 | self.reset_parameters() 29 | 30 | def reset_parameters(self): 31 | for layer in self.layers: 32 | # As per comment in AllenNLP: 33 | # We should bias the highway layer to just carry its input forward. We do that by 34 | # setting the bias on `B(x)` to be positive, because that means `g` will be biased to 35 | # be high, so we will carry the input forward. The bias on `B(x)` is the second half 36 | # of the bias vector in each Linear layer. 37 | nn.init.constant_(layer.bias[self.input_dim:], 1) 38 | 39 | nn.init.constant_(layer.bias[:self.input_dim], 0) 40 | nn.init.xavier_normal_(layer.weight) 41 | 42 | def forward( 43 | self, 44 | x: torch.Tensor 45 | ): 46 | for layer in self.layers: 47 | projection = layer(x) 48 | proj_x, gate = projection.chunk(2, dim=-1) 49 | proj_x = self.activation(proj_x) 50 | gate = torch.sigmoid(gate) 51 | x = gate * x + (gate.new_tensor([1]) - gate) * proj_x 52 | return x 53 | -------------------------------------------------------------------------------- /fairseq/modules/layer_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): 10 | if not export and torch.cuda.is_available(): 11 | try: 12 | from apex.normalization import FusedLayerNorm 13 | return FusedLayerNorm(normalized_shape, eps, elementwise_affine) 14 | except ImportError: 15 | pass 16 | return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) 17 | -------------------------------------------------------------------------------- /fairseq/modules/learned_positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | from fairseq import utils 9 | 10 | 11 | class LearnedPositionalEmbedding(nn.Embedding): 12 | """ 13 | This module learns positional embeddings up to a fixed maximum size. 14 | Padding ids are ignored by either offsetting based on padding_idx 15 | or by setting padding_idx to None and ensuring that the appropriate 16 | position ids are passed to the forward function. 17 | """ 18 | 19 | def __init__( 20 | self, 21 | num_embeddings: int, 22 | embedding_dim: int, 23 | padding_idx: int, 24 | ): 25 | super().__init__(num_embeddings, embedding_dim, padding_idx) 26 | self.onnx_trace = False 27 | if self.padding_idx is not None: 28 | self.max_positions = self.num_embeddings - self.padding_idx - 1 29 | else: 30 | self.max_positions = self.num_embeddings 31 | 32 | def forward(self, input, incremental_state=None, positions=None): 33 | """Input is expected to be of size [bsz x seqlen].""" 34 | assert ( 35 | (positions is None) or (self.padding_idx is None) 36 | ), "If positions is pre-computed then padding_idx should not be set." 
37 | 38 | if positions is None: 39 | if incremental_state is not None: 40 | # positions is the same for every token when decoding a single step 41 | # Without the int() cast, it doesn't work in some cases when exporting to ONNX 42 | positions = input.data.new(1, 1).fill_(int(self.padding_idx + input.size(1))) 43 | else: 44 | positions = utils.make_positions( 45 | input, self.padding_idx, onnx_trace=self.onnx_trace, 46 | ) 47 | return super().forward(positions) 48 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .lightconv_layer import LightconvLayer # noqa 7 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/lightconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | #include <torch/extension.h> 9 | #include <vector> 10 | 11 | std::vector<at::Tensor> lightconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector<at::Tensor> lightconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector<at::Tensor> lightconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return lightconv_cuda_forward(input, filters, padding_l); 36 | } 37 | 38 | std::vector<at::Tensor> lightconv_backward( 39 | at::Tensor gradOutput, 40 | int padding_l, 41 | at::Tensor input, 42 | at::Tensor filters) { 43 | 44 | CHECK_INPUT(gradOutput); 45 | CHECK_INPUT(input); 46 | CHECK_INPUT(filters); 47 | 48 | return lightconv_cuda_backward(gradOutput, padding_l, input, filters); 49 | } 50 | 51 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 52 | m.def("forward", &lightconv_forward, "lightconv forward (CUDA)"); 53 | m.def("backward", &lightconv_backward, "lightconv backward (CUDA)"); 54 | } 55 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
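# A hedged build sketch (not part of this file): the kernel source listed below is
# generated first, then the extension is compiled in place so `import lightconv_cuda`
# resolves:
#
#   cd fairseq/modules/lightconv_layer
#   python cuda_function_gen.py        # writes lightconv_cuda_kernel.cu
#   python setup.py build_ext --inplace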
6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 9 | 10 | setup( 11 | name='lightconv_layer', 12 | ext_modules=[ 13 | CUDAExtension('lightconv_cuda', [ 14 | 'lightconv_cuda.cpp', 15 | 'lightconv_cuda_kernel.cu', 16 | ]), 17 | ], 18 | cmdclass={ 19 | 'build_ext': BuildExtension 20 | }) 21 | -------------------------------------------------------------------------------- /fairseq/modules/logsumexp_moe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class LogSumExpMoE(torch.autograd.Function): 10 | """Standard LogSumExp forward pass, but use *posterior* for the backward. 11 | 12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" 13 | (Shen et al., 2019) `_. 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, logp, posterior, dim=-1): 18 | ctx.save_for_backward(posterior) 19 | ctx.dim = dim 20 | return torch.logsumexp(logp, dim=dim) 21 | 22 | @staticmethod 23 | def backward(ctx, grad_output): 24 | posterior, = ctx.saved_tensors 25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior 26 | return grad_logp, None, None 27 | -------------------------------------------------------------------------------- /fairseq/modules/positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | from .learned_positional_embedding import LearnedPositionalEmbedding 8 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding 9 | 10 | 11 | def PositionalEmbedding( 12 | num_embeddings: int, 13 | embedding_dim: int, 14 | padding_idx: int, 15 | learned: bool = False, 16 | ): 17 | if learned: 18 | # if padding_idx is specified then offset the embedding ids by 19 | # this index and adjust num_embeddings appropriately 20 | # TODO: The right place for this offset would be inside 21 | # LearnedPositionalEmbedding. Move this there for a cleaner implementation. 22 | if padding_idx is not None: 23 | num_embeddings = num_embeddings + padding_idx + 1 24 | m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) 25 | nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) 26 | if padding_idx is not None: 27 | nn.init.constant_(m.weight[padding_idx], 0) 28 | else: 29 | m = SinusoidalPositionalEmbedding( 30 | embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1, 31 | ) 32 | return m 33 | -------------------------------------------------------------------------------- /fairseq/modules/scalar_bias.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
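# A usage note for LogSumExpMoE above (a sketch, assuming `logp` holds per-expert
# log-likelihoods along dim and `posterior` the expert responsibilities): the
# forward pass is an ordinary logsumexp, but the backward pass routes gradient to
# each expert in proportion to its posterior rather than the softmax of logp, e.g.
#
#   loss = -LogSumExpMoE.apply(logp, posterior, 1)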
5 | # 6 | 7 | import torch 8 | 9 | 10 | class ScalarBias(torch.autograd.Function): 11 | """ 12 | Adds a vector of scalars, used in the self-attention mechanism to allow 13 | the model to optionally attend to this vector instead of the past 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, input, dim, bias_init): 18 | size = list(input.size()) 19 | size[dim] += 1 20 | output = input.new(*size).fill_(bias_init) 21 | output.narrow(dim, 1, size[dim] - 1).copy_(input) 22 | ctx.dim = dim 23 | return output 24 | 25 | @staticmethod 26 | def backward(ctx, grad): 27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None 28 | 29 | 30 | def scalar_bias(input, dim, bias_init=0): 31 | return ScalarBias.apply(input, dim, bias_init) 32 | -------------------------------------------------------------------------------- /fairseq/modules/sparse_transformer_sentence_encoder_layer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.modules import TransformerSentenceEncoderLayer 7 | from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention 8 | 9 | 10 | class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer): 11 | """ 12 | Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention) 13 | """ 14 | 15 | def __init__( 16 | self, 17 | embedding_dim: int = 768, 18 | ffn_embedding_dim: int = 3072, 19 | num_attention_heads: int = 8, 20 | dropout: float = 0.1, 21 | attention_dropout: float = 0.1, 22 | activation_dropout: float = 0.1, 23 | activation_fn: str = 'relu', 24 | add_bias_kv: bool = False, 25 | add_zero_attn: bool = False, 26 | export: bool = False, 27 | is_bidirectional: bool = True, 28 | stride: int = 32, 29 | expressivity: int = 8, 30 | ) -> None: 31 | 32 | super().__init__( 33 | embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, 34 | attention_dropout, activation_dropout, activation_fn, add_bias_kv, 35 | add_zero_attn, export 36 | ) 37 | 38 | self.self_attn = SparseMultiheadAttention( 39 | self.embedding_dim, 40 | num_attention_heads, 41 | dropout=attention_dropout, 42 | add_bias_kv=add_bias_kv, 43 | add_zero_attn=add_zero_attn, 44 | self_attention=True, 45 | is_bidirectional=is_bidirectional, 46 | stride=stride, 47 | expressivity=expressivity, 48 | ) 49 | -------------------------------------------------------------------------------- /fairseq/modules/unfold.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn.functional as F 7 | 8 | 9 | def unfold1d(x, kernel_size, padding_l, pad_value=0): 10 | '''unfold T x B x C to T x B x C x K''' 11 | if kernel_size > 1: 12 | T, B, C = x.size() 13 | x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value) 14 | x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C)) 15 | else: 16 | x = x.unsqueeze(3) 17 | return x 18 | -------------------------------------------------------------------------------- /fairseq/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
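# A numeric sketch of the as_strided trick in fairseq/modules/unfold.py above
# (illustrative values; not part of this file): left-padding by padding_l and
# striding by B*C along the new axis exposes, for each step t, the window of the
# kernel_size most recent timesteps without copying any data.
import torch
import torch.nn.functional as F

x = torch.arange(8.).view(4, 1, 2)                    # T x B x C = 4 x 1 x 2
x = F.pad(x, (0, 0, 0, 0, 2, 0))                      # pad 2 steps on the left
windows = x.as_strided((4, 1, 2, 3), (2, 2, 1, 2))    # T x B x C x K, zero-copy view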
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | from fairseq.optim.fairseq_optimizer import FairseqOptimizer 11 | from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer 12 | from fairseq.optim.bmuf import FairseqBMUF # noqa 13 | 14 | 15 | __all__ = [ 16 | 'FairseqOptimizer', 17 | 'FP16Optimizer', 18 | 'MemoryEfficientFP16Optimizer', 19 | ] 20 | 21 | 22 | build_optimizer, register_optimizer, OPTIMIZER_REGISTRY = registry.setup_registry( 23 | '--optimizer', 24 | base_class=FairseqOptimizer, 25 | default='nag', 26 | ) 27 | 28 | 29 | # automatically import any Python files in the optim/ directory 30 | for file in os.listdir(os.path.dirname(__file__)): 31 | if file.endswith('.py') and not file.startswith('_'): 32 | module = file[:file.find('.py')] 33 | importlib.import_module('fairseq.optim.' + module) 34 | -------------------------------------------------------------------------------- /fairseq/optim/adadelta.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('adadelta') 12 | class Adadelta(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', 22 | help='coefficient used for computing a running average of squared gradients') 23 | parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', 24 | help='term added to the denominator to improve numerical stability') 25 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 26 | help='weight decay') 27 | parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') 28 | # fmt: on 29 | 30 | @property 31 | def optimizer_config(self): 32 | """ 33 | Return a kwarg dictionary that will be used to override optimizer 34 | args stored in checkpoints. This allows us to load a checkpoint and 35 | resume training using a different set of optimizer args, e.g., with a 36 | different learning rate. 37 | """ 38 | return { 39 | 'lr': self.args.lr[0], 40 | 'rho': self.args.adadelta_rho, 41 | 'eps': self.args.adadelta_eps, 42 | 'weight_decay': self.args.weight_decay, 43 | } 44 | 45 | @property 46 | def supports_flat_params(self): 47 | return True 48 | -------------------------------------------------------------------------------- /fairseq/optim/adagrad.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . 
import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('adagrad') 12 | class Adagrad(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 22 | help='weight decay') 23 | # fmt: on 24 | 25 | @property 26 | def optimizer_config(self): 27 | """ 28 | Return a kwarg dictionary that will be used to override optimizer 29 | args stored in checkpoints. This allows us to load a checkpoint and 30 | resume training using a different set of optimizer args, e.g., with a 31 | different learning rate. 32 | """ 33 | return { 34 | 'lr': self.args.lr[0], 35 | 'weight_decay': self.args.weight_decay, 36 | } 37 | 38 | @property 39 | def supports_flat_params(self): 40 | return True 41 | -------------------------------------------------------------------------------- /fairseq/optim/fused_lamb.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.optim import FairseqOptimizer, register_optimizer 7 | 8 | 9 | @register_optimizer('lamb') 10 | class FairseqLAMB(FairseqOptimizer): 11 | """LAMB optimizer.""" 12 | 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | try: 16 | from apex.optimizers import FusedLAMB 17 | self._optimizer = FusedLAMB(params, **self.optimizer_config) 18 | except ImportError: 19 | raise ImportError('Please install apex to use LAMB optimizer') 20 | 21 | @staticmethod 22 | def add_args(parser): 23 | """Add optimizer-specific arguments to the parser.""" 24 | # fmt: off 25 | parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B', 26 | help='betas for LAMB optimizer') 27 | parser.add_argument('--lamb-eps', type=float, default=1e-8, metavar='D', 28 | help='epsilon for LAMB optimizer') 29 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 30 | help='weight decay') 31 | # fmt: on 32 | 33 | @property 34 | def optimizer_config(self): 35 | """ 36 | Return a kwarg dictionary that will be used to override optimizer 37 | args stored in checkpoints. This allows us to load a checkpoint and 38 | resume training using a different set of optimizer args, e.g., with a 39 | different learning rate. 40 | """ 41 | return { 42 | 'lr': self.args.lr[0], 43 | 'betas': eval(self.args.lamb_betas), 44 | 'eps': self.args.lamb_eps, 45 | 'weight_decay': self.args.weight_decay, 46 | } 47 | 48 | @property 49 | def supports_flat_params(self): 50 | return False 51 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
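# A hedged sketch of the registry pattern shared by this package and
# optim/__init__.py above ('my_fixed' is an illustrative name): dropping a file in
# this directory and decorating the class is enough, since the import loop at the
# bottom of this file pulls in every module automatically.
#
#   from . import FairseqLRScheduler, register_lr_scheduler
#
#   @register_lr_scheduler('my_fixed')
#   class MyFixedSchedule(FairseqLRScheduler):
#       def step_update(self, num_updates):
#           return self.optimizer.get_lr()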
5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import FairseqLRScheduler 11 | 12 | 13 | build_lr_scheduler, register_lr_scheduler, LR_SCHEDULER_REGISTRY = registry.setup_registry( 14 | '--lr-scheduler', 15 | base_class=FairseqLRScheduler, 16 | default='fixed', 17 | ) 18 | 19 | # automatically import any Python files in the optim/lr_scheduler/ directory 20 | for file in os.listdir(os.path.dirname(__file__)): 21 | if file.endswith('.py') and not file.startswith('_'): 22 | module = file[:file.find('.py')] 23 | importlib.import_module('fairseq.optim.lr_scheduler.' + module) 24 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .. import FairseqOptimizer 7 | 8 | 9 | class FairseqLRScheduler(object): 10 | 11 | def __init__(self, args, optimizer): 12 | super().__init__() 13 | if not isinstance(optimizer, FairseqOptimizer): 14 | raise ValueError('optimizer must be an instance of FairseqOptimizer') 15 | self.args = args 16 | self.optimizer = optimizer 17 | self.best = None 18 | 19 | @staticmethod 20 | def add_args(parser): 21 | """Add arguments to the parser for this LR scheduler.""" 22 | pass 23 | 24 | def state_dict(self): 25 | """Return the LR scheduler state dict.""" 26 | return {'best': self.best} 27 | 28 | def load_state_dict(self, state_dict): 29 | """Load an LR scheduler state dict.""" 30 | self.best = state_dict['best'] 31 | 32 | def step(self, epoch, val_loss=None): 33 | """Update the learning rate at the end of the given epoch.""" 34 | if val_loss is not None: 35 | if self.best is None: 36 | self.best = val_loss 37 | else: 38 | self.best = min(self.best, val_loss) 39 | 40 | def step_update(self, num_updates): 41 | """Update the learning rate after each update.""" 42 | return self.optimizer.get_lr() 43 | -------------------------------------------------------------------------------- /fairseq/optim/lr_scheduler/linear_schedule_with_warmup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import FairseqLRScheduler, register_lr_scheduler 7 | 8 | 9 | @register_lr_scheduler('linear') 10 | class LinearSchedule(FairseqLRScheduler): 11 | def __init__(self, args, optimizer): 12 | super().__init__(args, optimizer) 13 | if len(args.lr) > 1: 14 | raise ValueError( 15 | 'Cannot use a fixed learning rate schedule with linear.' 16 | ' Consider --lr-scheduler=fixed instead.' 
17 | ) 18 | self.base_lr = args.lr[0] 19 | self.tot_steps = args.max_update 20 | self.warmup_steps = max(0, int(self.tot_steps * args.warmup_ratio)) 21 | self.optimizer.set_lr(self.base_lr) 22 | 23 | @staticmethod 24 | def add_args(parser): 25 | """Add arguments to the parser for this LR scheduler.""" 26 | parser.add_argument('--warmup-ratio', default=0.1, type=float, metavar='r', 27 | help='warmup the learning rate linearly for the first r*T updates') 28 | 29 | def step(self, epoch, val_loss=None): 30 | """Update the learning rate at the end of the given epoch.""" 31 | super().step(epoch, val_loss) 32 | # we don't change the learning rate at epoch boundaries 33 | return self.optimizer.get_lr() 34 | 35 | def step_update(self, num_updates): 36 | """Update the learning rate after each update.""" 37 | if num_updates < self.warmup_steps: 38 | self.lr = self.base_lr * (float(num_updates + 1) / float(max(1, (self.warmup_steps + 1)))) 39 | else: 40 | self.lr = self.base_lr * (float(self.tot_steps - num_updates) / float(max(1, self.tot_steps - self.warmup_steps))) 41 | self.optimizer.set_lr(self.lr) 42 | return self.lr 43 | -------------------------------------------------------------------------------- /fairseq/optim/sgd.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('sgd') 12 | class SGD(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.SGD(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--momentum', default=0.0, type=float, metavar='M', 22 | help='momentum factor') 23 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 24 | help='weight decay') 25 | # fmt: on 26 | 27 | @property 28 | def optimizer_config(self): 29 | """ 30 | Return a kwarg dictionary that will be used to override optimizer 31 | args stored in checkpoints. This allows us to load a checkpoint and 32 | resume training using a different set of optimizer args, e.g., with a 33 | different learning rate. 34 | """ 35 | return { 36 | 'lr': self.args.lr[0], 37 | 'momentum': self.args.momentum, 38 | 'weight_decay': self.args.weight_decay, 39 | } 40 | 41 | @property 42 | def supports_flat_params(self): 43 | return True 44 | -------------------------------------------------------------------------------- /fairseq/pdb.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import multiprocessing 7 | import os 8 | import pdb 9 | import sys 10 | 11 | 12 | __all__ = ['set_trace'] 13 | 14 | 15 | _stdin = [None] 16 | _stdin_lock = multiprocessing.Lock() 17 | try: 18 | _stdin_fd = sys.stdin.fileno() 19 | except Exception: 20 | _stdin_fd = None 21 | 22 | 23 | class MultiprocessingPdb(pdb.Pdb): 24 | """A Pdb wrapper that works in a multiprocessing environment. 
25 | 26 | Usage: `from fairseq import pdb; pdb.set_trace()` 27 | """ 28 | 29 | def __init__(self): 30 | pdb.Pdb.__init__(self, nosigint=True) 31 | 32 | def _cmdloop(self): 33 | stdin_bak = sys.stdin 34 | with _stdin_lock: 35 | try: 36 | if _stdin_fd is not None: 37 | if not _stdin[0]: 38 | _stdin[0] = os.fdopen(_stdin_fd) 39 | sys.stdin = _stdin[0] 40 | self.cmdloop() 41 | finally: 42 | sys.stdin = stdin_bak 43 | 44 | 45 | def set_trace(): 46 | pdb = MultiprocessingPdb() 47 | pdb.set_trace(sys._getframe().f_back) 48 | -------------------------------------------------------------------------------- /fairseq/tasks/translation_from_pretrained_xlm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary 7 | from fairseq.tasks.translation import TranslationTask 8 | 9 | from . import register_task 10 | 11 | 12 | @register_task("translation_from_pretrained_xlm") 13 | class TranslationFromPretrainedXLMTask(TranslationTask): 14 | """ 15 | Same as TranslationTask except use the MaskedLMDictionary class so that 16 | we can load data that was binarized with the MaskedLMDictionary class. 17 | 18 | This task should be used for the entire training pipeline when we want to 19 | train an NMT model from a pretrained XLM checkpoint: binarizing NMT data, 20 | training NMT with the pretrained XLM checkpoint, and subsequent evaluation 21 | of that trained model. 22 | """ 23 | 24 | @classmethod 25 | def load_dictionary(cls, filename): 26 | """Load the masked LM dictionary from the filename 27 | 28 | Args: 29 | filename (str): the filename 30 | """ 31 | return MaskedLMDictionary.load(filename) 32 | -------------------------------------------------------------------------------- /fairseq/tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | SPACE_NORMALIZER = re.compile(r"\s+") 9 | 10 | 11 | def tokenize_line(line): 12 | line = SPACE_NORMALIZER.sub(" ", line) 13 | line = line.strip() 14 | return line.split() 15 | -------------------------------------------------------------------------------- /fairseq_cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/fairseq_cli/__init__.py -------------------------------------------------------------------------------- /get_data.sh: -------------------------------------------------------------------------------- 1 | dataset=$1 2 | 3 | if [ "${dataset}" = "wikitext-103" ]; then 4 | echo Download and process wikitext-103 5 | 6 | mkdir -p data-bin/wikitext-103 7 | cd data-bin/wikitext-103 8 | 9 | mkdir raw_data 10 | cd raw_data 11 | wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip 12 | unzip wikitext-103-v1.zip 13 | 14 | cd ../../.. 
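# Note (assumption based on standard fairseq preprocessing): the step below
# binarizes the raw text into data-bin/wikitext-103 as .bin/.idx files plus a
# dict.txt, the layout that train.py and eval_lm.py expect when pointed at
# data-bin/wikitext-103.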
15 | python preprocess.py \ 16 | --only-source \ 17 | --trainpref data-bin/wikitext-103/raw_data/wikitext-103/wiki.train.tokens \ 18 | --validpref data-bin/wikitext-103/raw_data/wikitext-103/wiki.valid.tokens \ 19 | --testpref data-bin/wikitext-103/raw_data/wikitext-103/wiki.test.tokens \ 20 | --destdir data-bin/wikitext-103 \ 21 | --workers 20 22 | 23 | elif [ "${dataset}" = "enwik8" ]; then 24 | echo Download and process enwik8 25 | 26 | mkdir -p data-bin/enwik8 27 | cd data-bin/enwik8 28 | 29 | mkdir raw_data 30 | cd raw_data 31 | wget --continue http://mattmahoney.net/dc/enwik8.zip 32 | wget https://raw.githubusercontent.com/salesforce/awd-lstm-lm/master/data/enwik8/prep_enwik8.py 33 | python prep_enwik8.py 34 | 35 | cd ../../.. 36 | python preprocess.py \ 37 | --only-source \ 38 | --trainpref data-bin/enwik8/raw_data/train.txt \ 39 | --validpref data-bin/enwik8/raw_data/valid.txt \ 40 | --testpref data-bin/enwik8/raw_data/test.txt \ 41 | --destdir data-bin/enwik8 \ 42 | --workers 20 43 | else 44 | echo "Dataset ${dataset} is not supported!" 45 | fi 46 | -------------------------------------------------------------------------------- /images/batching.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/images/batching.png -------------------------------------------------------------------------------- /images/method.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/images/method.png -------------------------------------------------------------------------------- /machine_translation/bm25_make_batches.py: -------------------------------------------------------------------------------- 1 | import json 2 | from tqdm import tqdm 3 | import sys 4 | import os 5 | 6 | import argparse 7 | 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument('--results_path', type=str, help='the path where to save the retrieval results') 10 | parser.add_argument('--batch_file', type=str, help='the path of the output batch file') 11 | 12 | parser.add_argument('--num_shards', type=int, default=1) 13 | args = parser.parse_args() 14 | 15 | all_res = [] 16 | for i in range(args.num_shards): 17 | with open(os.path.join(args.results_path, 'shard%d.json'%i), 'r') as f: 18 | data = json.load(f) 19 | all_res += data 20 | 21 | all_res.sort(key=lambda x: x['id']) 22 | for i in range(len(all_res)): 23 | assert i == all_res[i]['id'] 24 | 25 | import random 26 | random.seed(1) 27 | 28 | S = set(range(len(all_res))) 29 | ids = list(range(len(all_res))) 30 | random.shuffle(ids) 31 | 32 | p = 1 33 | 34 | num_found = 0 35 | x = ids[0] 36 | S.remove(x) 37 | indices = [x] 38 | for i in range(len(all_res) - 1): 39 | found = False 40 | for y in all_res[x]['retrieval'][1:]: 41 | if y in S: 42 | found = True 43 | num_found += 1 44 | x = y 45 | break 46 | if not found: 47 | while ids[p] not in S: 48 | p += 1 49 | x = ids[p] 50 | S.remove(x) 51 | indices.append(x) 52 | 53 | print('num found', num_found) 54 | print('num indices', len(indices)) 55 | 56 | with open(args.batch_file, 'w') as f: 57 | json.dump(indices, f) 58 | -------------------------------------------------------------------------------- /machine_translation/bm25_search.py: -------------------------------------------------------------------------------- 1 | from pyserini.search.lucene import LuceneSearcher 2 | 
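# A hedged sketch of building the Lucene index that --index_path expects, using
# pyserini's JSON-collection indexer over the directory holding the samples file
# (paths and thread count are illustrative):
#
#   python -m pyserini.index.lucene \
#     --collection JsonCollection \
#     --input segments_dir/ \
#     --index bm25_index/ \
#     --generator DefaultLuceneDocumentGenerator \
#     --threads 8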
import json 3 | from tqdm import tqdm 4 | import sys 5 | import os 6 | 7 | import argparse 8 | 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('--index_path', type=str, help='the path of BM25 index') 11 | parser.add_argument('--samples_path', type=str, help='the path of the file storing all the segments') 12 | parser.add_argument('--results_path', type=str, help='the path where to save the retrieval results') 13 | 14 | parser.add_argument('--num_shards', type=int, default=1) 15 | parser.add_argument('--shard_id', type=int, default=0) 16 | 17 | args = parser.parse_args() 18 | 19 | print('num shards: {}; shard id: {}'.format(args.num_shards, args.shard_id)) 20 | with open(args.samples_path, 'r') as f: 21 | blocks = json.load(f) 22 | 23 | id2c = {int(x['id']): x['contents'] for x in blocks} 24 | 25 | searcher = LuceneSearcher(args.index_path) 26 | 27 | N = len(blocks) 28 | N_per_S = (N + args.num_shards - 1) // args.num_shards 29 | 30 | ret_results = [] 31 | for b in tqdm(blocks[N_per_S * args.shard_id: N_per_S * (args.shard_id + 1)]): 32 | try: 33 | hits = searcher.search(b['contents'], 20) 34 | ret = {'id': b['id'], 'retrieval': [int(h.docid) for h in hits]} 35 | except Exception: 36 | print('error!') 37 | ret = {'id': b['id'], 'retrieval': [int(b['id'])]} 38 | ret_results.append(ret) 39 | 40 | if not os.path.exists(args.results_path): 41 | os.mkdir(args.results_path) 42 | 43 | with open(os.path.join(args.results_path, 'shard%d.json'%args.shard_id), 'w') as f: 44 | json.dump(ret_results, f) 45 | -------------------------------------------------------------------------------- /machine_translation/fairseq/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | __all__ = ["pdb"] 8 | __version__ = "0.10.1" 9 | 10 | import sys 11 | 12 | # backwards compatibility to support `from fairseq.meters import AverageMeter` 13 | from fairseq.logging import meters, metrics, progress_bar # noqa 14 | 15 | sys.modules["fairseq.meters"] = meters 16 | sys.modules["fairseq.metrics"] = metrics 17 | sys.modules["fairseq.progress_bar"] = progress_bar 18 | 19 | import fairseq.criterions # noqa 20 | import fairseq.models # noqa 21 | import fairseq.modules # noqa 22 | import fairseq.optim # noqa 23 | import fairseq.optim.lr_scheduler # noqa 24 | import fairseq.pdb # noqa 25 | import fairseq.scoring # noqa 26 | import fairseq.tasks # noqa 27 | import fairseq.token_generation_constraints # noqa 28 | 29 | import fairseq.benchmark # noqa 30 | import fairseq.model_parallel # noqa 31 | -------------------------------------------------------------------------------- /machine_translation/fairseq/benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | # import models/tasks to register them 7 | from . 
import dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa 8 | -------------------------------------------------------------------------------- /machine_translation/fairseq/clib/libbleu/module.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include <Python.h> 10 | 11 | 12 | static PyMethodDef method_def[] = { 13 | {NULL, NULL, 0, NULL} 14 | }; 15 | 16 | static struct PyModuleDef module_def = { 17 | PyModuleDef_HEAD_INIT, 18 | "libbleu", /* name of module */ 19 | NULL, /* module documentation, may be NULL */ 20 | -1, /* size of per-interpreter state of the module, 21 | or -1 if the module keeps state in global variables. */ 22 | method_def 23 | }; 24 | 25 | 26 | #if PY_MAJOR_VERSION == 2 27 | PyMODINIT_FUNC init_libbleu() 28 | #else 29 | PyMODINIT_FUNC PyInit_libbleu() 30 | #endif 31 | { 32 | PyObject *m = PyModule_Create(&module_def); 33 | if (!m) { 34 | return NULL; 35 | } 36 | return m; 37 | } 38 | -------------------------------------------------------------------------------- /machine_translation/fairseq/clib/libnat_cuda/edit_dist.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #pragma once 10 | 11 | #include <torch/extension.h> 12 | 13 | torch::Tensor LevenshteinDistanceCuda( 14 | torch::Tensor source, 15 | torch::Tensor target, 16 | torch::Tensor source_length, 17 | torch::Tensor target_length); 18 | 19 | torch::Tensor GenerateDeletionLabelCuda( 20 | torch::Tensor source, 21 | torch::Tensor operations); 22 | 23 | std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda( 24 | torch::Tensor source, 25 | torch::Tensor operations); 26 | -------------------------------------------------------------------------------- /machine_translation/fairseq/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | import importlib 8 | import os 9 | from argparse import Namespace 10 | from typing import Union 11 | 12 | from fairseq import registry 13 | from fairseq.criterions.fairseq_criterion import ( # noqa 14 | FairseqCriterion, 15 | LegacyFairseqCriterion, 16 | ) 17 | from omegaconf import DictConfig 18 | 19 | 20 | ( 21 | build_criterion_, 22 | register_criterion, 23 | CRITERION_REGISTRY, 24 | CRITERION_DATACLASS_REGISTRY, 25 | ) = registry.setup_registry( 26 | "--criterion", base_class=FairseqCriterion, default="cross_entropy" 27 | ) 28 | 29 | 30 | def build_criterion(criterion_cfg: Union[DictConfig, Namespace], task): 31 | return build_criterion_(criterion_cfg, task) 32 | 33 | 34 | # automatically import any Python files in the criterions/ directory 35 | for file in os.listdir(os.path.dirname(__file__)): 36 | if file.endswith(".py") and not file.startswith("_"): 37 | file_name = file[: file.find(".py")] 38 | importlib.import_module("fairseq.criterions." 
+ file_name) 39 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/append_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class AppendTokenDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, token=None): 14 | super().__init__(dataset) 15 | self.token = token 16 | if token is not None: 17 | self._sizes = np.array(dataset.sizes) + 1 18 | else: 19 | self._sizes = dataset.sizes 20 | 21 | def __getitem__(self, idx): 22 | item = self.dataset[idx] 23 | if self.token is not None: 24 | item = torch.cat([item, item.new([self.token])]) 25 | return item 26 | 27 | @property 28 | def sizes(self): 29 | return self._sizes 30 | 31 | def num_tokens(self, index): 32 | n = self.dataset.num_tokens(index) 33 | if self.token is not None: 34 | n += 1 35 | return n 36 | 37 | def size(self, index): 38 | n = self.dataset.size(index) 39 | if self.token is not None: 40 | n += 1 41 | return n 42 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/audio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/machine_translation/fairseq/data/audio/__init__.py -------------------------------------------------------------------------------- /machine_translation/fairseq/data/audio/feature_transforms/global_cmvn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from fairseq.data.audio.feature_transforms import ( 3 | AudioFeatureTransform, 4 | register_audio_feature_transform, 5 | ) 6 | 7 | 8 | @register_audio_feature_transform("global_cmvn") 9 | class GlobalCMVN(AudioFeatureTransform): 10 | """Global CMVN (cepstral mean and variance normalization). 
The global mean 11 | and variance need to be pre-computed and stored in NumPy format (.npz).""" 12 | 13 | @classmethod 14 | def from_config_dict(cls, config=None): 15 | _config = {} if config is None else config 16 | return GlobalCMVN(_config.get("stats_npz_path")) 17 | 18 | def __init__(self, stats_npz_path): 19 | stats = np.load(stats_npz_path) 20 | self.mean, self.std = stats["mean"], stats["std"] 21 | 22 | def __call__(self, x): 23 | x = np.subtract(x, self.mean) 24 | x = np.divide(x, self.std) 25 | return x 26 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/audio/feature_transforms/utterance_cmvn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from fairseq.data.audio.feature_transforms import ( 3 | AudioFeatureTransform, 4 | register_audio_feature_transform, 5 | ) 6 | 7 | 8 | @register_audio_feature_transform("utterance_cmvn") 9 | class UtteranceCMVN(AudioFeatureTransform): 10 | """Utterance-level CMVN (cepstral mean and variance normalization)""" 11 | 12 | @classmethod 13 | def from_config_dict(cls, config=None): 14 | _config = {} if config is None else config 15 | return UtteranceCMVN( 16 | _config.get("norm_means", True), 17 | _config.get("norm_vars", True), 18 | ) 19 | 20 | def __init__(self, norm_means=True, norm_vars=True): 21 | self.norm_means, self.norm_vars = norm_means, norm_vars 22 | 23 | def __repr__(self): 24 | return ( 25 | self.__class__.__name__ 26 | + f"(norm_means={self.norm_means}, norm_vars={self.norm_vars})" 27 | ) 28 | 29 | def __call__(self, x): 30 | mean = x.mean(axis=0) 31 | square_sums = (x ** 2).sum(axis=0) 32 | 33 | if self.norm_means: 34 | x = np.subtract(x, mean) 35 | if self.norm_vars: 36 | var = square_sums / x.shape[0] - mean ** 2 37 | std = np.sqrt(np.maximum(var, 1e-10)) 38 | x = np.divide(x, std) 39 | 40 | return x 41 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/colorize_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class ColorizeDataset(BaseWrapperDataset): 12 | """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ 13 | 14 | def __init__(self, dataset, color_getter): 15 | super().__init__(dataset) 16 | self.color_getter = color_getter 17 | 18 | def collater(self, samples): 19 | base_collate = super().collater(samples) 20 | if len(base_collate) > 0: 21 | base_collate["net_input"]["colors"] = torch.tensor( 22 | list(self.color_getter(self.dataset, s["id"]) for s in samples), 23 | dtype=torch.long, 24 | ) 25 | return base_collate 26 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/concat_sentences_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . 
import FairseqDataset 9 | 10 | 11 | class ConcatSentencesDataset(FairseqDataset): 12 | def __init__(self, *datasets): 13 | super().__init__() 14 | self.datasets = datasets 15 | assert all( 16 | len(ds) == len(datasets[0]) for ds in datasets 17 | ), "datasets must have the same length" 18 | 19 | def __getitem__(self, index): 20 | return torch.cat([ds[index] for ds in self.datasets]) 21 | 22 | def __len__(self): 23 | return len(self.datasets[0]) 24 | 25 | def collater(self, samples): 26 | return self.datasets[0].collater(samples) 27 | 28 | @property 29 | def sizes(self): 30 | return sum(ds.sizes for ds in self.datasets) 31 | 32 | def num_tokens(self, index): 33 | return sum(ds.num_tokens(index) for ds in self.datasets) 34 | 35 | def size(self, index): 36 | return sum(ds.size(index) for ds in self.datasets) 37 | 38 | def ordered_indices(self): 39 | return self.datasets[0].ordered_indices() 40 | 41 | @property 42 | def supports_prefetch(self): 43 | return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets) 44 | 45 | def prefetch(self, indices): 46 | for ds in self.datasets: 47 | if getattr(ds, "supports_prefetch", False): 48 | ds.prefetch(indices) 49 | 50 | def set_epoch(self, epoch): 51 | super().set_epoch(epoch) 52 | for ds in self.datasets: 53 | if hasattr(ds, "set_epoch"): 54 | ds.set_epoch(epoch) 55 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | import importlib 8 | import os 9 | 10 | from fairseq import registry 11 | 12 | 13 | build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry( 14 | "--tokenizer", 15 | default=None, 16 | ) 17 | 18 | 19 | build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry( 20 | "--bpe", 21 | default=None, 22 | ) 23 | 24 | 25 | # automatically import any Python files in the encoders/ directory 26 | for file in os.listdir(os.path.dirname(__file__)): 27 | if file.endswith(".py") and not file.startswith("_"): 28 | module = file[: file.find(".py")] 29 | importlib.import_module("fairseq.data.encoders." + module) 30 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/byte_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
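# Round-trip sketch (illustrative, not part of the original file; the model
# path is a user-supplied placeholder): with args.sentencepiece_model_path set,
# bpe = ByteBPE(args); pieces = bpe.encode("née") maps text -> UTF-8 bytes ->
# printable byte-chars (see byte_utils.py below) -> sentencepiece pieces, and
# ByteBPE.decode(pieces) strips the piece boundaries and re-decodes the bytes,
# recovering "née".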
5 | 6 | 7 | from fairseq import file_utils 8 | from fairseq.data.encoders import register_bpe 9 | from fairseq.data.encoders.byte_utils import ( 10 | SPACE, 11 | SPACE_ESCAPE, 12 | byte_encode, 13 | smart_byte_decode, 14 | ) 15 | 16 | 17 | @register_bpe("byte_bpe") 18 | class ByteBPE(object): 19 | @staticmethod 20 | def add_args(parser): 21 | # fmt: off 22 | parser.add_argument('--sentencepiece-model-path', type=str, 23 | help='path to sentencepiece model') 24 | # fmt: on 25 | 26 | def __init__(self, args): 27 | vocab = file_utils.cached_path(args.sentencepiece_model_path) 28 | try: 29 | import sentencepiece as spm 30 | 31 | self.sp = spm.SentencePieceProcessor() 32 | self.sp.Load(vocab) 33 | except ImportError: 34 | raise ImportError( 35 | "Please install sentencepiece with: pip install sentencepiece" 36 | ) 37 | 38 | def encode(self, x: str) -> str: 39 | byte_encoded = byte_encode(x) 40 | return SPACE.join(self.sp.EncodeAsPieces(byte_encoded)) 41 | 42 | @staticmethod 43 | def decode(x: str) -> str: 44 | unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) 45 | return smart_byte_decode(unescaped) 46 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/byte_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | 9 | WHITESPACE_NORMALIZER = re.compile(r"\s+") 10 | SPACE = chr(32) 11 | SPACE_ESCAPE = chr(9601) 12 | # excluding non-breaking space (160) here 13 | PRINTABLE_LATIN = set( 14 | list(range(32, 126 + 1)) + list(range(161, 172 + 1)) + list(range(174, 255 + 1)) 15 | ) 16 | BYTE_TO_BCHAR = { 17 | b: chr(b) if b in PRINTABLE_LATIN else chr(256 + b) for b in range(256) 18 | } 19 | BCHAR_TO_BYTE = {bc: b for b, bc in BYTE_TO_BCHAR.items()} 20 | 21 | 22 | def byte_encode(x: str) -> str: 23 | normalized = WHITESPACE_NORMALIZER.sub(SPACE, x) 24 | return "".join([BYTE_TO_BCHAR[b] for b in normalized.encode("utf-8")]) 25 | 26 | 27 | def byte_decode(x: str) -> str: 28 | try: 29 | return bytes([BCHAR_TO_BYTE[bc] for bc in x]).decode("utf-8") 30 | except ValueError: 31 | return "" 32 | 33 | 34 | def smart_byte_decode(x: str) -> str: 35 | output = byte_decode(x) 36 | if output == "": 37 | # DP the best recovery (max valid chars) if it's broken 38 | n_bytes = len(x) 39 | f = [0 for _ in range(n_bytes + 1)] 40 | pt = [0 for _ in range(n_bytes + 1)] 41 | for i in range(1, n_bytes + 1): 42 | f[i], pt[i] = f[i - 1], i - 1 43 | for j in range(1, min(4, i) + 1): 44 | if f[i - j] + 1 > f[i] and len(byte_decode(x[i - j : i])) > 0: 45 | f[i], pt[i] = f[i - j] + 1, i - j 46 | cur_pt = n_bytes 47 | while cur_pt > 0: 48 | if f[cur_pt] == f[pt[cur_pt]] + 1: 49 | output = byte_decode(x[pt[cur_pt] : cur_pt]) + output 50 | cur_pt = pt[cur_pt] 51 | return output 52 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/bytes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
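# Doctest-style sketch (added for clarity, not upstream code): the Bytes codec
# below emits one token per UTF-8 byte, so Bytes.encode("hi") == 'h i' and
# Bytes.decode('h i') == 'hi'. For malformed model output, smart_byte_decode
# (defined in byte_utils.py above) runs a small dynamic program that recovers
# the maximum number of valid characters instead of returning an empty string.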
5 | 6 | 7 | from fairseq.data.encoders import register_bpe 8 | from fairseq.data.encoders.byte_utils import ( 9 | SPACE, 10 | SPACE_ESCAPE, 11 | byte_encode, 12 | smart_byte_decode, 13 | ) 14 | 15 | 16 | @register_bpe("bytes") 17 | class Bytes(object): 18 | def __init__(self, args): 19 | pass 20 | 21 | @staticmethod 22 | def add_args(parser): 23 | pass 24 | 25 | @staticmethod 26 | def encode(x: str) -> str: 27 | encoded = byte_encode(x) 28 | escaped = encoded.replace(SPACE, SPACE_ESCAPE) 29 | return SPACE.join(list(escaped)) 30 | 31 | @staticmethod 32 | def decode(x: str) -> str: 33 | unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) 34 | return smart_byte_decode(unescaped) 35 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/characters.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | SPACE = chr(32) 11 | SPACE_ESCAPE = chr(9601) 12 | 13 | 14 | @register_bpe("characters") 15 | class Characters(object): 16 | def __init__(self, args): 17 | pass 18 | 19 | @staticmethod 20 | def add_args(parser): 21 | pass 22 | 23 | @staticmethod 24 | def encode(x: str) -> str: 25 | escaped = x.replace(SPACE, SPACE_ESCAPE) 26 | return SPACE.join(list(escaped)) 27 | 28 | @staticmethod 29 | def decode(x: str) -> str: 30 | return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) 31 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/fastbpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe("fastbpe") 11 | class fastBPE(object): 12 | @staticmethod 13 | def add_args(parser): 14 | # fmt: off 15 | parser.add_argument('--bpe-codes', type=str, 16 | help='path to fastBPE BPE') 17 | # fmt: on 18 | 19 | def __init__(self, args): 20 | if args.bpe_codes is None: 21 | raise ValueError("--bpe-codes is required for --bpe=fastbpe") 22 | codes = file_utils.cached_path(args.bpe_codes) 23 | try: 24 | import fastBPE 25 | 26 | self.bpe = fastBPE.fastBPE(codes) 27 | self.bpe_symbol = "@@ " 28 | except ImportError: 29 | raise ImportError("Please install fastBPE with: pip install fastBPE") 30 | 31 | def encode(self, x: str) -> str: 32 | return self.bpe.apply([x])[0] 33 | 34 | def decode(self, x: str) -> str: 35 | return (x + " ").replace(self.bpe_symbol, "").rstrip() 36 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/gpt2_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
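# Orientation comment (illustrative, not upstream): GPT2BPE.encode returns the
# BPE token ids joined by spaces — e.g. "Hello world" becomes "15496 995" under
# the standard GPT-2 vocabulary — and decode reverses this, so the rest of
# fairseq can treat the result as an ordinary space-separated token stream.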
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | from .gpt2_bpe_utils import get_encoder 10 | 11 | 12 | DEFAULT_ENCODER_JSON = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json" 13 | DEFAULT_VOCAB_BPE = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe" 14 | 15 | 16 | @register_bpe("gpt2") 17 | class GPT2BPE(object): 18 | @staticmethod 19 | def add_args(parser): 20 | # fmt: off 21 | parser.add_argument('--gpt2-encoder-json', type=str, 22 | default=DEFAULT_ENCODER_JSON, 23 | help='path to encoder.json') 24 | parser.add_argument('--gpt2-vocab-bpe', type=str, 25 | default=DEFAULT_VOCAB_BPE, 26 | help='path to vocab.bpe') 27 | # fmt: on 28 | 29 | def __init__(self, args): 30 | encoder_json = file_utils.cached_path( 31 | getattr(args, "gpt2_encoder_json", DEFAULT_ENCODER_JSON) 32 | ) 33 | vocab_bpe = file_utils.cached_path( 34 | getattr(args, "gpt2_vocab_bpe", DEFAULT_VOCAB_BPE) 35 | ) 36 | self.bpe = get_encoder(encoder_json, vocab_bpe) 37 | 38 | def encode(self, x: str) -> str: 39 | return " ".join(map(str, self.bpe.encode(x))) 40 | 41 | def decode(self, x: str) -> str: 42 | return self.bpe.decode( 43 | [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()] 44 | ) 45 | 46 | def is_beginning_of_word(self, x: str) -> bool: 47 | return self.decode(x).startswith(" ") 48 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/hf_bert_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_bpe 7 | 8 | 9 | @register_bpe("bert") 10 | class BertBPE(object): 11 | @staticmethod 12 | def add_args(parser): 13 | # fmt: off 14 | parser.add_argument('--bpe-cased', action='store_true', 15 | help='set for cased BPE', 16 | default=False) 17 | parser.add_argument('--bpe-vocab-file', type=str, 18 | help='bpe vocab file.') 19 | # fmt: on 20 | 21 | def __init__(self, args): 22 | try: 23 | from transformers import BertTokenizer 24 | except ImportError: 25 | raise ImportError( 26 | "Please install transformers with: pip install transformers" 27 | ) 28 | 29 | if "bpe_vocab_file" in args: 30 | self.bert_tokenizer = BertTokenizer( 31 | args.bpe_vocab_file, do_lower_case=not args.bpe_cased 32 | ) 33 | else: 34 | vocab_file_name = ( 35 | "bert-base-cased" if args.bpe_cased else "bert-base-uncased" 36 | ) 37 | self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name) 38 | 39 | def encode(self, x: str) -> str: 40 | return " ".join(self.bert_tokenizer.tokenize(x)) 41 | 42 | def decode(self, x: str) -> str: 43 | return self.bert_tokenizer.clean_up_tokenization( 44 | self.bert_tokenizer.convert_tokens_to_string(x.split(" ")) 45 | ) 46 | 47 | def is_beginning_of_word(self, x: str) -> bool: 48 | return not x.startswith("##") 49 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/hf_byte_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from fairseq.data.encoders import register_bpe 7 | 8 | 9 | @register_bpe("hf_byte_bpe") 10 | class HuggingFaceByteLevelBPE(object): 11 | @staticmethod 12 | def add_args(parser): 13 | # fmt: off 14 | parser.add_argument('--bpe-merges', help='path to merges.txt') 15 | parser.add_argument('--bpe-vocab', help='path to vocab.json') 16 | parser.add_argument('--bpe-add-prefix-space', action='store_true', 17 | help='add prefix space before encoding') 18 | # fmt: on 19 | 20 | def __init__(self, args): 21 | try: 22 | from tokenizers import ByteLevelBPETokenizer 23 | except ImportError: 24 | raise ImportError( 25 | "Please install huggingface/tokenizers with: " "pip install tokenizers" 26 | ) 27 | 28 | self.bpe = ByteLevelBPETokenizer( 29 | args.bpe_vocab, 30 | args.bpe_merges, 31 | add_prefix_space=getattr(args, "bpe_add_prefix_space", False), 32 | ) 33 | 34 | def encode(self, x: str) -> str: 35 | return " ".join(map(str, self.bpe.encode(x).ids)) 36 | 37 | def decode(self, x: str) -> str: 38 | return self.bpe.decode( 39 | [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()] 40 | ) 41 | 42 | def is_beginning_of_word(self, x: str) -> bool: 43 | return self.decode(x).startswith(" ") 44 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/nltk_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_tokenizer 7 | 8 | 9 | @register_tokenizer("nltk") 10 | class NLTKTokenizer(object): 11 | def __init__(self, source_lang=None, target_lang=None): 12 | try: 13 | from nltk.tokenize import word_tokenize 14 | 15 | self.word_tokenize = word_tokenize 16 | except ImportError: 17 | raise ImportError("Please install nltk with: pip install nltk") 18 | 19 | def encode(self, x: str) -> str: 20 | return " ".join(self.word_tokenize(x)) 21 | 22 | def decode(self, x: str) -> str: 23 | return x 24 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/sentencepiece_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
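# Illustration (hypothetical pieces; actual segmentation depends on the trained
# model): sentencepiece marks word starts with U+2581 ("▁"), so
# bpe.encode("Hello world") might give "▁Hello ▁world", and
# bpe.decode("▁Hello ▁world") drops the inner spaces, turns "▁" back into
# spaces, and returns "Hello world".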
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe("sentencepiece") 11 | class SentencepieceBPE(object): 12 | @staticmethod 13 | def add_args(parser): 14 | # fmt: off 15 | parser.add_argument('--sentencepiece-model', type=str, 16 | help='path to sentencepiece model') 17 | # fmt: on 18 | 19 | def __init__(self, args): 20 | sentencepiece_model = file_utils.cached_path(args.sentencepiece_model) 21 | try: 22 | import sentencepiece as spm 23 | 24 | self.sp = spm.SentencePieceProcessor() 25 | self.sp.Load(sentencepiece_model) 26 | except ImportError: 27 | raise ImportError( 28 | "Please install sentencepiece with: pip install sentencepiece" 29 | ) 30 | 31 | def encode(self, x: str) -> str: 32 | return " ".join(self.sp.EncodeAsPieces(x)) 33 | 34 | def decode(self, x: str) -> str: 35 | return x.replace(" ", "").replace("\u2581", " ").strip() 36 | 37 | def is_beginning_of_word(self, x: str) -> bool: 38 | if x in ["<unk>", "<s>", "</s>", "<pad>"]: 39 | # special elements are always considered beginnings 40 | # HACK: this logic is already present in fairseq/tasks/masked_lm.py 41 | # but these special tokens are also contained in the sentencepiece 42 | # vocabulary which causes duplicate special tokens. This hack makes 43 | # sure that they are all taken into account. 44 | return True 45 | return x.startswith("\u2581") 46 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/space_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | from fairseq.data.encoders import register_tokenizer 9 | 10 | 11 | @register_tokenizer("space") 12 | class SpaceTokenizer(object): 13 | def __init__(self, source_lang=None, target_lang=None): 14 | self.space_tok = re.compile(r"\s+") 15 | 16 | def encode(self, x: str) -> str: 17 | return self.space_tok.sub(" ", x) 18 | 19 | def decode(self, x: str) -> str: 20 | return x 21 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/subword_nmt_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
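# Illustration (hypothetical segmentation; the real split depends on the
# learned codes): subword-NMT marks non-final subwords with "@@", e.g.
# encode("unbelievable") could give "un@@ believ@@ able", and decode removes
# every "@@ " occurrence to restore "unbelievable".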
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe("subword_nmt") 11 | class SubwordNMTBPE(object): 12 | @staticmethod 13 | def add_args(parser): 14 | # fmt: off 15 | parser.add_argument('--bpe-codes', type=str, 16 | help='path to subword NMT BPE') 17 | parser.add_argument('--bpe-separator', default='@@', 18 | help='BPE separator') 19 | # fmt: on 20 | 21 | def __init__(self, args): 22 | if args.bpe_codes is None: 23 | raise ValueError("--bpe-codes is required for --bpe=subword_nmt") 24 | codes = file_utils.cached_path(args.bpe_codes) 25 | try: 26 | from subword_nmt import apply_bpe 27 | 28 | bpe_parser = apply_bpe.create_parser() 29 | bpe_args = bpe_parser.parse_args( 30 | [ 31 | "--codes", 32 | codes, 33 | "--separator", 34 | args.bpe_separator, 35 | ] 36 | ) 37 | self.bpe = apply_bpe.BPE( 38 | bpe_args.codes, 39 | bpe_args.merges, 40 | bpe_args.separator, 41 | None, 42 | bpe_args.glossaries, 43 | ) 44 | self.bpe_symbol = bpe_args.separator + " " 45 | except ImportError: 46 | raise ImportError( 47 | "Please install subword_nmt with: pip install subword-nmt" 48 | ) 49 | 50 | def encode(self, x: str) -> str: 51 | return self.bpe.process_line(x) 52 | 53 | def decode(self, x: str) -> str: 54 | return (x + " ").replace(self.bpe_symbol, "").rstrip() 55 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/encoders/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from fairseq.data import encoders 8 | 9 | 10 | def get_whole_word_mask(args, dictionary): 11 | bpe = encoders.build_bpe(args) 12 | if bpe is not None: 13 | 14 | def is_beginning_of_word(i): 15 | if i < dictionary.nspecial: 16 | # special elements are always considered beginnings 17 | return True 18 | tok = dictionary[i] 19 | if tok.startswith("madeupword"): 20 | return True 21 | try: 22 | return bpe.is_beginning_of_word(tok) 23 | except ValueError: 24 | return True 25 | 26 | mask_whole_words = torch.ByteTensor( 27 | list(map(is_beginning_of_word, range(len(dictionary)))) 28 | ) 29 | return mask_whole_words 30 | return None 31 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/id_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class IdDataset(FairseqDataset): 12 | def __getitem__(self, index): 13 | return index 14 | 15 | def __len__(self): 16 | return 0 17 | 18 | def collater(self, samples): 19 | return torch.tensor(samples) 20 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .block_pair_dataset import BlockPairDataset 7 | from .masked_lm_dataset import MaskedLMDataset 8 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary 9 | 10 | 11 | __all__ = [ 12 | "BertDictionary", 13 | "BlockPairDataset", 14 | "MaskedLMDataset", 15 | "MaskedLMDictionary", 16 | ] 17 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/legacy/masked_lm_dictionary.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import Dictionary 7 | 8 | 9 | class MaskedLMDictionary(Dictionary): 10 | """ 11 | Dictionary for Masked Language Modelling tasks. This extends Dictionary by 12 | adding the mask symbol. 13 | """ 14 | 15 | def __init__( 16 | self, 17 | pad="<pad>", 18 | eos="</s>", 19 | unk="<unk>", 20 | mask="<mask>", 21 | ): 22 | super().__init__(pad=pad, eos=eos, unk=unk) 23 | self.mask_word = mask 24 | self.mask_index = self.add_symbol(mask) 25 | self.nspecial = len(self.symbols) 26 | 27 | def mask(self): 28 | """Helper to get index of mask symbol""" 29 | return self.mask_index 30 | 31 | 32 | class BertDictionary(MaskedLMDictionary): 33 | """ 34 | Dictionary for BERT task. This extends MaskedLMDictionary by adding support 35 | for cls and sep symbols. 36 | """ 37 | 38 | def __init__( 39 | self, 40 | pad="<pad>", 41 | eos="</s>", 42 | unk="<unk>", 43 | mask="<mask>", 44 | cls="<cls>", 45 | sep="<sep>", 46 | ): 47 | super().__init__(pad=pad, eos=eos, unk=unk, mask=mask) 48 | self.cls_word = cls 49 | self.sep_word = sep 50 | self.cls_index = self.add_symbol(cls) 51 | self.sep_index = self.add_symbol(sep) 52 | self.nspecial = len(self.symbols) 53 | 54 | def cls(self): 55 | """Helper to get index of cls symbol""" 56 | return self.cls_index 57 | 58 | def sep(self): 59 | """Helper to get index of sep symbol""" 60 | return self.sep_index 61 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/list_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class ListDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, sizes=None): 11 | super().__init__(dataset) 12 | self._sizes = sizes 13 | 14 | def __iter__(self): 15 | for x in self.dataset: 16 | yield x 17 | 18 | def collater(self, samples): 19 | return samples 20 | 21 | @property 22 | def sizes(self): 23 | return self._sizes 24 | 25 | def num_tokens(self, index): 26 | return self.sizes[index] 27 | 28 | def size(self, index): 29 | return self.sizes[index] 30 | 31 | def set_epoch(self, epoch): 32 | pass 33 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/lru_cache_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from functools import lru_cache 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class LRUCacheDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, token=None): 13 | super().__init__(dataset) 14 | 15 | @lru_cache(maxsize=8) 16 | def __getitem__(self, index): 17 | return self.dataset[index] 18 | 19 | @lru_cache(maxsize=8) 20 | def collater(self, samples): 21 | return self.dataset.collater(samples) 22 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/multilingual/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/multilingual/multilingual_utils.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Dict, List, Optional, Sequence 3 | 4 | import torch 5 | from fairseq.data import Dictionary 6 | 7 | 8 | class EncoderLangtok(Enum): 9 | """ 10 | Prepend to the beginning of source sentence either the 11 | source or target language token. (src/tgt). 12 | """ 13 | 14 | src = "src" 15 | tgt = "tgt" 16 | 17 | 18 | class LangTokSpec(Enum): 19 | main = "main" 20 | mono_dae = "mono_dae" 21 | 22 | 23 | class LangTokStyle(Enum): 24 | multilingual = "multilingual" 25 | mbart = "mbart" 26 | 27 | 28 | @torch.jit.export 29 | def get_lang_tok( 30 | lang: str, lang_tok_style: str, spec: str = LangTokSpec.main.value 31 | ) -> str: 32 | # TOKEN_STYLES can't be defined outside this fn since it needs to be 33 | # TorchScriptable. 34 | TOKEN_STYLES: Dict[str, str] = { 35 | LangTokStyle.mbart.value: "[{}]", 36 | LangTokStyle.multilingual.value: "__{}__", 37 | } 38 | 39 | if spec.endswith("dae"): 40 | lang = f"{lang}_dae" 41 | elif spec.endswith("mined"): 42 | lang = f"{lang}_mined" 43 | style = TOKEN_STYLES[lang_tok_style] 44 | return style.format(lang) 45 | 46 | 47 | def augment_dictionary( 48 | dictionary: Dictionary, 49 | language_list: List[str], 50 | lang_tok_style: str, 51 | langtoks_specs: Sequence[str] = (LangTokSpec.main.value,), 52 | extra_data: Optional[Dict[str, str]] = None, 53 | ) -> None: 54 | for spec in langtoks_specs: 55 | for language in language_list: 56 | dictionary.add_symbol( 57 | get_lang_tok(lang=language, lang_tok_style=lang_tok_style, spec=spec) 58 | ) 59 | 60 | if lang_tok_style == LangTokStyle.mbart.value or ( 61 | extra_data is not None and LangTokSpec.mono_dae.value in extra_data 62 | ): 63 | dictionary.add_symbol("<mask>") 64 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/num_samples_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import FairseqDataset 7 | 8 | 9 | class NumSamplesDataset(FairseqDataset): 10 | def __getitem__(self, index): 11 | return 1 12 | 13 | def __len__(self): 14 | return 0 15 | 16 | def collater(self, samples): 17 | return sum(samples) 18 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/numel_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class NumelDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, reduce=False): 14 | super().__init__(dataset) 15 | self.reduce = reduce 16 | 17 | def __getitem__(self, index): 18 | item = self.dataset[index] 19 | if torch.is_tensor(item): 20 | return torch.numel(item) 21 | else: 22 | return np.size(item) 23 | 24 | def __len__(self): 25 | return len(self.dataset) 26 | 27 | def collater(self, samples): 28 | if self.reduce: 29 | return sum(samples) 30 | else: 31 | return torch.tensor(samples) 32 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/offset_tokens_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class OffsetTokensDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, offset): 11 | super().__init__(dataset) 12 | self.offset = offset 13 | 14 | def __getitem__(self, idx): 15 | return self.dataset[idx] + self.offset 16 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/pad_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import data_utils 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class PadDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, pad_idx, left_pad): 13 | super().__init__(dataset) 14 | self.pad_idx = pad_idx 15 | self.left_pad = left_pad 16 | 17 | def collater(self, samples): 18 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) 19 | 20 | 21 | class LeftPadDataset(PadDataset): 22 | def __init__(self, dataset, pad_idx): 23 | super().__init__(dataset, pad_idx, left_pad=True) 24 | 25 | 26 | class RightPadDataset(PadDataset): 27 | def __init__(self, dataset, pad_idx): 28 | super().__init__(dataset, pad_idx, left_pad=False) 29 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/prepend_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . 
import BaseWrapperDataset 10 | 11 | 12 | class PrependDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, prepend_getter, ensure_first_token_is=None): 14 | super().__init__(dataset) 15 | self.prepend_getter = prepend_getter 16 | self.ensure_first_token = ensure_first_token_is 17 | 18 | def __getitem__(self, idx): 19 | item = self.dataset[idx] 20 | is_tuple = isinstance(item, tuple) 21 | src = item[0] if is_tuple else item 22 | 23 | assert self.ensure_first_token is None or src[0] == self.ensure_first_token 24 | prepend_idx = self.prepend_getter(self.dataset, idx) 25 | assert isinstance(prepend_idx, int) 26 | src[0] = prepend_idx 27 | item = tuple((src,) + item[1:]) if is_tuple else src 28 | return item 29 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/prepend_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class PrependTokenDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, token=None): 14 | super().__init__(dataset) 15 | self.token = token 16 | if token is not None: 17 | self._sizes = np.array(dataset.sizes) + 1 18 | else: 19 | self._sizes = dataset.sizes 20 | 21 | def __getitem__(self, idx): 22 | item = self.dataset[idx] 23 | if self.token is not None: 24 | item = torch.cat([item.new([self.token]), item]) 25 | return item 26 | 27 | @property 28 | def sizes(self): 29 | return self._sizes 30 | 31 | def num_tokens(self, index): 32 | n = self.dataset.num_tokens(index) 33 | if self.token is not None: 34 | n += 1 35 | return n 36 | 37 | def size(self, index): 38 | n = self.dataset.size(index) 39 | if self.token is not None: 40 | n += 1 41 | return n 42 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/raw_label_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class RawLabelDataset(FairseqDataset): 12 | def __init__(self, labels): 13 | super().__init__() 14 | self.labels = labels 15 | 16 | def __getitem__(self, index): 17 | return self.labels[index] 18 | 19 | def __len__(self): 20 | return len(self.labels) 21 | 22 | def collater(self, samples): 23 | return torch.tensor(samples) 24 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/replace_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class ReplaceDataset(BaseWrapperDataset): 10 | """Replaces tokens found in the dataset by a specified replacement token 11 | 12 | Args: 13 | dataset (~torch.utils.data.Dataset): dataset to replace tokens in 14 | replace_map(Dictionary[int,int]): map of token to replace -> replacement token 15 | offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be 16 | as many as the number of objects returned by the underlying dataset __getitem__ method. 17 | """ 18 | 19 | def __init__(self, dataset, replace_map, offsets): 20 | super().__init__(dataset) 21 | assert len(replace_map) > 0 22 | self.replace_map = replace_map 23 | self.offsets = offsets 24 | 25 | def __getitem__(self, index): 26 | item = self.dataset[index] 27 | is_tuple = isinstance(item, tuple) 28 | srcs = item if is_tuple else [item] 29 | 30 | for offset, src in zip(self.offsets, srcs): 31 | for k, v in self.replace_map.items(): 32 | src_off = src[offset:] if offset >= 0 else src[:offset] 33 | src_off.masked_fill_(src_off == k, v) 34 | 35 | item = srcs if is_tuple else srcs[0] 36 | return item 37 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/roll_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class RollDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, shifts): 13 | super().__init__(dataset) 14 | self.shifts = shifts 15 | 16 | def __getitem__(self, index): 17 | item = self.dataset[index] 18 | return torch.roll(item, self.shifts) 19 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/sort_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class SortDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, sort_order): 13 | super().__init__(dataset) 14 | if not isinstance(sort_order, (list, tuple)): 15 | sort_order = [sort_order] 16 | self.sort_order = sort_order 17 | 18 | assert all(len(so) == len(dataset) for so in sort_order) 19 | 20 | def ordered_indices(self): 21 | return np.lexsort(self.sort_order) 22 | -------------------------------------------------------------------------------- /machine_translation/fairseq/data/strip_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class StripTokenDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, id_to_strip): 11 | super().__init__(dataset) 12 | self.id_to_strip = id_to_strip 13 | 14 | def __getitem__(self, index): 15 | item = self.dataset[index] 16 | while len(item) > 0 and item[-1] == self.id_to_strip: 17 | item = item[:-1] 18 | while len(item) > 0 and item[0] == self.id_to_strip: 19 | item = item[1:] 20 | return item 21 | -------------------------------------------------------------------------------- /machine_translation/fairseq/dataclass/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .utils import ChoiceEnum, FairseqDataclass 7 | 8 | 9 | __all__ = ["FairseqDataclass", "ChoiceEnum"] 10 | -------------------------------------------------------------------------------- /machine_translation/fairseq/dataclass/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.dataclass.utils import ChoiceEnum 7 | 8 | 9 | LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"]) 10 | DDP_BACKEND_CHOICES = ChoiceEnum(["c10d", "no_c10d"]) 11 | DISTRIBUTED_WRAPPER_CHOICES = ChoiceEnum(["DDP", "SlowMo"]) 12 | ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"]) 13 | PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"]) 14 | -------------------------------------------------------------------------------- /machine_translation/fairseq/incremental_decoding_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
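# Minimal usage sketch (illustrative, not upstream code; MyAttention is a
# made-up module name):
#
#   @with_incremental_state
#   class MyAttention(nn.Module):
#       def forward(self, x, incremental_state=None):
#           saved = self.get_incremental_state(incremental_state, "prev_key")
#           ...
#           self.set_incremental_state(incremental_state, "prev_key", {"k": x})
#
# Keys are namespaced by a per-instance uuid, so two layers that both store
# "prev_key" in the same incremental_state dict never collide.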
5 | 6 | import uuid 7 | from typing import Dict, Optional 8 | 9 | from torch import Tensor 10 | 11 | 12 | class FairseqIncrementalState(object): 13 | def __init__(self, *args, **kwargs): 14 | super().__init__(*args, **kwargs) 15 | self.init_incremental_state() 16 | 17 | def init_incremental_state(self): 18 | self._incremental_state_id = str(uuid.uuid4()) 19 | 20 | def _get_full_incremental_state_key(self, key: str) -> str: 21 | return "{}.{}".format(self._incremental_state_id, key) 22 | 23 | def get_incremental_state( 24 | self, 25 | incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], 26 | key: str, 27 | ) -> Optional[Dict[str, Optional[Tensor]]]: 28 | """Helper for getting incremental state for an nn.Module.""" 29 | full_key = self._get_full_incremental_state_key(key) 30 | if incremental_state is None or full_key not in incremental_state: 31 | return None 32 | return incremental_state[full_key] 33 | 34 | def set_incremental_state( 35 | self, 36 | incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], 37 | key: str, 38 | value: Dict[str, Optional[Tensor]], 39 | ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: 40 | """Helper for setting incremental state for an nn.Module.""" 41 | if incremental_state is not None: 42 | full_key = self._get_full_incremental_state_key(key) 43 | incremental_state[full_key] = value 44 | return incremental_state 45 | 46 | 47 | def with_incremental_state(cls): 48 | cls.__bases__ = (FairseqIncrementalState,) + tuple( 49 | b for b in cls.__bases__ if b != FairseqIncrementalState 50 | ) 51 | return cls 52 | -------------------------------------------------------------------------------- /machine_translation/fairseq/logging/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/machine_translation/fairseq/logging/__init__.py -------------------------------------------------------------------------------- /machine_translation/fairseq/model_parallel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import criterions, models, modules # noqa 7 | -------------------------------------------------------------------------------- /machine_translation/fairseq/model_parallel/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the criterions/ directory 11 | for file in os.listdir(os.path.dirname(__file__)): 12 | if file.endswith(".py") and not file.startswith("_"): 13 | module = file[: file.find(".py")] 14 | importlib.import_module("fairseq.model_parallel.criterions." + module) 15 | -------------------------------------------------------------------------------- /machine_translation/fairseq/model_parallel/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.model_parallel.models." + model_name) 21 | -------------------------------------------------------------------------------- /machine_translation/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .model import * # noqa 7 | -------------------------------------------------------------------------------- /machine_translation/fairseq/model_parallel/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .model import * # noqa 7 | -------------------------------------------------------------------------------- /machine_translation/fairseq/model_parallel/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | from .multihead_attention import ModelParallelMultiheadAttention 8 | from .transformer_layer import ( 9 | ModelParallelTransformerEncoderLayer, 10 | ModelParallelTransformerDecoderLayer, 11 | ) 12 | from .transformer_sentence_encoder_layer import ( 13 | ModelParallelTransformerSentenceEncoderLayer, 14 | ) 15 | from .transformer_sentence_encoder import ModelParallelTransformerSentenceEncoder 16 | 17 | __all__ = [ 18 | "ModelParallelMultiheadAttention", 19 | "ModelParallelTransformerEncoderLayer", 20 | "ModelParallelTransformerDecoderLayer", 21 | "ModelParallelTransformerSentenceEncoder", 22 | "ModelParallelTransformerSentenceEncoderLayer", 23 | ] 24 | -------------------------------------------------------------------------------- /machine_translation/fairseq/models/bart/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | -------------------------------------------------------------------------------- /machine_translation/fairseq/models/huggingface/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/huggingface/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.models.huggingface." + model_name) 21 | -------------------------------------------------------------------------------- /machine_translation/fairseq/models/nat/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | from .fairseq_nat_model import * 8 | from .nonautoregressive_transformer import * 9 | from .nat_crf_transformer import * 10 | from .iterative_nonautoregressive_transformer import * 11 | from .cmlm_transformer import * 12 | from .levenshtein_transformer import * 13 | from .insertion_transformer import * 14 | -------------------------------------------------------------------------------- /machine_translation/fairseq/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | from .model_camembert import * # noqa 9 | from .model_xlmr import * # noqa 10 | -------------------------------------------------------------------------------- /machine_translation/fairseq/models/roberta/model_xlmr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
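# Typical entry point (usage sketch; the local path is a placeholder):
#   xlmr = XLMRModel.from_pretrained('/path/to/xlmr.large', checkpoint_file='model.pt')
# which resolves names through the hub_models() archive map below and returns
# a RobertaHubInterface ready for encode() / extract_features().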
5 | """ 6 | Unsupervised Cross-lingual Representation Learning at Scale 7 | """ 8 | 9 | from fairseq.models import register_model 10 | 11 | from .hub_interface import RobertaHubInterface 12 | from .model import RobertaModel 13 | 14 | 15 | @register_model("xlmr") 16 | class XLMRModel(RobertaModel): 17 | @classmethod 18 | def hub_models(cls): 19 | return { 20 | "xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz", 21 | "xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz", 22 | } 23 | 24 | @classmethod 25 | def from_pretrained( 26 | cls, 27 | model_name_or_path, 28 | checkpoint_file="model.pt", 29 | data_name_or_path=".", 30 | bpe="sentencepiece", 31 | **kwargs 32 | ): 33 | from fairseq import hub_utils 34 | 35 | x = hub_utils.from_pretrained( 36 | model_name_or_path, 37 | checkpoint_file, 38 | data_name_or_path, 39 | archive_map=cls.hub_models(), 40 | bpe=bpe, 41 | load_checkpoint_heads=True, 42 | **kwargs, 43 | ) 44 | return RobertaHubInterface(x["args"], x["task"], x["models"][0]) 45 | -------------------------------------------------------------------------------- /machine_translation/fairseq/models/speech_to_text/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .berard import * # noqa 7 | from .s2t_transformer import * # noqa 8 | -------------------------------------------------------------------------------- /machine_translation/fairseq/models/wav2vec/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .wav2vec import * # noqa 7 | from .wav2vec2 import * # noqa 8 | from .wav2vec2_asr import * # noqa 9 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/beamable_mm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn as nn 8 | 9 | 10 | class BeamableMM(nn.Module): 11 | """This module provides an optimized MM for beam decoding with attention. 12 | 13 | It leverage the fact that the source-side of the input is replicated beam 14 | times and the target-side of the input is of width one. This layer speeds up 15 | inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)} 16 | with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}. 
17 | """ 18 | 19 | def __init__(self, beam_size=None): 20 | super(BeamableMM, self).__init__() 21 | self.beam_size = beam_size 22 | 23 | def forward(self, input1, input2): 24 | if ( 25 | not self.training 26 | and self.beam_size is not None # test mode 27 | and input1.dim() == 3 # beam size is set 28 | and input1.size(1) # only support batched input 29 | == 1 # single time step update 30 | ): 31 | bsz, beam = input1.size(0), self.beam_size 32 | 33 | # bsz x 1 x nhu --> bsz/beam x beam x nhu 34 | input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1) 35 | 36 | # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu 37 | input2 = input2.unfold(0, beam, beam)[:, :, :, 0] 38 | 39 | # use non batched operation if bsz = beam 40 | if input1.size(0) == 1: 41 | output = torch.mm(input1[0, :, :], input2[0, :, :]) 42 | else: 43 | output = input1.bmm(input2) 44 | return output.view(bsz, 1, -1) 45 | else: 46 | return input1.bmm(input2) 47 | 48 | def set_beam_size(self, beam_size): 49 | self.beam_size = beam_size 50 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/conv_tbc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from torch.nn.modules.utils import _single 8 | 9 | 10 | class ConvTBC(torch.nn.Module): 11 | """1D convolution over an input of shape (time x batch x channel) 12 | 13 | The implementation uses gemm to perform the convolution. This implementation 14 | is faster than cuDNN for small kernel sizes. 15 | """ 16 | 17 | def __init__(self, in_channels, out_channels, kernel_size, padding=0): 18 | super(ConvTBC, self).__init__() 19 | self.in_channels = in_channels 20 | self.out_channels = out_channels 21 | self.kernel_size = _single(kernel_size) 22 | self.padding = _single(padding) 23 | 24 | self.weight = torch.nn.Parameter( 25 | torch.Tensor(self.kernel_size[0], in_channels, out_channels) 26 | ) 27 | self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) 28 | 29 | def forward(self, input): 30 | return torch.conv_tbc( 31 | input.contiguous(), self.weight, self.bias, self.padding[0] 32 | ) 33 | 34 | def __repr__(self): 35 | s = ( 36 | "{name}({in_channels}, {out_channels}, kernel_size={kernel_size}" 37 | ", padding={padding}" 38 | ) 39 | if self.bias is None: 40 | s += ", bias=False" 41 | s += ")" 42 | return s.format(name=self.__class__.__name__, **self.__dict__) 43 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/cross_entropy.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import logging 7 | 8 | import torch 9 | import torch.nn.functional as F 10 | 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"): 16 | lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) 17 | return F.nll_loss( 18 | lprobs, 19 | target, 20 | ignore_index=ignore_index, 21 | reduction=reduction, 22 | ) 23 | 24 | 25 | try: 26 | import xentropy_cuda 27 | from apex.contrib import xentropy 28 | 29 | logger.info("using fused cross entropy") 30 | 31 | def cross_entropy(logits, target, ignore_index=-100, reduction="mean"): 32 | if logits.device == torch.device("cpu"): 33 | return _cross_entropy_pytorch(logits, target, ignore_index, reduction) 34 | else: 35 | half_to_float = logits.dtype == torch.half 36 | losses = xentropy.SoftmaxCrossEntropyLoss.apply( 37 | logits, 38 | target, 39 | 0.0, 40 | ignore_index, 41 | half_to_float, 42 | ) 43 | if reduction == "sum": 44 | return losses.sum() 45 | elif reduction == "mean": 46 | if ignore_index >= 0: 47 | return losses.sum() / target.ne(ignore_index).sum() 48 | else: 49 | return losses.mean() 50 | elif reduction == "none": 51 | return losses 52 | else: 53 | raise NotImplementedError 54 | 55 | 56 | except ImportError: 57 | 58 | def cross_entropy(logits, target, ignore_index=-100, reduction="mean"): 59 | return _cross_entropy_pytorch(logits, target, ignore_index, reduction) 60 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/dynamicconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .dynamicconv_layer import DynamicconvLayer # noqa 7 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
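A usage sketch for the cross_entropy wrapper above: callers get the fused Apex kernel when it is installed and the logits live on GPU, and the log_softmax + nll_loss fallback otherwise, with the same semantics either way (positions equal to ignore_index are excluded from the mean):

import torch

logits = torch.randn(8, 100)     # (tokens, vocab)
target = torch.randint(0, 100, (8,))
target[0] = 1                    # pretend index 1 is the padding symbol
loss = cross_entropy(logits, target, ignore_index=1, reduction="mean")
print(loss.item())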
6 | */ 7 | 8 | #include <torch/extension.h> 9 | #include <vector> 10 | 11 | std::vector<at::Tensor> dynamicconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector<at::Tensor> dynamicconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector<at::Tensor> dynamicconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return dynamicconv_cuda_forward(input, filters, 36 | padding_l); 37 | } 38 | 39 | std::vector<at::Tensor> dynamicconv_backward( 40 | at::Tensor gradOutput, 41 | int padding_l, 42 | at::Tensor input, 43 | at::Tensor filters) { 44 | 45 | CHECK_INPUT(gradOutput); 46 | CHECK_INPUT(input); 47 | CHECK_INPUT(filters); 48 | 49 | return dynamicconv_cuda_backward(gradOutput, padding_l, 50 | input, filters); 51 | } 52 | 53 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 54 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)"); 55 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)"); 56 | } 57 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree.
6 | */ 7 | 8 | #include <ATen/ATen.h> 9 | #include <c10/cuda/CUDAStream.h> 10 | 11 | #include <cuda.h> 12 | #include <cuda_runtime.h> 13 | #include <cuda_fp16.h> 14 | 15 | #include <algorithm> 16 | #include <functional> 17 | #include <iostream> 18 | #include <stdexcept> 19 | #include <utility> 20 | #include <vector> 21 | 22 | #include <assert.h> 23 | #include <math.h> 24 | #include <stdlib.h> 25 | 26 | #define SHFL_MASK 0xffffffff 27 | 28 | template <int FS, int SB, int padding_l, typename scalar_t> 29 | __global__ 30 | void dynamicconv_forward_kernel(const scalar_t* input, 31 | const scalar_t* weight, 32 | int minibatch, 33 | int sequenceLength, 34 | int numFeatures, 35 | int numFiltersInBlock, 36 | int numHeads, 37 | scalar_t* output); 38 | 39 | template <int FS, int SB, int padding_l, typename scalar_t> 40 | __global__ 41 | void dynamicconv_backward_kernel( 42 | const scalar_t* gradOutput, // B * C * T 43 | const scalar_t* input, // B * C * T 44 | const scalar_t* weight, 45 | int minibatch, 46 | int sequenceLength, 47 | int numFeatures, 48 | int numFiltersInBlock, 49 | int numHeads, 50 | scalar_t* gradWeight, 51 | scalar_t* gradInput); // B * H * k * T 52 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | #include <vector> 3 | 4 | std::vector<float*> dynamicconv_cpu_forward( 5 | float* input, 6 | float* filters, 7 | int padding_l); 8 | 9 | std::vector<float*> dynamicconv_cpu_backward( 10 | float* gradOutput, 11 | int padding_l, 12 | float* input, 13 | float* filters); 14 | 15 | std::vector<float*> dynamicconv_forward( 16 | float* input, 17 | float* filters, 18 | int padding_l) { 19 | 20 | return dynamicconv_cpu_forward(input, filters, padding_l); 21 | } 22 | 23 | std::vector<float*> dynamicconv_backward( 24 | float* gradOutput, 25 | int padding_l, 26 | float* input, 27 | float* filters) { 28 | 29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); 30 | } 31 | 32 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); 34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); 35 | } 36 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/dynamicconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="dynamicconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | name="dynamicconv_cuda", 16 | sources=[ 17 | "dynamicconv_cuda.cpp", 18 | "dynamicconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/fairseq_dropout.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
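The setup.py above is a standard torch.utils.cpp_extension build; once compiled (for example with python setup.py install run from this directory), the module is imported under the CUDAExtension name and exposes exactly the two functions bound by PYBIND11_MODULE in dynamicconv_cuda.cpp:

import dynamicconv_cuda  # extension name from the CUDAExtension(...) above

# dynamicconv_cuda.forward(input, filters, padding_l)
# dynamicconv_cuda.backward(grad_output, padding_l, input, filters)
# Both expect contiguous CUDA tensors (enforced by the CHECK_INPUT macros);
# tensor layouts follow the B * C * T convention noted in the .cuh header.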
5 | 6 | import logging 7 | from typing import List, Optional 8 | 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | class FairseqDropout(nn.Module): 17 | def __init__(self, p, module_name=None): 18 | super().__init__() 19 | self.p = p 20 | self.module_name = module_name 21 | self.apply_during_inference = False 22 | 23 | def forward(self, x, inplace: bool = False): 24 | if self.training or self.apply_during_inference: 25 | return F.dropout(x, p=self.p, training=True, inplace=inplace) 26 | else: 27 | return x 28 | 29 | def make_generation_fast_( 30 | self, 31 | name: str, 32 | retain_dropout: bool = False, 33 | retain_dropout_modules: Optional[List[str]] = None, 34 | **kwargs 35 | ): 36 | if retain_dropout: 37 | if retain_dropout_modules is not None and self.module_name is None: 38 | logger.warning( 39 | "Cannot enable dropout during inference for module {} " 40 | "because module_name was not set".format(name) 41 | ) 42 | elif ( 43 | retain_dropout_modules is None # if None, apply to all modules 44 | or self.module_name in retain_dropout_modules 45 | ): 46 | logger.info( 47 | "Enabling dropout during inference for module: {}".format(name) 48 | ) 49 | self.apply_during_inference = True 50 | else: 51 | logger.info("Disabling dropout for module: {}".format(name)) 52 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/fp32_group_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | Group norm done in fp32 (for fp16 training) 7 | """ 8 | 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | 12 | 13 | class Fp32GroupNorm(nn.GroupNorm): 14 | def __init__(self, *args, **kwargs): 15 | super().__init__(*args, **kwargs) 16 | 17 | def forward(self, input): 18 | output = F.group_norm( 19 | input.float(), 20 | self.num_groups, 21 | self.weight.float() if self.weight is not None else None, 22 | self.bias.float() if self.bias is not None else None, 23 | self.eps, 24 | ) 25 | return output.type_as(input) 26 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/gelu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
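A sketch of the inference-time switch implemented in FairseqDropout above: dropout is a no-op in eval mode until make_generation_fast_ opts the module back in, which is useful e.g. for Monte-Carlo dropout at generation time (the module/instance names below are made up for illustration):

import torch

m = FairseqDropout(p=0.5, module_name="TransformerDecoderLayer")
m.eval()
x = torch.randn(4, 4)
assert torch.equal(m(x), x)  # eval mode: dropout skipped

m.make_generation_fast_(
    "decoder.layers.0.dropout_module",
    retain_dropout=True,
    retain_dropout_modules=["TransformerDecoderLayer"],
)
assert m.apply_during_inference  # dropout now stays active at inference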
5 | """ 6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with 7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs 8 | """ 9 | 10 | import math 11 | 12 | import torch 13 | import torch.nn as nn 14 | 15 | 16 | def gelu_accurate(x): 17 | if not hasattr(gelu_accurate, "_a"): 18 | gelu_accurate._a = math.sqrt(2 / math.pi) 19 | return ( 20 | 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) 21 | ) 22 | 23 | 24 | def gelu(x: torch.Tensor) -> torch.Tensor: 25 | return torch.nn.functional.gelu(x.float()).type_as(x) 26 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/grad_multiply.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class GradMultiply(torch.autograd.Function): 10 | @staticmethod 11 | def forward(ctx, x, scale): 12 | ctx.scale = scale 13 | res = x.new(x) 14 | return res 15 | 16 | @staticmethod 17 | def backward(ctx, grad): 18 | return grad * ctx.scale, None 19 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/layer_drop.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | LayerDrop as described in https://arxiv.org/abs/1909.11556. 7 | """ 8 | 9 | import torch 10 | import torch.nn as nn 11 | 12 | 13 | class LayerDropModuleList(nn.ModuleList): 14 | """ 15 | A LayerDrop implementation based on :class:`torch.nn.ModuleList`. 16 | 17 | We refresh the choice of which layers to drop every time we iterate 18 | over the LayerDropModuleList instance. During evaluation we always 19 | iterate over all layers. 20 | 21 | Usage:: 22 | 23 | layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3]) 24 | for layer in layers: # this might iterate over layers 1 and 3 25 | x = layer(x) 26 | for layer in layers: # this might iterate over all layers 27 | x = layer(x) 28 | for layer in layers: # this might not iterate over any layers 29 | x = layer(x) 30 | 31 | Args: 32 | p (float): probability of dropping out each layer 33 | modules (iterable, optional): an iterable of modules to add 34 | """ 35 | 36 | def __init__(self, p, modules=None): 37 | super().__init__(modules) 38 | self.p = p 39 | 40 | def __iter__(self): 41 | dropout_probs = torch.empty(len(self)).uniform_() 42 | for i, m in enumerate(super().__iter__()): 43 | if not self.training or (dropout_probs[i] > self.p): 44 | yield m 45 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/layer_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | 10 | 11 | try: 12 | from apex.normalization import FusedLayerNorm as _FusedLayerNorm 13 | 14 | has_fused_layernorm = True 15 | 16 | class FusedLayerNorm(_FusedLayerNorm): 17 | @torch.jit.unused 18 | def forward(self, x): 19 | if not x.is_cuda: 20 | return super().forward(x) 21 | else: 22 | with torch.cuda.device(x.device): 23 | return super().forward(x) 24 | 25 | 26 | except ImportError: 27 | has_fused_layernorm = False 28 | 29 | 30 | def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): 31 | if torch.jit.is_scripting(): 32 | export = True 33 | if not export and torch.cuda.is_available() and has_fused_layernorm: 34 | return FusedLayerNorm(normalized_shape, eps, elementwise_affine) 35 | return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) 36 | 37 | 38 | class Fp32LayerNorm(nn.LayerNorm): 39 | def __init__(self, *args, **kwargs): 40 | super().__init__(*args, **kwargs) 41 | 42 | def forward(self, input): 43 | output = F.layer_norm( 44 | input.float(), 45 | self.normalized_shape, 46 | self.weight.float() if self.weight is not None else None, 47 | self.bias.float() if self.bias is not None else None, 48 | self.eps, 49 | ) 50 | return output.type_as(input) 51 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/lightconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .lightconv_layer import LightconvLayer # noqa 7 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/lightconv_layer/lightconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
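A sketch of the dispatch implemented by the LayerNorm factory above: it silently upgrades to Apex's FusedLayerNorm when apex is installed and CUDA is available, and falls back to torch.nn.LayerNorm otherwise; the call signature is identical either way, so callers never branch:

import torch

ln = LayerNorm(512)        # FusedLayerNorm if apex + CUDA, else torch.nn.LayerNorm
x = torch.randn(10, 512)
y = ln(x)
print(type(ln).__name__, y.shape)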
6 | */ 7 | 8 | #include <torch/extension.h> 9 | #include <vector> 10 | 11 | std::vector<at::Tensor> lightconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector<at::Tensor> lightconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector<at::Tensor> lightconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return lightconv_cuda_forward(input, filters, padding_l); 36 | } 37 | 38 | std::vector<at::Tensor> lightconv_backward( 39 | at::Tensor gradOutput, 40 | int padding_l, 41 | at::Tensor input, 42 | at::Tensor filters) { 43 | 44 | CHECK_INPUT(gradOutput); 45 | CHECK_INPUT(input); 46 | CHECK_INPUT(filters); 47 | 48 | return lightconv_cuda_backward(gradOutput, padding_l, input, filters); 49 | } 50 | 51 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 52 | m.def("forward", &lightconv_forward, "lightconv forward (CUDA)"); 53 | m.def("backward", &lightconv_backward, "lightconv backward (CUDA)"); 54 | } 55 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/lightconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="lightconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | "lightconv_cuda", 16 | [ 17 | "lightconv_cuda.cpp", 18 | "lightconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | from .learned_positional_embedding import LearnedPositionalEmbedding 9 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding 10 | 11 | 12 | def PositionalEmbedding( 13 | num_embeddings: int, 14 | embedding_dim: int, 15 | padding_idx: int, 16 | learned: bool = False, 17 | ): 18 | if learned: 19 | # if padding_idx is specified then offset the embedding ids by 20 | # this index and adjust num_embeddings appropriately 21 | # TODO: The right place for this offset would be inside 22 | # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
23 | if padding_idx is not None: 24 | num_embeddings = num_embeddings + padding_idx + 1 25 | m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) 26 | nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) 27 | if padding_idx is not None: 28 | nn.init.constant_(m.weight[padding_idx], 0) 29 | else: 30 | m = SinusoidalPositionalEmbedding( 31 | embedding_dim, 32 | padding_idx, 33 | init_size=num_embeddings + padding_idx + 1, 34 | ) 35 | return m 36 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/quantization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/machine_translation/fairseq/modules/quantization/__init__.py -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/quantization/pq/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .utils import SizeTracker, quantize_model_ # NOQA 7 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/quantization/pq/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .qconv import PQConv2d # NOQA 7 | from .qemb import PQEmbedding # NOQA 8 | from .qlinear import PQLinear # NOQA 9 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/quantization/quantization_options.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | def parse_config_yaml(yaml_data): 8 | # Initialize to default options. 
9 | quantization_options = { 10 | "n_centroids": { 11 | "Linear": ["in_features", {"*": 256}], 12 | "Embedding": ["embedding_dim", {"*": 256}], 13 | }, 14 | "block_sizes": { 15 | "Linear": ["fuzzy_name", {"fc": 8, "attn": 4, "emb": 4}], 16 | "Embedding": ["fuzzy_name", {"emb": 8}], 17 | }, 18 | "layers_to_quantize": [ 19 | "decoder\\.layers\\.\\d+\\.fc[12]", 20 | "decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]", 21 | "decoder\\.layers\\.\\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)", 22 | ], 23 | } 24 | 25 | if "n_centroids" in yaml_data: 26 | quantization_options["n_centroids"] = { 27 | layer: convert_yaml_to_tuple(layer_data) 28 | for layer, layer_data in yaml_data["n_centroids"].items() 29 | } 30 | if "block_sizes" in yaml_data: 31 | quantization_options["block_sizes"] = { 32 | layer: convert_yaml_to_tuple(layer_data) 33 | for layer, layer_data in yaml_data["block_sizes"].items() 34 | } 35 | if "layers_to_quantize" in yaml_data: 36 | quantization_options["layers_to_quantize"] = yaml_data["layers_to_quantize"] 37 | 38 | return quantization_options 39 | 40 | 41 | def convert_yaml_to_tuple(yaml_dictionary): 42 | """Converts a yaml dictionary with two keys: `key` and `value` into a two 43 | argument tuple of those values.""" 44 | return (yaml_dictionary["key"], yaml_dictionary["value"]) 45 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/quantization/scalar/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .utils import quantize_model_ # NOQA 7 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/quantization/scalar/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .qact import ActivationQuantizer # NOQA 7 | from .qconv import IntConv2d # NOQA 8 | from .qemb import IntEmbedding # NOQA 9 | from .qlinear import IntLinear # NOQA 10 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/quantization/scalar/ops.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
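A worked example of the parsing above, assuming the YAML has already been loaded into a plain dict: each per-layer entry is a {key, value} mapping that convert_yaml_to_tuple turns into a (key, value) pair, and any section left out keeps the defaults from parse_config_yaml:

yaml_data = {
    "n_centroids": {
        "Linear": {"key": "in_features", "value": {"*": 512}},
    },
    "layers_to_quantize": ["decoder\\.layers\\.\\d+\\.fc[12]"],
}

opts = parse_config_yaml(yaml_data)
assert opts["n_centroids"]["Linear"] == ("in_features", {"*": 512})
assert opts["block_sizes"]["Embedding"] == ["fuzzy_name", {"emb": 8}]  # default kept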
5 | 6 | import torch 7 | 8 | 9 | def emulate_int(w, bits, method, scale=None, zero_point=None): 10 | q = globals()[f"emulate_int{bits}_{method}"] 11 | return q(w, scale=scale, zero_point=zero_point) 12 | 13 | 14 | def quantize(w, scale, zero_point): 15 | return ( 16 | torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point 17 | ) * scale 18 | 19 | 20 | def emulate_int8_histogram(w, scale=None, zero_point=None): 21 | if scale is None: 22 | obs = torch.quantization.observer.HistogramObserver() 23 | _ = obs(w.float()) 24 | scale, zero_point = obs.calculate_qparams() 25 | scale = scale.cuda().type_as(w) 26 | zero_point = zero_point.cuda().type_as(w) 27 | return quantize(w, scale, zero_point), scale, zero_point 28 | 29 | 30 | def emulate_int8_channel(w, scale=None, zero_point=None): 31 | if scale is None: 32 | obs = torch.quantization.observer.PerChannelMinMaxObserver( 33 | ch_axis=-1, qscheme=torch.per_channel_symmetric 34 | ) 35 | _ = obs(w) 36 | scale, zero_point, ch_axis = obs.get_qparams() 37 | scale = scale.cuda().type_as(w) 38 | zero_point = zero_point.cuda().type_as(w) 39 | return quantize(w, scale, zero_point), scale, zero_point 40 | 41 | 42 | def emulate_int8_tensor(w, scale=None, zero_point=None): 43 | if scale is None: 44 | obs = torch.quantization.observer.MinMaxObserver() 45 | _ = obs(w) 46 | scale, zero_point = obs.calculate_qparams() 47 | scale = scale.cuda().type_as(w) 48 | zero_point = zero_point.cuda().type_as(w) 49 | return quantize(w, scale, zero_point), scale, zero_point 50 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/same_pad.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | from torch import nn 8 | 9 | 10 | class SamePad(nn.Module): 11 | def __init__(self, kernel_size): 12 | super().__init__() 13 | self.remove = kernel_size % 2 == 0 14 | 15 | def forward(self, x): 16 | if self.remove: 17 | x = x[:, :, :-1] 18 | return x 19 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/scalar_bias.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
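A small numeric example of the emulation above: quantize() snaps weights onto the 256-level grid implied by (scale, zero_point) but returns a float tensor, so the surrounding training code keeps operating in floating point (CPU-only sketch with hand-picked qparams):

import torch

w = torch.tensor([-1.0, -0.013, 0.0, 0.42, 1.0])
scale, zero_point = torch.tensor(2.0 / 255), torch.tensor(127.0)
w_q = quantize(w, scale, zero_point)
print(w_q)                    # values rounded onto the int8 grid, still float32
print((w - w_q).abs().max())  # bounded by scale / 2 inside the clamp range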
5 | # 6 | 7 | import torch 8 | 9 | 10 | class ScalarBias(torch.autograd.Function): 11 | """ 12 | Adds a vector of scalars, used in the self-attention mechanism to allow 13 | the model to optionally attend to this vector instead of the past 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, input, dim, bias_init): 18 | size = list(input.size()) 19 | size[dim] += 1 20 | output = input.new(*size).fill_(bias_init) 21 | output.narrow(dim, 1, size[dim] - 1).copy_(input) 22 | ctx.dim = dim 23 | return output 24 | 25 | @staticmethod 26 | def backward(ctx, grad): 27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None 28 | 29 | 30 | def scalar_bias(input, dim, bias_init=0): 31 | return ScalarBias.apply(input, dim, bias_init) 32 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/sparse_transformer_sentence_encoder_layer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.modules import TransformerSentenceEncoderLayer 7 | from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention 8 | 9 | 10 | class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer): 11 | """ 12 | Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention) 13 | """ 14 | 15 | def __init__( 16 | self, 17 | embedding_dim: int = 768, 18 | ffn_embedding_dim: int = 3072, 19 | num_attention_heads: int = 8, 20 | dropout: float = 0.1, 21 | attention_dropout: float = 0.1, 22 | activation_dropout: float = 0.1, 23 | activation_fn: str = "relu", 24 | export: bool = False, 25 | is_bidirectional: bool = True, 26 | stride: int = 32, 27 | expressivity: int = 8, 28 | ) -> None: 29 | 30 | super().__init__( 31 | embedding_dim, 32 | ffn_embedding_dim, 33 | num_attention_heads, 34 | dropout, 35 | attention_dropout, 36 | activation_dropout, 37 | activation_fn, 38 | export, 39 | ) 40 | 41 | self.self_attn = SparseMultiheadAttention( 42 | self.embedding_dim, 43 | num_attention_heads, 44 | dropout=attention_dropout, 45 | add_bias_kv=False, 46 | add_zero_attn=False, 47 | self_attention=True, 48 | is_bidirectional=is_bidirectional, 49 | stride=stride, 50 | expressivity=expressivity, 51 | ) 52 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/transpose_last.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | transpose last 2 dimensions of the input 7 | """ 8 | 9 | import torch.nn as nn 10 | 11 | 12 | class TransposeLast(nn.Module): 13 | def __init__(self, deconstruct_idx=None): 14 | super().__init__() 15 | self.deconstruct_idx = deconstruct_idx 16 | 17 | def forward(self, x): 18 | if self.deconstruct_idx is not None: 19 | x = x[self.deconstruct_idx] 20 | return x.transpose(-2, -1) 21 | -------------------------------------------------------------------------------- /machine_translation/fairseq/modules/unfold.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
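A shape sketch for scalar_bias above: one extra position filled with bias_init is prepended along dim, which is how an attention row gains an "attend to nothing" slot:

import torch

attn_scores = torch.randn(2, 4, 6)           # bsz x tgt_len x src_len
with_sink = scalar_bias(attn_scores, dim=2)  # bsz x tgt_len x (src_len + 1)
assert with_sink.shape == (2, 4, 7)
assert (with_sink[:, :, 0] == 0).all()       # new slot holds bias_init (default 0)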
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn.functional as F 7 | 8 | 9 | def unfold1d(x, kernel_size, padding_l, pad_value=0): 10 | """unfold T x B x C to T x B x C x K""" 11 | if kernel_size > 1: 12 | T, B, C = x.size() 13 | x = F.pad( 14 | x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value 15 | ) 16 | x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) 17 | else: 18 | x = x.unsqueeze(3) 19 | return x 20 | -------------------------------------------------------------------------------- /machine_translation/fairseq/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | import importlib 8 | import os 9 | from argparse import Namespace 10 | from typing import Union 11 | 12 | from fairseq import registry 13 | from fairseq.optim.bmuf import FairseqBMUF # noqa 14 | from fairseq.optim.fairseq_optimizer import ( # noqa 15 | FairseqOptimizer, 16 | LegacyFairseqOptimizer, 17 | ) 18 | from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer 19 | from fairseq.optim.shard import shard_ 20 | from omegaconf import DictConfig 21 | 22 | 23 | __all__ = [ 24 | "FairseqOptimizer", 25 | "FP16Optimizer", 26 | "MemoryEfficientFP16Optimizer", 27 | "shard_", 28 | ] 29 | 30 | 31 | ( 32 | _build_optimizer, 33 | register_optimizer, 34 | OPTIMIZER_REGISTRY, 35 | OPTIMIZER_DATACLASS_REGISTRY, 36 | ) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True) 37 | 38 | 39 | def build_optimizer( 40 | optimizer_cfg: Union[DictConfig, Namespace], params, *extra_args, **extra_kwargs 41 | ): 42 | if all(isinstance(p, dict) for p in params): 43 | params = [t for p in params for t in p.values()] 44 | params = list(filter(lambda p: p.requires_grad, params)) 45 | return _build_optimizer(optimizer_cfg, params, *extra_args, **extra_kwargs) 46 | 47 | 48 | # automatically import any Python files in the optim/ directory 49 | for file in os.listdir(os.path.dirname(__file__)): 50 | if file.endswith(".py") and not file.startswith("_"): 51 | file_name = file[: file.find(".py")] 52 | importlib.import_module("fairseq.optim." + file_name) 53 | -------------------------------------------------------------------------------- /machine_translation/fairseq/optim/adagrad.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . 
import LegacyFairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer("adagrad") 12 | class Adagrad(LegacyFairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 22 | help='weight decay') 23 | # fmt: on 24 | 25 | @property 26 | def optimizer_config(self): 27 | """ 28 | Return a kwarg dictionary that will be used to override optimizer 29 | args stored in checkpoints. This allows us to load a checkpoint and 30 | resume training using a different set of optimizer args, e.g., with a 31 | different learning rate. 32 | """ 33 | return { 34 | "lr": self.args.lr[0], 35 | "weight_decay": self.args.weight_decay, 36 | } 37 | 38 | @property 39 | def supports_flat_params(self): 40 | return True 41 | -------------------------------------------------------------------------------- /machine_translation/fairseq/optim/lr_scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | import importlib 8 | import os 9 | from argparse import Namespace 10 | from typing import Union 11 | 12 | from fairseq import registry 13 | from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( # noqa 14 | FairseqLRScheduler, 15 | LegacyFairseqLRScheduler, 16 | ) 17 | from omegaconf import DictConfig 18 | 19 | 20 | ( 21 | build_lr_scheduler_, 22 | register_lr_scheduler, 23 | LR_SCHEDULER_REGISTRY, 24 | LR_SCHEDULER_DATACLASS_REGISTRY, 25 | ) = registry.setup_registry( 26 | "--lr-scheduler", base_class=FairseqLRScheduler, default="fixed" 27 | ) 28 | 29 | 30 | def build_lr_scheduler(lr_scheduler_cfg: Union[DictConfig, Namespace], optimizer): 31 | return build_lr_scheduler_(lr_scheduler_cfg, optimizer) 32 | 33 | 34 | # automatically import any Python files in the optim/lr_scheduler/ directory 35 | for file in os.listdir(os.path.dirname(__file__)): 36 | if file.endswith(".py") and not file.startswith("_"): 37 | file_name = file[: file.find(".py")] 38 | importlib.import_module("fairseq.optim.lr_scheduler." + file_name) 39 | -------------------------------------------------------------------------------- /machine_translation/fairseq/optim/sgd.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . 
import LegacyFairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer("sgd") 12 | class SGD(LegacyFairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.SGD(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--momentum', default=0.0, type=float, metavar='M', 22 | help='momentum factor') 23 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 24 | help='weight decay') 25 | # fmt: on 26 | 27 | @property 28 | def optimizer_config(self): 29 | """ 30 | Return a kwarg dictionary that will be used to override optimizer 31 | args stored in checkpoints. This allows us to load a checkpoint and 32 | resume training using a different set of optimizer args, e.g., with a 33 | different learning rate. 34 | """ 35 | return { 36 | "lr": self.args.lr[0], 37 | "momentum": self.args.momentum, 38 | "weight_decay": self.args.weight_decay, 39 | } 40 | 41 | @property 42 | def supports_flat_params(self): 43 | return True 44 | -------------------------------------------------------------------------------- /machine_translation/fairseq/optim/shard.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | try: 8 | from fairscale.optim import OSS 9 | 10 | _has_fairscale = True 11 | except ImportError: 12 | _has_fairscale = False 13 | 14 | 15 | def shard_(args, optimizer, group): 16 | if not _has_fairscale: 17 | raise ImportError( 18 | "\n\nPlease install the fairscale package:" "\n\n pip install fairscale" 19 | ) 20 | 21 | class FairseqOSS(OSS): 22 | @property 23 | def disable_mem_eff_fp16_loading_hack(self): 24 | return True 25 | 26 | def __getattr__(self, name): 27 | if name.startswith("supports") and hasattr(self.optim, name): 28 | return getattr(self.optim, name) 29 | raise AttributeError( 30 | "'FairseqOSS' object has no attribute {0!r}".format(name) 31 | ) 32 | 33 | torch_optimizer = optimizer.optimizer 34 | optim_cls = type(torch_optimizer) 35 | 36 | optimizer.optimizer = FairseqOSS( 37 | torch_optimizer.param_groups, 38 | optim_cls, 39 | group=group, 40 | **optimizer.optimizer_config 41 | ) 42 | -------------------------------------------------------------------------------- /machine_translation/fairseq/pdb.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import multiprocessing 7 | import os 8 | import pdb 9 | import sys 10 | 11 | 12 | __all__ = ["set_trace"] 13 | 14 | 15 | _stdin = [None] 16 | _stdin_lock = multiprocessing.Lock() 17 | try: 18 | _stdin_fd = sys.stdin.fileno() 19 | except Exception: 20 | _stdin_fd = None 21 | 22 | 23 | class MultiprocessingPdb(pdb.Pdb): 24 | """A Pdb wrapper that works in a multiprocessing environment. 
25 | 26 | Usage: `from fairseq import pdb; pdb.set_trace()` 27 | """ 28 | 29 | def __init__(self): 30 | pdb.Pdb.__init__(self, nosigint=True) 31 | 32 | def _cmdloop(self): 33 | stdin_bak = sys.stdin 34 | with _stdin_lock: 35 | try: 36 | if _stdin_fd is not None: 37 | if not _stdin[0]: 38 | _stdin[0] = os.fdopen(_stdin_fd) 39 | sys.stdin = _stdin[0] 40 | self.cmdloop() 41 | finally: 42 | sys.stdin = stdin_bak 43 | 44 | 45 | def set_trace(): 46 | pdb = MultiprocessingPdb() 47 | pdb.set_trace(sys._getframe().f_back) 48 | -------------------------------------------------------------------------------- /machine_translation/fairseq/scoring/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | import importlib 8 | import os 9 | from abc import ABC, abstractmethod 10 | 11 | from fairseq import registry 12 | 13 | 14 | class BaseScorer(ABC): 15 | def __init__(self, args): 16 | self.args = args 17 | self.ref = [] 18 | self.pred = [] 19 | 20 | @staticmethod 21 | def add_args(parser): 22 | pass 23 | 24 | def add_string(self, ref, pred): 25 | self.ref.append(ref) 26 | self.pred.append(pred) 27 | 28 | @abstractmethod 29 | def score(self) -> float: 30 | pass 31 | 32 | @abstractmethod 33 | def result_string(self) -> str: 34 | pass 35 | 36 | 37 | _build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry( 38 | "--scoring", default="bleu" 39 | ) 40 | 41 | 42 | def build_scorer(args, tgt_dict): 43 | from fairseq import utils 44 | 45 | if args.sacrebleu: 46 | utils.deprecation_warning( 47 | "--sacrebleu is deprecated. Please use --scoring sacrebleu instead." 48 | ) 49 | args.scoring = "sacrebleu" 50 | if args.scoring == "bleu": 51 | from fairseq.scoring import bleu 52 | 53 | return bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk()) 54 | return _build_scorer(args) 55 | 56 | 57 | # automatically import any Python files in the current directory 58 | for file in os.listdir(os.path.dirname(__file__)): 59 | if file.endswith(".py") and not file.startswith("_"): 60 | module = file[: file.find(".py")] 61 | importlib.import_module("fairseq.scoring." + module) 62 | -------------------------------------------------------------------------------- /machine_translation/fairseq/scoring/chrf.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
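The registry wiring above means a new metric only needs the decorator to become selectable via --scoring; a minimal sketch of a custom scorer (the char_acc name and its logic are invented for illustration):

from fairseq.scoring import BaseScorer, register_scorer

@register_scorer("char_acc")
class CharAccScorer(BaseScorer):
    """Toy scorer: fraction of aligned character positions that match."""

    def score(self) -> float:
        ref, pred = "".join(self.ref), "".join(self.pred)
        matches = sum(r == p for r, p in zip(ref, pred))
        return 100.0 * matches / max(len(ref), 1)

    def result_string(self) -> str:
        return "CharAcc: {:.2f}".format(self.score())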
5 | 6 | from fairseq.scoring import BaseScorer, register_scorer 7 | 8 | 9 | @register_scorer("chrf") 10 | class ChrFScorer(BaseScorer): 11 | def __init__(self, args): 12 | super(ChrFScorer, self).__init__(args) 13 | import sacrebleu 14 | 15 | self.sacrebleu = sacrebleu 16 | 17 | def add_string(self, ref, pred): 18 | self.ref.append(ref) 19 | self.pred.append(pred) 20 | 21 | def score(self, order=4): 22 | if order != 4: 23 | raise NotImplementedError 24 | return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).score 25 | 26 | def result_string(self, order=4): 27 | if order != 4: 28 | raise NotImplementedError 29 | return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format() 30 | -------------------------------------------------------------------------------- /machine_translation/fairseq/tasks/translation_from_pretrained_xlm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary 7 | from fairseq.tasks.translation import TranslationTask 8 | 9 | from . import register_task 10 | 11 | 12 | @register_task("translation_from_pretrained_xlm") 13 | class TranslationFromPretrainedXLMTask(TranslationTask): 14 | """ 15 | Same as TranslationTask except it uses the MaskedLMDictionary class so 16 | that we can load data that was binarized with that dictionary. 17 | 18 | This task should be used for the entire training pipeline when we want to 19 | train an NMT model from a pretrained XLM checkpoint: binarizing NMT data, 20 | training NMT with the pretrained XLM checkpoint, and subsequent evaluation 21 | of that trained model. 22 | """ 23 | 24 | @classmethod 25 | def load_dictionary(cls, filename): 26 | """Load the masked LM dictionary from the filename 27 | 28 | Args: 29 | filename (str): the filename 30 | """ 31 | return MaskedLMDictionary.load(filename) 32 | -------------------------------------------------------------------------------- /machine_translation/fairseq/tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | 9 | SPACE_NORMALIZER = re.compile(r"\s+") 10 | 11 | 12 | def tokenize_line(line): 13 | line = SPACE_NORMALIZER.sub(" ", line) 14 | line = line.strip() 15 | return line.split() 16 | -------------------------------------------------------------------------------- /machine_translation/fairseq_cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/machine_translation/fairseq_cli/__init__.py -------------------------------------------------------------------------------- /machine_translation/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
8 | """ 9 | 10 | from fairseq_cli.train import cli_main 11 | 12 | 13 | if __name__ == "__main__": 14 | cli_main() 15 | -------------------------------------------------------------------------------- /poster.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/poster.pdf -------------------------------------------------------------------------------- /preprocess.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from fairseq_cli.preprocess import cli_main 8 | 9 | 10 | if __name__ == '__main__': 11 | cli_main() 12 | -------------------------------------------------------------------------------- /slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/princeton-nlp/TRIME/3350ca9df9c423ee738ecc112787eae87b574300/slides.pdf -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from fairseq_cli.train import cli_main 8 | 9 | 10 | if __name__ == '__main__': 11 | cli_main() 12 | -------------------------------------------------------------------------------- /train_scripts/enwik8-38M-trime.sh: -------------------------------------------------------------------------------- 1 | python train.py --task language_modeling data-bin/enwik8 \ 2 | --save-dir output/enwik8-38M-trime \ 3 | --arch transformer_lm_enwik8 \ 4 | --criterion trime_loss \ 5 | --optimizer adam --adam-betas "(0.9, 0.98)" --weight-decay 0.01 \ 6 | --clip-norm 0.0 --max-update 400000 --max-lr 0.00025 --lr 0.0 \ 7 | --lr-scheduler cosine --warmup-updates 0 \ 8 | --max-tokens 12288 --update-freq 1 --tokens-per-sample 512 \ 9 | --seed 1 --sample-break-mode none --skip-invalid-size-inputs-valid-test \ 10 | --ddp-backend=no_c10d --adaptive-input --tie-adaptive-weights --adaptive-input-cutoff 208 --adaptive-softmax-cutoff 208 \ 11 | --knn-keytype last_ffn_input --adaptive-softmax-factor 1 --adaptive-input-factor 1 --fp16 \ 12 | --ce-warmup-epoch 3 -------------------------------------------------------------------------------- /train_scripts/enwik8-38M-trime_long.sh: -------------------------------------------------------------------------------- 1 | python train.py --task language_modeling data-bin/enwik8 \ 2 | --save-dir output/enwik8-38M-trime_long \ 3 | --arch transformer_lm_enwik8 \ 4 | --criterion trime_long_loss_same_device \ 5 | --optimizer adam --adam-betas "(0.9, 0.98)" --weight-decay 0.01 \ 6 | --clip-norm 0.0 --max-update 400000 --max-lr 0.00025 --lr 0.0 \ 7 | --lr-scheduler cosine --warmup-updates 0 \ 8 | --max-tokens 12288 --update-freq 1 --tokens-per-sample 512 \ 9 | --seed 1 --sample-break-mode none --skip-invalid-size-inputs-valid-test \ 10 | --ddp-backend=no_c10d --adaptive-input --tie-adaptive-weights --adaptive-input-cutoff 208 --adaptive-softmax-cutoff 208 \ 11 | --knn-keytype last_ffn_input --adaptive-softmax-factor 1 
--adaptive-input-factor 1 --fp16 \ 12 | --ce-warmup-epoch 3 --keep-order 13 | -------------------------------------------------------------------------------- /train_scripts/wiki103-150M-trime.sh: -------------------------------------------------------------------------------- 1 | python train.py --task language_modeling data-bin/wikitext-103 \ 2 | --save-dir output/wiki103-150M-trime \ 3 | --arch transformer_lm_wiki103_150M \ 4 | --criterion trime_loss \ 5 | --optimizer adam --adam-betas "(0.9, 0.98)" --weight-decay 0.01 --clip-norm 0.0 \ 6 | --max-update 200000 --lr 0.0005 --lr-scheduler inverse_sqrt \ 7 | --warmup-updates 8000 --warmup-init-lr 1e-07 \ 8 | --max-tokens 9000 --update-freq 2 --tokens-per-sample 150 \ 9 | --seed 1 --sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --adaptive-input --tie-adaptive-weights --adaptive-input-cutoff 20000,60000 --adaptive-softmax-cutoff 20000,60000 \ 10 | --knn-keytype last_ffn_input --fp16 \ 11 | --ce-warmup-epoch 3 --required-batch-size-multiple 1 --adaptive-softmax-factor 1 --adaptive-input-factor 1 \ 12 | -------------------------------------------------------------------------------- /train_scripts/wiki103-150M-trime_long.sh: -------------------------------------------------------------------------------- 1 | python train.py --task language_modeling data-bin/wikitext-103 \ 2 | --save-dir output/wiki103-150M-trime_long \ 3 | --arch transformer_lm_wiki103_150M \ 4 | --criterion trime_long_loss_same_device \ 5 | --optimizer adam --adam-betas "(0.9, 0.98)" --weight-decay 0.01 --clip-norm 0.0 \ 6 | --max-update 200000 --lr 0.0005 --lr-scheduler inverse_sqrt \ 7 | --warmup-updates 8000 --warmup-init-lr 1e-07 \ 8 | --max-tokens 9000 --update-freq 2 --tokens-per-sample 150 \ 9 | --seed 1 --sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --adaptive-input --tie-adaptive-weights --adaptive-input-cutoff 20000,60000 --adaptive-softmax-cutoff 20000,60000 \ 10 | --knn-keytype last_ffn_input --fp16 \ 11 | --ce-warmup-epoch 3 --required-batch-size-multiple 1 --adaptive-softmax-factor 1 --adaptive-input-factor 1 \ 12 | --keep-order -------------------------------------------------------------------------------- /train_scripts/wiki103-247M-trime.sh: -------------------------------------------------------------------------------- 1 | python train.py --task language_modeling data-bin/wikitext-103 \ 2 | --save-dir output/wiki103-247M-trime \ 3 | --arch transformer_lm_wiki103 \ 4 | --max-update 286000 --max-lr 1.0 --t-mult 2 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 \ 5 | --warmup-updates 16000 --warmup-init-lr 1e-07 --min-lr 1e-09 --optimizer nag --lr 0.0001 --clip-norm 0.1 \ 6 | --criterion trime_loss --max-tokens 3072 --update-freq 6 --tokens-per-sample 3072 --seed 1 \ 7 | --sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --knn-keytype last_ffn_input --fp16 \ 8 | --ce-warmup-epoch 9 -------------------------------------------------------------------------------- /train_scripts/wiki103-247M-trime_ext.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | python train.py --task language_modeling data-bin/wikitext-103 \ 4 | --save-dir output/wiki103-247M-trime_ext \ 5 | --arch transformer_lm_wiki103 \ 6 | --max-update 286000 --max-lr 1.0 --t-mult 2 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 \ 7 | --warmup-updates 16000 --warmup-init-lr 1e-07 --min-lr 1e-09 
--optimizer nag --lr 0.0001 --clip-norm 0.1 \ 8 | --criterion trime_ext_loss --max-tokens 3072 --update-freq 6 --tokens-per-sample 3072 --seed 1 \ 9 | --sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --knn-keytype last_ffn_input --fp16 \ 10 | --ce-warmup-epoch 9 --cross-sent-ratio 0.9 \ 11 | --predefined-batches data-bin/wikitext-103/wiki103-l3072-batches.json -------------------------------------------------------------------------------- /train_scripts/wiki103-247M-trime_long.sh: -------------------------------------------------------------------------------- 1 | python train.py --task language_modeling data-bin/wikitext-103 \ 2 | --save-dir output/wiki103-247M-trime_long \ 3 | --arch transformer_lm_wiki103 \ 4 | --max-update 286000 --max-lr 1.0 --t-mult 2 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 \ 5 | --warmup-updates 16000 --warmup-init-lr 1e-07 --min-lr 1e-09 --optimizer nag --lr 0.0001 --clip-norm 0.1 \ 6 | --criterion trime_long_loss --max-tokens 3072 --update-freq 6 --tokens-per-sample 3072 --seed 1 \ 7 | --sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=no_c10d --knn-keytype last_ffn_input --fp16 \ 8 | --ce-warmup-epoch 9 --keep-order --train-mem-size 12288 9 | --------------------------------------------------------------------------------