├── fairseq
│ ├── data
│ │ ├── audio
│ │ │ ├── __init__.py
│ │ │ └── __pycache__
│ │ │ │ ├── __init__.cpython-36.pyc
│ │ │ │ └── raw_audio_dataset.cpython-36.pyc
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── iterators.cpython-36.pyc
│ │ │ ├── noising.cpython-36.pyc
│ │ │ ├── data_utils.cpython-36.pyc
│ │ │ ├── dictionary.cpython-36.pyc
│ │ │ ├── id_dataset.cpython-36.pyc
│ │ │ ├── pad_dataset.cpython-36.pyc
│ │ │ ├── concat_dataset.cpython-36.pyc
│ │ │ ├── list_dataset.cpython-36.pyc
│ │ │ ├── numel_dataset.cpython-36.pyc
│ │ │ ├── plasma_utils.cpython-36.pyc
│ │ │ ├── sort_dataset.cpython-36.pyc
│ │ │ ├── fairseq_dataset.cpython-36.pyc
│ │ │ ├── indexed_dataset.cpython-36.pyc
│ │ │ ├── prepend_dataset.cpython-36.pyc
│ │ │ ├── replace_dataset.cpython-36.pyc
│ │ │ ├── sharded_dataset.cpython-36.pyc
│ │ │ ├── truncate_dataset.cpython-36.pyc
│ │ │ ├── lru_cache_dataset.cpython-36.pyc
│ │ │ ├── mask_tokens_dataset.cpython-36.pyc
│ │ │ ├── monolingual_dataset.cpython-36.pyc
│ │ │ ├── num_samples_dataset.cpython-36.pyc
│ │ │ ├── raw_label_dataset.cpython-36.pyc
│ │ │ ├── strip_token_dataset.cpython-36.pyc
│ │ │ ├── subsample_dataset.cpython-36.pyc
│ │ │ ├── token_block_dataset.cpython-36.pyc
│ │ │ ├── base_wrapper_dataset.cpython-36.pyc
│ │ │ ├── language_pair_dataset.cpython-36.pyc
│ │ │ ├── offset_tokens_dataset.cpython-36.pyc
│ │ │ ├── prepend_token_dataset.cpython-36.pyc
│ │ │ ├── transform_eos_dataset.cpython-36.pyc
│ │ │ ├── backtranslation_dataset.cpython-36.pyc
│ │ │ ├── concat_sentences_dataset.cpython-36.pyc
│ │ │ ├── round_robin_zip_datasets.cpython-36.pyc
│ │ │ ├── lm_context_window_dataset.cpython-36.pyc
│ │ │ ├── nested_dictionary_dataset.cpython-36.pyc
│ │ │ ├── multi_corpus_sampled_dataset.cpython-36.pyc
│ │ │ └── transform_eos_lang_pair_dataset.cpython-36.pyc
│ │ ├── encoders
│ │ │ ├── __pycache__
│ │ │ │ ├── fastbpe.cpython-36.pyc
│ │ │ │ ├── __init__.cpython-36.pyc
│ │ │ │ ├── gpt2_bpe.cpython-36.pyc
│ │ │ │ ├── hf_bert_bpe.cpython-36.pyc
│ │ │ │ ├── gpt2_bpe_utils.cpython-36.pyc
│ │ │ │ ├── moses_tokenizer.cpython-36.pyc
│ │ │ │ ├── nltk_tokenizer.cpython-36.pyc
│ │ │ │ ├── space_tokenizer.cpython-36.pyc
│ │ │ │ ├── subword_nmt_bpe.cpython-36.pyc
│ │ │ │ └── sentencepiece_bpe.cpython-36.pyc
│ │ │ ├── space_tokenizer.py
│ │ │ ├── nltk_tokenizer.py
│ │ │ ├── __init__.py
│ │ │ ├── sentencepiece_bpe.py
│ │ │ ├── fastbpe.py
│ │ │ ├── gpt2_bpe.py
│ │ │ ├── subword_nmt_bpe.py
│ │ │ ├── hf_bert_bpe.py
│ │ │ └── moses_tokenizer.py
│ │ ├── legacy
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-36.pyc
│ │ │ │ ├── masked_lm_dataset.cpython-36.pyc
│ │ │ │ ├── block_pair_dataset.cpython-36.pyc
│ │ │ │ └── masked_lm_dictionary.cpython-36.pyc
│ │ │ ├── __init__.py
│ │ │ └── masked_lm_dictionary.py
│ │ ├── data_utils_fast.cpython-36m-x86_64-linux-gnu.so
│ │ ├── token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so
│ │ ├── num_samples_dataset.py
│ │ ├── id_dataset.py
│ │ ├── offset_tokens_dataset.py
│ │ ├── strip_token_dataset.py
│ │ ├── raw_label_dataset.py
│ │ ├── lru_cache_dataset.py
│ │ ├── sort_dataset.py
│ │ ├── list_dataset.py
│ │ ├── numel_dataset.py
│ │ ├── replace_dataset.py
│ │ ├── pad_dataset.py
│ │ ├── truncate_dataset.py
│ │ ├── prepend_dataset.py
│ │ ├── prepend_token_dataset.py
│ │ ├── base_wrapper_dataset.py
│ │ ├── concat_sentences_dataset.py
│ │ ├── fairseq_dataset.py
│ │ ├── subsample_dataset.py
│ │ ├── sharded_dataset.py
│ │ ├── data_utils_fast.pyx
│ │ ├── transform_eos_lang_pair_dataset.py
│ │ ├── plasma_utils.py
│ │ ├── lm_context_window_dataset.py
│ │ └── __init__.py
│ ├── __pycache__
│ │ ├── bleu.cpython-36.pyc
│ │ ├── pdb.cpython-36.pyc
│ │ ├── meters.cpython-36.pyc
│ │ ├── search.cpython-36.pyc
│ │ ├── utils.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── binarizer.cpython-36.pyc
│ │ ├── options.cpython-36.pyc
│ │ ├── registry.cpython-36.pyc
│ │ ├── tokenizer.cpython-36.pyc
│ │ ├── trainer.cpython-36.pyc
│ │ ├── file_utils.cpython-36.pyc
│ │ ├── progress_bar.cpython-36.pyc
│ │ ├── checkpoint_utils.cpython-36.pyc
│ │ ├── distributed_utils.cpython-36.pyc
│ │ ├── sequence_generator.cpython-36.pyc
│ │ └── legacy_distributed_data_parallel.cpython-36.pyc
│ ├── optim
│ │ ├── __pycache__
│ │ │ ├── nag.cpython-36.pyc
│ │ │ ├── sgd.cpython-36.pyc
│ │ │ ├── adam.cpython-36.pyc
│ │ │ ├── bmuf.cpython-36.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── adadelta.cpython-36.pyc
│ │ │ ├── adagrad.cpython-36.pyc
│ │ │ ├── adamax.cpython-36.pyc
│ │ │ ├── adafactor.cpython-36.pyc
│ │ │ ├── fp16_optimizer.cpython-36.pyc
│ │ │ └── fairseq_optimizer.cpython-36.pyc
│ │ ├── lr_scheduler
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-36.pyc
│ │ │ │ ├── fixed_schedule.cpython-36.pyc
│ │ │ │ ├── cosine_lr_scheduler.cpython-36.pyc
│ │ │ │ ├── fairseq_lr_scheduler.cpython-36.pyc
│ │ │ │ ├── reduce_lr_on_plateau.cpython-36.pyc
│ │ │ │ ├── tri_stage_lr_scheduler.cpython-36.pyc
│ │ │ │ ├── polynomial_decay_schedule.cpython-36.pyc
│ │ │ │ ├── triangular_lr_scheduler.cpython-36.pyc
│ │ │ │ └── inverse_square_root_schedule.cpython-36.pyc
│ │ │ ├── __init__.py
│ │ │ ├── fairseq_lr_scheduler.py
│ │ │ ├── reduce_lr_on_plateau.py
│ │ │ ├── fixed_schedule.py
│ │ │ ├── triangular_lr_scheduler.py
│ │ │ ├── inverse_square_root_schedule.py
│ │ │ └── polynomial_decay_schedule.py
│ │ ├── __init__.py
│ │ ├── adagrad.py
│ │ ├── sgd.py
│ │ ├── adadelta.py
│ │ └── nag.py
│ ├── models
│ │ ├── __pycache__
│ │ │ ├── lstm.cpython-36.pyc
│ │ │ ├── fconv.cpython-36.pyc
│ │ │ ├── wav2vec.cpython-36.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── fconv_lm.cpython-36.pyc
│ │ │ ├── lightconv.cpython-36.pyc
│ │ │ ├── masked_lm.cpython-36.pyc
│ │ │ ├── lightconv_lm.cpython-36.pyc
│ │ │ ├── transformer.cpython-36.pyc
│ │ │ ├── fairseq_model.cpython-36.pyc
│ │ │ ├── fconv_self_att.cpython-36.pyc
│ │ │ ├── transformer_bk.cpython-36.pyc
│ │ │ ├── transformer_lm.cpython-36.pyc
│ │ │ ├── composite_encoder.cpython-36.pyc
│ │ │ ├── fairseq_decoder.cpython-36.pyc
│ │ │ ├── fairseq_encoder.cpython-36.pyc
│ │ │ ├── multilingual_transformer.cpython-36.pyc
│ │ │ ├── distributed_fairseq_model.cpython-36.pyc
│ │ │ ├── fairseq_incremental_decoder.cpython-36.pyc
│ │ │ └── transformer_from_pretrained_xlm.cpython-36.pyc
│ │ ├── roberta
│ │ │ ├── __pycache__
│ │ │ │ ├── model.cpython-36.pyc
│ │ │ │ ├── __init__.cpython-36.pyc
│ │ │ │ └── hub_interface.cpython-36.pyc
│ │ │ └── __init__.py
│ │ ├── fairseq_encoder.py
│ │ ├── composite_encoder.py
│ │ ├── distributed_fairseq_model.py
│ │ └── fairseq_decoder.py
│ ├── libbleu.cpython-36m-x86_64-linux-gnu.so
│ ├── modules
│ │ ├── __pycache__
│ │ │ ├── gelu.cpython-36.pyc
│ │ │ ├── unfold.cpython-36.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── conv_tbc.cpython-36.pyc
│ │ │ ├── highway.cpython-36.pyc
│ │ │ ├── vggblock.cpython-36.pyc
│ │ │ ├── beamable_mm.cpython-36.pyc
│ │ │ ├── layer_norm.cpython-36.pyc
│ │ │ ├── scalar_bias.cpython-36.pyc
│ │ │ ├── grad_multiply.cpython-36.pyc
│ │ │ ├── logsumexp_moe.cpython-36.pyc
│ │ │ ├── adaptive_input.cpython-36.pyc
│ │ │ ├── adaptive_softmax.cpython-36.pyc
│ │ │ ├── transformer_layer.cpython-36.pyc
│ │ │ ├── dynamic_convolution.cpython-36.pyc
│ │ │ ├── multihead_attention.cpython-36.pyc
│ │ │ ├── positional_embedding.cpython-36.pyc
│ │ │ ├── lightweight_convolution.cpython-36.pyc
│ │ │ ├── linearized_convolution.cpython-36.pyc
│ │ │ ├── character_token_embedder.cpython-36.pyc
│ │ │ ├── mean_pool_gating_network.cpython-36.pyc
│ │ │ ├── learned_positional_embedding.cpython-36.pyc
│ │ │ ├── transformer_sentence_encoder.cpython-36.pyc
│ │ │ ├── downsampled_multihead_attention.cpython-36.pyc
│ │ │ ├── sinusoidal_positional_embedding.cpython-36.pyc
│ │ │ └── transformer_sentence_encoder_layer.cpython-36.pyc
│ │ ├── lightconv_layer
│ │ │ ├── __init__.py
│ │ │ ├── setup.py
│ │ │ ├── lightconv_cuda.cpp
│ │ │ └── lightconv_cuda.cuh
│ │ ├── dynamicconv_layer
│ │ │ ├── __init__.py
│ │ │ ├── setup.py
│ │ │ ├── dynamiconv_cpu.cpp
│ │ │ ├── dynamicconv_cuda.cuh
│ │ │ └── dynamicconv_cuda.cpp
│ │ ├── grad_multiply.py
│ │ ├── unfold.py
│ │ ├── layer_norm.py
│ │ ├── gelu.py
│ │ ├── logsumexp_moe.py
│ │ ├── scalar_bias.py
│ │ ├── positional_embedding.py
│ │ ├── conv_tbc.py
│ │ ├── sparse_transformer_sentence_encoder_layer.py
│ │ ├── highway.py
│ │ ├── beamable_mm.py
│ │ ├── learned_positional_embedding.py
│ │ ├── mean_pool_gating_network.py
│ │ ├── __init__.py
│ │ ├── adaptive_input.py
│ │ ├── transformer_sentence_encoder_layer.py
│ │ └── sparse_transformer_sentence_encoder.py
│ ├── tasks
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── masked_lm.cpython-36.pyc
│ │ │ ├── fairseq_task.cpython-36.pyc
│ │ │ ├── translation.cpython-36.pyc
│ │ │ ├── translation_moe.cpython-36.pyc
│ │ │ ├── audio_pretraining.cpython-36.pyc
│ │ │ ├── cross_lingual_lm.cpython-36.pyc
│ │ │ ├── language_modeling.cpython-36.pyc
│ │ │ ├── legacy_masked_lm.cpython-36.pyc
│ │ │ ├── sentence_ranking.cpython-36.pyc
│ │ │ ├── sentence_prediction.cpython-36.pyc
│ │ │ ├── multilingual_translation.cpython-36.pyc
│ │ │ ├── semisupervised_translation.cpython-36.pyc
│ │ │ └── translation_from_pretrained_xlm.cpython-36.pyc
│ │ ├── translation_from_pretrained_xlm.py
│ │ ├── audio_pretraining.py
│ │ └── __init__.py
│ ├── criterions
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── masked_lm.cpython-36.pyc
│ │ │ ├── adaptive_loss.cpython-36.pyc
│ │ │ ├── cross_entropy.cpython-36.pyc
│ │ │ ├── composite_loss.cpython-36.pyc
│ │ │ ├── fairseq_criterion.cpython-36.pyc
│ │ │ ├── legacy_masked_lm.cpython-36.pyc
│ │ │ ├── sentence_ranking.cpython-36.pyc
│ │ │ ├── binary_cross_entropy.cpython-36.pyc
│ │ │ ├── sentence_prediction.cpython-36.pyc
│ │ │ └── label_smoothed_cross_entropy.cpython-36.pyc
│ │ ├── __init__.py
│ │ ├── fairseq_criterion.py
│ │ ├── masked_lm.py
│ │ ├── binary_cross_entropy.py
│ │ └── cross_entropy.py
│ ├── trans_image_embedding.py
│ ├── tokenizer.py
│ ├── __init__.py
│ ├── clib
│ │ └── libbleu
│ │ │ ├── module.cpp
│ │ │ └── libbleu.cpp
│ ├── pdb.py
│ ├── meters.py
│ ├── binarizer.py
│ └── registry.py
├── .gitignore
├── .idea
│ ├── encodings.xml
│ ├── vcs.xml
│ ├── modules.xml
│ ├── misc.xml
│ ├── deployment.xml
│ └── uvm-nmt.iml
├── sh_en2de_map.sh
├── hubconf.py
├── training_wmt16_en2ro.sh
├── training_wmt14_en2de.sh
└── score.py
/fairseq/data/audio/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | .idea/workspace.xml
3 | .idea/workspace.xml
4 |
--------------------------------------------------------------------------------
/fairseq/__pycache__/bleu.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/bleu.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/pdb.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/pdb.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/meters.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/meters.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/search.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/search.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/binarizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/binarizer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/options.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/options.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/registry.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/registry.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/tokenizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/tokenizer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/trainer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/trainer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/nag.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/nag.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/sgd.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/sgd.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/file_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/file_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/lstm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/lstm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/adam.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/adam.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/bmuf.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/bmuf.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/progress_bar.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/progress_bar.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/iterators.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/iterators.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/noising.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/noising.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/libbleu.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/libbleu.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/fconv.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/fconv.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/wav2vec.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/wav2vec.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/gelu.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/gelu.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/unfold.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/unfold.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/adadelta.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/adadelta.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/adagrad.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/adagrad.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/adamax.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/adamax.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/checkpoint_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/checkpoint_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/data_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/data_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/dictionary.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/dictionary.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/id_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/id_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/pad_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/pad_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/fconv_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/fconv_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/lightconv.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/lightconv.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/masked_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/masked_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/conv_tbc.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/conv_tbc.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/highway.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/highway.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/vggblock.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/vggblock.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/adafactor.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/adafactor.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/masked_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/masked_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/distributed_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/distributed_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/__pycache__/sequence_generator.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/sequence_generator.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/concat_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/concat_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/list_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/list_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/numel_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/numel_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/plasma_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/plasma_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/sort_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/sort_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/audio/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/audio/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/lightconv_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/lightconv_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/transformer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/transformer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/beamable_mm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/beamable_mm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/layer_norm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/layer_norm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/scalar_bias.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/scalar_bias.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/fairseq_task.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/fairseq_task.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/translation.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/translation.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/masked_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/masked_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/fairseq_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/fairseq_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/indexed_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/indexed_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/prepend_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/prepend_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/replace_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/replace_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/sharded_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/sharded_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/truncate_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/truncate_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/fastbpe.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/fastbpe.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/legacy/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/legacy/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/fairseq_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/fairseq_model.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/fconv_self_att.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/fconv_self_att.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/transformer_bk.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/transformer_bk.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/transformer_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/transformer_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/roberta/__pycache__/model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/roberta/__pycache__/model.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/grad_multiply.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/grad_multiply.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/logsumexp_moe.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/logsumexp_moe.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/fp16_optimizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/fp16_optimizer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/translation_moe.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/translation_moe.cpython-36.pyc
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/adaptive_loss.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/adaptive_loss.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/cross_entropy.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/cross_entropy.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/lru_cache_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/lru_cache_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/mask_tokens_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/mask_tokens_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/monolingual_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/monolingual_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/num_samples_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/num_samples_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/raw_label_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/raw_label_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/strip_token_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/strip_token_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/subsample_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/subsample_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/token_block_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/token_block_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/gpt2_bpe.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/gpt2_bpe.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/composite_encoder.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/composite_encoder.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/fairseq_decoder.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/fairseq_decoder.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/fairseq_encoder.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/fairseq_encoder.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/roberta/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/roberta/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/adaptive_input.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/adaptive_input.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/adaptive_softmax.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/adaptive_softmax.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/__pycache__/fairseq_optimizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/__pycache__/fairseq_optimizer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/audio_pretraining.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/audio_pretraining.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/cross_lingual_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/cross_lingual_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/language_modeling.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/language_modeling.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/legacy_masked_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/legacy_masked_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/sentence_ranking.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/sentence_ranking.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/composite_loss.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/composite_loss.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/base_wrapper_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/base_wrapper_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/language_pair_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/language_pair_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/offset_tokens_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/offset_tokens_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/prepend_token_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/prepend_token_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/transform_eos_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/transform_eos_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/data_utils_fast.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/data_utils_fast.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/hf_bert_bpe.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/hf_bert_bpe.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/transformer_layer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/transformer_layer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/sentence_prediction.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/sentence_prediction.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/fairseq_criterion.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/fairseq_criterion.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/legacy_masked_lm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/legacy_masked_lm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/sentence_ranking.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/sentence_ranking.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/backtranslation_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/backtranslation_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/concat_sentences_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/concat_sentences_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/round_robin_zip_datasets.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/round_robin_zip_datasets.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/audio/__pycache__/raw_audio_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/audio/__pycache__/raw_audio_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/gpt2_bpe_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/gpt2_bpe_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/moses_tokenizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/moses_tokenizer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/nltk_tokenizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/nltk_tokenizer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/space_tokenizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/space_tokenizer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/subword_nmt_bpe.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/subword_nmt_bpe.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/legacy/__pycache__/masked_lm_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/legacy/__pycache__/masked_lm_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/roberta/__pycache__/hub_interface.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/roberta/__pycache__/hub_interface.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/dynamic_convolution.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/dynamic_convolution.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/multihead_attention.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/multihead_attention.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/positional_embedding.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/positional_embedding.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/binary_cross_entropy.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/binary_cross_entropy.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/sentence_prediction.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/sentence_prediction.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/lm_context_window_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/lm_context_window_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/nested_dictionary_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/nested_dictionary_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/encoders/__pycache__/sentencepiece_bpe.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/encoders/__pycache__/sentencepiece_bpe.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/legacy/__pycache__/block_pair_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/legacy/__pycache__/block_pair_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/multilingual_transformer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/multilingual_transformer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/lightweight_convolution.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/lightweight_convolution.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/linearized_convolution.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/linearized_convolution.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/multilingual_translation.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/multilingual_translation.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/trans_image_embedding.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | image_embedding_file = "features_resnet50/train-resnet50-avgpool.npy"
4 | embeding_weights = np.load(image_embedding_file)
--------------------------------------------------------------------------------
/fairseq/__pycache__/legacy_distributed_data_parallel.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/__pycache__/legacy_distributed_data_parallel.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/multi_corpus_sampled_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/multi_corpus_sampled_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/legacy/__pycache__/masked_lm_dictionary.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/legacy/__pycache__/masked_lm_dictionary.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/distributed_fairseq_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/distributed_fairseq_model.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/fairseq_incremental_decoder.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/fairseq_incremental_decoder.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/character_token_embedder.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/character_token_embedder.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/mean_pool_gating_network.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/mean_pool_gating_network.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/fixed_schedule.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/fixed_schedule.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/semisupervised_translation.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/semisupervised_translation.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/data/__pycache__/transform_eos_lang_pair_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/data/__pycache__/transform_eos_lang_pair_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/learned_positional_embedding.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/learned_positional_embedding.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/transformer_sentence_encoder.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/transformer_sentence_encoder.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/criterions/__pycache__/label_smoothed_cross_entropy.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/criterions/__pycache__/label_smoothed_cross_entropy.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/models/__pycache__/transformer_from_pretrained_xlm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/models/__pycache__/transformer_from_pretrained_xlm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/downsampled_multihead_attention.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/downsampled_multihead_attention.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/sinusoidal_positional_embedding.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/sinusoidal_positional_embedding.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/cosine_lr_scheduler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/cosine_lr_scheduler.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/fairseq_lr_scheduler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/fairseq_lr_scheduler.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/reduce_lr_on_plateau.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/reduce_lr_on_plateau.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/tasks/__pycache__/translation_from_pretrained_xlm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/tasks/__pycache__/translation_from_pretrained_xlm.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/tri_stage_lr_scheduler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/tri_stage_lr_scheduler.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/modules/__pycache__/transformer_sentence_encoder_layer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/modules/__pycache__/transformer_sentence_encoder_layer.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/polynomial_decay_schedule.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/polynomial_decay_schedule.cpython-36.pyc
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/triangular_lr_scheduler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/triangular_lr_scheduler.cpython-36.pyc
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__pycache__/inverse_square_root_schedule.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cooelf/UVR-NMT/HEAD/fairseq/optim/lr_scheduler/__pycache__/inverse_square_root_schedule.cpython-36.pyc
--------------------------------------------------------------------------------
/sh_en2de_map.sh:
--------------------------------------------------------------------------------
1 | python image_lookup.py \
2 | --src_dict_dir data/src_dict_wmt_en2de.txt \
3 | --src_en_dir wmt14_en_de/bpe.multi30k.en \
4 | --image_dir multi30k-dataset/data/task1/image_splits/train.txt \
5 | --cap2image_file data/cap2image_en2de.pickle
--------------------------------------------------------------------------------
/fairseq/modules/lightconv_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .lightconv_layer import LightconvLayer # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .dynamicconv_layer import DynamicconvLayer # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/models/roberta/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .hub_interface import * # noqa
7 | from .model import * # noqa
8 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/fairseq/tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import re
7 |
8 | SPACE_NORMALIZER = re.compile(r"\s+")
9 |
10 |
11 | def tokenize_line(line):
12 | line = SPACE_NORMALIZER.sub(" ", line)
13 | line = line.strip()
14 | return line.split()
15 |
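A minimal usage sketch (illustrative only, not part of the repository file above): tokenize_line collapses runs of whitespace and then splits the line on spaces.

from fairseq.tokenizer import tokenize_line

# Illustrative only: tabs, newlines and repeated spaces are normalized away.
assert tokenize_line("  a   quick\ttest \n") == ["a", "quick", "test"]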
--------------------------------------------------------------------------------
/.idea/deployment.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/uvm-nmt.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/fairseq/data/num_samples_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import FairseqDataset
7 |
8 |
9 | class NumSamplesDataset(FairseqDataset):
10 |
11 | def __getitem__(self, index):
12 | return 1
13 |
14 | def __len__(self):
15 | return 0
16 |
17 | def collater(self, samples):
18 | return sum(samples)
19 |
--------------------------------------------------------------------------------
/fairseq/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | __all__ = ['pdb']
7 | __version__ = '0.8.0'
8 |
9 | import fairseq.criterions # noqa
10 | import fairseq.models # noqa
11 | import fairseq.modules # noqa
12 | import fairseq.optim # noqa
13 | import fairseq.optim.lr_scheduler # noqa
14 | import fairseq.pdb # noqa
15 | import fairseq.tasks # noqa
16 |
--------------------------------------------------------------------------------
/fairseq/data/id_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class IdDataset(FairseqDataset):
12 |
13 | def __getitem__(self, index):
14 | return index
15 |
16 | def __len__(self):
17 | return 0
18 |
19 | def collater(self, samples):
20 | return torch.tensor(samples)
21 |
--------------------------------------------------------------------------------
/fairseq/data/legacy/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary
7 | from .block_pair_dataset import BlockPairDataset
8 | from .masked_lm_dataset import MaskedLMDataset
9 |
10 | __all__ = [
11 | 'BertDictionary',
12 | 'BlockPairDataset',
13 | 'MaskedLMDataset',
14 | 'MaskedLMDictionary',
15 | ]
16 |
--------------------------------------------------------------------------------
/fairseq/data/offset_tokens_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class OffsetTokensDataset(BaseWrapperDataset):
10 |
11 | def __init__(self, dataset, offset):
12 | super().__init__(dataset)
13 | self.offset = offset
14 |
15 | def __getitem__(self, idx):
16 | return self.dataset[idx] + self.offset
17 |
--------------------------------------------------------------------------------
/fairseq/modules/grad_multiply.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 |
9 | class GradMultiply(torch.autograd.Function):
10 | @staticmethod
11 | def forward(ctx, x, scale):
12 | ctx.scale = scale
13 | res = x.new(x)
14 | return res
15 |
16 | @staticmethod
17 | def backward(ctx, grad):
18 | return grad * ctx.scale, None
19 |
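A minimal sketch of how GradMultiply behaves, assuming it is called through its apply() method as elsewhere in fairseq (illustrative, not part of the file above): the forward pass copies the input unchanged, while gradients flowing back are multiplied by scale.

import torch
from fairseq.modules.grad_multiply import GradMultiply

# Illustrative only: identity forward, scaled backward.
x = torch.ones(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)   # y has the same values as x
y.sum().backward()
print(x.grad)                    # tensor([0.5000, 0.5000, 0.5000])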
--------------------------------------------------------------------------------
/fairseq/data/strip_token_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class StripTokenDataset(BaseWrapperDataset):
10 |
11 | def __init__(self, dataset, id_to_strip):
12 | super().__init__(dataset)
13 | self.id_to_strip = id_to_strip
14 |
15 | def __getitem__(self, index):
16 | item = self.dataset[index]
17 | return item[item.ne(self.id_to_strip)]
18 |
--------------------------------------------------------------------------------
/fairseq/modules/unfold.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn.functional as F
7 |
8 |
9 | def unfold1d(x, kernel_size, padding_l, pad_value=0):
10 | '''unfold T x B x C to T x B x C x K'''
11 | if kernel_size > 1:
12 | T, B, C = x.size()
13 | x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value)
14 | x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C))
15 | else:
16 | x = x.unsqueeze(3)
17 | return x
18 |
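A small usage sketch (illustrative, not part of the file above): unfold1d turns a (T, B, C) tensor into a (T, B, C, K) tensor of K-step windows, padding on the left with pad_value.

import torch
from fairseq.modules.unfold import unfold1d

# Illustrative only: T=3, B=1, C=2 with kernel_size=3 and full left padding (causal windows).
x = torch.arange(6, dtype=torch.float).view(3, 1, 2)
windows = unfold1d(x, kernel_size=3, padding_l=2)
print(windows.shape)   # torch.Size([3, 1, 2, 3])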
--------------------------------------------------------------------------------
/fairseq/modules/lightconv_layer/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | from setuptools import setup
8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension
9 |
10 | setup(
11 | name='lightconv_layer',
12 | ext_modules=[
13 | CUDAExtension('lightconv_cuda', [
14 | 'lightconv_cuda.cpp',
15 | 'lightconv_cuda_kernel.cu',
16 | ]),
17 | ],
18 | cmdclass={
19 | 'build_ext': BuildExtension
20 | })
21 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/space_tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import re
7 |
8 | from fairseq.data.encoders import register_tokenizer
9 |
10 |
11 | @register_tokenizer('space')
12 | class SpaceTokenizer(object):
13 |
14 | def __init__(self, source_lang=None, target_lang=None):
15 | self.space_tok = re.compile(r"\s+")
16 |
17 | def encode(self, x: str) -> str:
18 | return self.space_tok.sub(' ', x)
19 |
20 | def decode(self, x: str) -> str:
21 | return x
22 |
--------------------------------------------------------------------------------
/fairseq/data/raw_label_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class RawLabelDataset(FairseqDataset):
12 |
13 | def __init__(self, labels):
14 | super().__init__()
15 | self.labels = labels
16 |
17 | def __getitem__(self, index):
18 | return self.labels[index]
19 |
20 | def __len__(self):
21 | return len(self.labels)
22 |
23 | def collater(self, samples):
24 | return torch.tensor(samples)
25 |
--------------------------------------------------------------------------------
/fairseq/modules/layer_norm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 |
9 | def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
10 | if not export and torch.cuda.is_available():
11 | try:
12 | from apex.normalization import FusedLayerNorm
13 | return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
14 | except ImportError:
15 | pass
16 | return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
17 |
--------------------------------------------------------------------------------
/fairseq/data/lru_cache_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from functools import lru_cache
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class LRUCacheDataset(BaseWrapperDataset):
12 |
13 | def __init__(self, dataset, token=None):
14 | super().__init__(dataset)
15 |
16 | @lru_cache(maxsize=8)
17 | def __getitem__(self, index):
18 | return self.dataset[index]
19 |
20 | @lru_cache(maxsize=8)
21 | def collater(self, samples):
22 | return self.dataset.collater(samples)
23 |
--------------------------------------------------------------------------------
/fairseq/data/sort_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class SortDataset(BaseWrapperDataset):
12 |
13 | def __init__(self, dataset, sort_order):
14 | super().__init__(dataset)
15 | if not isinstance(sort_order, (list, tuple)):
16 | sort_order = [sort_order]
17 | self.sort_order = sort_order
18 |
19 | assert all(len(so) == len(dataset) for so in sort_order)
20 |
21 | def ordered_indices(self):
22 | return np.lexsort(self.sort_order)
23 |
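One detail worth keeping in mind when constructing a SortDataset (illustrative sketch, not part of the file above): np.lexsort treats the last array in sort_order as the primary key, so the main sort key should be placed last.

import numpy as np

# Illustrative only: sort primarily by lengths, breaking ties with tiebreak.
lengths  = np.array([3, 1, 2])
tiebreak = np.array([0, 1, 2])
print(np.lexsort([tiebreak, lengths]))   # [1 2 0] -> shortest example first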
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | from setuptools import setup
8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension
9 |
10 | setup(
11 | name='dynamicconv_layer',
12 | ext_modules=[
13 | CUDAExtension(
14 | name='dynamicconv_cuda',
15 | sources=[
16 | 'dynamicconv_cuda.cpp',
17 | 'dynamicconv_cuda_kernel.cu',
18 | ],
19 | ),
20 | ],
21 | cmdclass={
22 | 'build_ext': BuildExtension
23 | })
24 |
--------------------------------------------------------------------------------
/fairseq/data/list_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class ListDataset(BaseWrapperDataset):
10 |
11 | def __init__(self, dataset, sizes=None):
12 | super().__init__(dataset)
13 | self._sizes = sizes
14 |
15 | def collater(self, samples):
16 | return samples
17 |
18 | @property
19 | def sizes(self):
20 | return self._sizes
21 |
22 | def num_tokens(self, index):
23 | return self.sizes[index]
24 |
25 | def size(self, index):
26 | return self.sizes[index]
27 |
28 | def set_epoch(self, epoch):
29 | pass
30 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/nltk_tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data.encoders import register_tokenizer
7 |
8 |
9 | @register_tokenizer('nltk')
10 | class NLTKTokenizer(object):
11 |
12 | def __init__(self, source_lang=None, target_lang=None):
13 | try:
14 | from nltk.tokenize import word_tokenize
15 | self.word_tokenize = word_tokenize
16 | except ImportError:
17 | raise ImportError('Please install nltk with: pip install nltk')
18 |
19 | def encode(self, x: str) -> str:
20 | return ' '.join(self.word_tokenize(x))
21 |
22 | def decode(self, x: str) -> str:
23 | return x
24 |
--------------------------------------------------------------------------------
/fairseq/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from fairseq import registry
10 | from fairseq.criterions.fairseq_criterion import FairseqCriterion
11 |
12 |
13 | build_criterion, register_criterion, CRITERION_REGISTRY = registry.setup_registry(
14 | '--criterion',
15 | base_class=FairseqCriterion,
16 | default='cross_entropy',
17 | )
18 |
19 |
20 | # automatically import any Python files in the criterions/ directory
21 | for file in os.listdir(os.path.dirname(__file__)):
22 | if file.endswith('.py') and not file.startswith('_'):
23 | module = file[:file.find('.py')]
24 | importlib.import_module('fairseq.criterions.' + module)
25 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | import importlib
8 | import os
9 |
10 | from fairseq import registry
11 |
12 |
13 | build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY = registry.setup_registry(
14 | '--tokenizer',
15 | default=None,
16 | )
17 |
18 |
19 | build_bpe, register_bpe, BPE_REGISTRY = registry.setup_registry(
20 | '--bpe',
21 | default=None,
22 | )
23 |
24 |
25 | # automatically import any Python files in the encoders/ directory
26 | for file in os.listdir(os.path.dirname(__file__)):
27 | if file.endswith('.py') and not file.startswith('_'):
28 | module = file[:file.find('.py')]
29 | importlib.import_module('fairseq.data.encoders.' + module)
30 |
--------------------------------------------------------------------------------
/hubconf.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import functools
7 |
8 | from fairseq.hub_utils import BPEHubInterface as bpe # noqa
9 | from fairseq.hub_utils import TokenizerHubInterface as tokenizer # noqa
10 | from fairseq.models import MODEL_REGISTRY
11 |
12 |
13 | dependencies = [
14 | 'numpy',
15 | 'regex',
16 | 'requests',
17 | 'torch',
18 | ]
19 |
20 |
21 | for _model_type, _cls in MODEL_REGISTRY.items():
22 | for model_name in _cls.hub_models().keys():
23 | globals()[model_name] = functools.partial(
24 | _cls.from_pretrained,
25 | model_name,
26 | )
27 | # to simplify the interface we only expose named models
28 | # globals()[_model_type] = _cls.from_pretrained
29 |
--------------------------------------------------------------------------------
/fairseq/modules/gelu.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs
8 | """
9 |
10 | import math
11 |
12 | import torch
13 |
14 |
15 | def gelu_accurate(x):
16 | if not hasattr(gelu_accurate, "_a"):
17 | gelu_accurate._a = math.sqrt(2 / math.pi)
18 | return 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
19 |
20 |
21 | def gelu(x: torch.Tensor) -> torch.Tensor:
22 | if hasattr(torch.nn.functional, 'gelu'):
23 | return torch.nn.functional.gelu(x.float()).type_as(x)
24 | else:
25 | return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
26 |
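A quick numerical sanity check (illustrative, not part of the file above): the erf-based gelu and the tanh approximation gelu_accurate agree closely over a typical input range.

import torch
from fairseq.modules.gelu import gelu, gelu_accurate

# Illustrative only: compare the exact and approximate GELU defined above.
x = torch.linspace(-3, 3, steps=61)
print(torch.max(torch.abs(gelu(x) - gelu_accurate(x))))   # small, well below 1e-2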
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from fairseq import registry
10 | from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import FairseqLRScheduler
11 |
12 |
13 | build_lr_scheduler, register_lr_scheduler, LR_SCHEDULER_REGISTRY = registry.setup_registry(
14 | '--lr-scheduler',
15 | base_class=FairseqLRScheduler,
16 | default='fixed',
17 | )
18 |
19 | # automatically import any Python files in the optim/lr_scheduler/ directory
20 | for file in os.listdir(os.path.dirname(__file__)):
21 | if file.endswith('.py') and not file.startswith('_'):
22 | module = file[:file.find('.py')]
23 | importlib.import_module('fairseq.optim.lr_scheduler.' + module)
24 |
--------------------------------------------------------------------------------
/fairseq/data/numel_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from . import BaseWrapperDataset
10 |
11 |
12 | class NumelDataset(BaseWrapperDataset):
13 |
14 | def __init__(self, dataset, reduce=False):
15 | super().__init__(dataset)
16 | self.reduce = reduce
17 |
18 | def __getitem__(self, index):
19 | item = self.dataset[index]
20 | if torch.is_tensor(item):
21 | return torch.numel(item)
22 | else:
23 | return np.size(item)
24 |
25 | def __len__(self):
26 | return len(self.dataset)
27 |
28 | def collater(self, samples):
29 | if self.reduce:
30 | return sum(samples)
31 | else:
32 | return torch.tensor(samples)
33 |
--------------------------------------------------------------------------------
/fairseq/data/replace_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class ReplaceDataset(BaseWrapperDataset):
10 | def __init__(self, dataset, replace_map, offset=0):
11 | super().__init__(dataset)
12 | assert len(replace_map) > 0
13 | self.replace_map = replace_map
14 | self.offset = offset
15 |
16 | def __getitem__(self, index):
17 | item = self.dataset[index]
18 | is_tuple = isinstance(item, tuple)
19 | src = item[0] if is_tuple else item
20 |
21 | for k, v in self.replace_map.items():
22 | src_off = src[self.offset:]
23 | src_off.masked_fill_(src_off == k, v)
24 |
25 | item = tuple((src,) + item[1:]) if is_tuple else src
26 | return item
27 |
--------------------------------------------------------------------------------
/fairseq/modules/logsumexp_moe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 |
9 | class LogSumExpMoE(torch.autograd.Function):
10 | """Standard LogSumExp forward pass, but use *posterior* for the backward.
11 |
12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
13 | (Shen et al., 2019) `_.
14 | """
15 |
16 | @staticmethod
17 | def forward(ctx, logp, posterior, dim=-1):
18 | ctx.save_for_backward(posterior)
19 | ctx.dim = dim
20 | return torch.logsumexp(logp, dim=dim)
21 |
22 | @staticmethod
23 | def backward(ctx, grad_output):
24 | posterior, = ctx.saved_tensors
25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior
26 | return grad_logp, None, None
27 |
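A minimal call sketch (an assumption based on the docstring, not part of the file above): the forward value matches torch.logsumexp, but the gradient of logp is the supplied posterior rather than the softmax of logp.

import torch
from fairseq.modules.logsumexp_moe import LogSumExpMoE

# Illustrative only: posterior-weighted backward pass.
logp = torch.tensor([[-1.0, -0.5]], requires_grad=True)
posterior = torch.tensor([[0.3, 0.7]])
out = LogSumExpMoE.apply(logp, posterior, -1)   # same value as torch.logsumexp(logp, -1)
out.sum().backward()
print(logp.grad)                                # tensor([[0.3000, 0.7000]])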
--------------------------------------------------------------------------------
/fairseq/clib/libbleu/module.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2017-present, Facebook, Inc.
3 | * All rights reserved.
4 | *
5 | * This source code is licensed under the license found in the
6 | * LICENSE file in the root directory of this source tree.
7 | */
8 |
9 | #include <Python.h>
10 |
11 |
12 | static PyMethodDef method_def[] = {
13 | {NULL, NULL, 0, NULL}
14 | };
15 |
16 | static struct PyModuleDef module_def = {
17 | PyModuleDef_HEAD_INIT,
18 | "libbleu", /* name of module */
19 | NULL, /* module documentation, may be NULL */
20 | -1, /* size of per-interpreter state of the module,
21 | or -1 if the module keeps state in global variables. */
22 | method_def
23 | };
24 |
25 |
26 | #if PY_MAJOR_VERSION == 2
27 | PyMODINIT_FUNC init_libbleu()
28 | #else
29 | PyMODINIT_FUNC PyInit_libbleu()
30 | #endif
31 | {
32 | PyObject *m = PyModule_Create(&module_def);
33 | if (!m) {
34 | return NULL;
35 | }
36 | return m;
37 | }
38 |
--------------------------------------------------------------------------------
/fairseq/data/pad_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data import data_utils
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class PadDataset(BaseWrapperDataset):
12 |
13 | def __init__(self, dataset, pad_idx, left_pad):
14 | super().__init__(dataset)
15 | self.pad_idx = pad_idx
16 | self.left_pad = left_pad
17 |
18 | def collater(self, samples):
19 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad)
20 |
21 |
22 | class LeftPadDataset(PadDataset):
23 |
24 | def __init__(self, dataset, pad_idx):
25 | super().__init__(dataset, pad_idx, left_pad=True)
26 |
27 |
28 | class RightPadDataset(PadDataset):
29 |
30 | def __init__(self, dataset, pad_idx):
31 | super().__init__(dataset, pad_idx, left_pad=False)
32 |
--------------------------------------------------------------------------------
/fairseq/data/truncate_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class TruncateDataset(BaseWrapperDataset):
12 |
13 | def __init__(self, dataset, truncation_length):
14 | super().__init__(dataset)
15 | assert truncation_length is not None
16 | self.truncation_length = truncation_length
17 | self.dataset = dataset
18 |
19 | def __getitem__(self, index):
20 | item = self.dataset[index]
21 | item_len = item.size(0)
22 | if item_len > self.truncation_length:
23 | item = item[:self.truncation_length]
24 | return item
25 |
26 | @property
27 | def sizes(self):
28 | return np.minimum(self.dataset.sizes, self.truncation_length)
29 |
30 | def __len__(self):
31 | return len(self.dataset)
32 |
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/extension.h>
2 | #include <vector>
3 |
4 | std::vector<at::Tensor> dynamicconv_cpu_forward(
5 | float* input,
6 | float* filters,
7 | int padding_l);
8 |
9 | std::vector<at::Tensor> dynamicconv_cpu_backward(
10 | float* gradOutput,
11 | int padding_l,
12 | float* input,
13 | float* filters);
14 |
15 | std::vector<at::Tensor> dynamicconv_forward(
16 | float* input,
17 | float* filters,
18 | int padding_l) {
19 |
20 | return dynamicconv_cpu_forward(input, filters, padding_l);
21 | }
22 |
23 | std::vector<at::Tensor> dynamicconv_backward(
24 | float* gradOutput,
25 | int padding_l,
26 | float* input,
27 | float* filters) {
28 |
29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters);
30 | }
31 |
32 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)");
34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)");
35 | }
36 |
--------------------------------------------------------------------------------
/fairseq/modules/scalar_bias.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | #
6 |
7 | import torch
8 |
9 |
10 | class ScalarBias(torch.autograd.Function):
11 | """
12 | Adds a vector of scalars, used in self-attention mechanism to allow
13 | the model to optionally attend to this vector instead of the past
14 | """
15 |
16 | @staticmethod
17 | def forward(ctx, input, dim, bias_init):
18 | size = list(input.size())
19 | size[dim] += 1
20 | output = input.new(*size).fill_(bias_init)
21 | output.narrow(dim, 1, size[dim] - 1).copy_(input)
22 | ctx.dim = dim
23 | return output
24 |
25 | @staticmethod
26 | def backward(ctx, grad):
27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None
28 |
29 |
30 | def scalar_bias(input, dim, bias_init=0):
31 | return ScalarBias.apply(input, dim, bias_init)
32 |
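A small illustrative sketch (not part of the file above): scalar_bias prepends one bias_init-filled slot along the chosen dimension, and the backward pass drops the gradient for that slot.

import torch
from fairseq.modules.scalar_bias import scalar_bias

# Illustrative only: one extra position is added at index 0 along dim=1.
x = torch.ones(2, 3)
y = scalar_bias(x, dim=1)
print(y.shape)    # torch.Size([2, 4])
print(y[:, 0])    # tensor([0., 0.])  <- the bias slot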
--------------------------------------------------------------------------------
/fairseq/data/prepend_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from . import BaseWrapperDataset
10 |
11 |
12 | class PrependDataset(BaseWrapperDataset):
13 | def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
14 | super().__init__(dataset)
15 | self.prepend_getter = prepend_getter
16 | self.ensure_first_token = ensure_first_token_is
17 |
18 | def __getitem__(self, idx):
19 | item = self.dataset[idx]
20 | is_tuple = isinstance(item, tuple)
21 | src = item[0] if is_tuple else item
22 |
23 | assert self.ensure_first_token is None or src[0] == self.ensure_first_token
24 | prepend_idx = self.prepend_getter(self.dataset, idx)
25 | assert isinstance(prepend_idx, int)
26 | src[0] = prepend_idx
27 | item = tuple((src,) + item[1:]) if is_tuple else src
28 | return item
29 |
--------------------------------------------------------------------------------
/fairseq/optim/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from fairseq import registry
10 | from fairseq.optim.fairseq_optimizer import FairseqOptimizer
11 | from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
12 | from fairseq.optim.bmuf import FairseqBMUF # noqa
13 |
14 |
15 | __all__ = [
16 | 'FairseqOptimizer',
17 | 'FP16Optimizer',
18 | 'MemoryEfficientFP16Optimizer',
19 | ]
20 |
21 |
22 | build_optimizer, register_optimizer, OPTIMIZER_REGISTRY = registry.setup_registry(
23 | '--optimizer',
24 | base_class=FairseqOptimizer,
25 | default='nag',
26 | )
27 |
28 |
29 | # automatically import any Python files in the optim/ directory
30 | for file in os.listdir(os.path.dirname(__file__)):
31 | if file.endswith('.py') and not file.startswith('_'):
32 | module = file[:file.find('.py')]
33 | importlib.import_module('fairseq.optim.' + module)
34 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/sentencepiece_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq import file_utils
7 | from fairseq.data.encoders import register_bpe
8 |
9 |
10 | @register_bpe('sentencepiece')
11 | class SentencepieceBPE(object):
12 |
13 | @staticmethod
14 | def add_args(parser):
15 | # fmt: off
16 | parser.add_argument('--sentencepiece-vocab', type=str,
17 | help='path to sentencepiece vocab')
18 | # fmt: on
19 |
20 | def __init__(self, args):
21 | vocab = file_utils.cached_path(args.sentencepiece_vocab)
22 | try:
23 | import sentencepiece as spm
24 | self.sp = spm.SentencePieceProcessor()
25 | self.sp.Load(vocab)
26 | except ImportError:
27 | raise ImportError('Please install sentencepiece with: pip install sentencepiece')
28 |
29 | def encode(self, x: str) -> str:
30 | return ' '.join(self.sp.EncodeAsPieces(x))
31 |
32 | def decode(self, x: str) -> str:
33 | return x.replace(' ', '').replace('\u2581', ' ').strip()
34 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/fastbpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq import file_utils
7 | from fairseq.data.encoders import register_bpe
8 |
9 |
10 | @register_bpe('fastbpe')
11 | class fastBPE(object):
12 |
13 | @staticmethod
14 | def add_args(parser):
15 | # fmt: off
16 | parser.add_argument('--bpe-codes', type=str,
17 | help='path to fastBPE BPE')
18 | # fmt: on
19 |
20 | def __init__(self, args):
21 | if args.bpe_codes is None:
22 | raise ValueError('--bpe-codes is required for --bpe=subword_nmt')
23 | codes = file_utils.cached_path(args.bpe_codes)
24 | try:
25 | import fastBPE
26 | self.bpe = fastBPE.fastBPE(codes)
27 | self.bpe_symbol = "@@ "
28 | except ImportError:
29 | raise ImportError('Please install fastBPE with: pip install fastBPE')
30 |
31 | def encode(self, x: str) -> str:
32 | return self.bpe.apply([x])[0]
33 |
34 | def decode(self, x: str) -> str:
35 | return (x + ' ').replace(self.bpe_symbol, '').rstrip()
36 |
--------------------------------------------------------------------------------
/fairseq/tasks/translation_from_pretrained_xlm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
7 | from fairseq.tasks.translation import TranslationTask
8 |
9 | from . import register_task
10 |
11 |
12 | @register_task("translation_from_pretrained_xlm")
13 | class TranslationFromPretrainedXLMTask(TranslationTask):
14 | """
15 | Same as TranslationTask except use the MaskedLMDictionary class so that
16 | we can load data that was binarized with the MaskedLMDictionary class.
17 |
18 | This task should be used for the entire training pipeline when we want to
19 | train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
20 | training NMT with the pretrained XLM checkpoint, and subsequent evaluation
21 | of that trained model.
22 | """
23 |
24 | @classmethod
25 | def load_dictionary(cls, filename):
26 | """Load the masked LM dictionary from the filename
27 |
28 | Args:
29 | filename (str): the filename
30 | """
31 | return MaskedLMDictionary.load(filename)
32 |
--------------------------------------------------------------------------------
/fairseq/data/prepend_token_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from . import BaseWrapperDataset
10 |
11 |
12 | class PrependTokenDataset(BaseWrapperDataset):
13 |
14 | def __init__(self, dataset, token=None):
15 | super().__init__(dataset)
16 | self.token = token
17 | if token is not None:
18 | self._sizes = np.array(dataset.sizes) + 1
19 | else:
20 | self._sizes = dataset.sizes
21 |
22 | def __getitem__(self, idx):
23 | item = self.dataset[idx]
24 | if self.token is not None:
25 | item = torch.cat([item.new([self.token]), item])
26 | return item
27 |
28 | @property
29 | def sizes(self):
30 | return self._sizes
31 |
32 | def num_tokens(self, index):
33 | n = self.dataset.num_tokens(index)
34 | if self.token is not None:
35 | n += 1
36 | return n
37 |
38 | def size(self, index):
39 | n = self.dataset.size(index)
40 | if self.token is not None:
41 | n += 1
42 | return n
43 |
--------------------------------------------------------------------------------
/fairseq/pdb.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import multiprocessing
7 | import os
8 | import pdb
9 | import sys
10 |
11 |
12 | __all__ = ['set_trace']
13 |
14 |
15 | _stdin = [None]
16 | _stdin_lock = multiprocessing.Lock()
17 | try:
18 | _stdin_fd = sys.stdin.fileno()
19 | except Exception:
20 | _stdin_fd = None
21 |
22 |
23 | class MultiprocessingPdb(pdb.Pdb):
24 | """A Pdb wrapper that works in a multiprocessing environment.
25 |
26 | Usage: `from fairseq import pdb; pdb.set_trace()`
27 | """
28 |
29 | def __init__(self):
30 | pdb.Pdb.__init__(self, nosigint=True)
31 |
32 | def _cmdloop(self):
33 | stdin_bak = sys.stdin
34 | with _stdin_lock:
35 | try:
36 | if _stdin_fd is not None:
37 | if not _stdin[0]:
38 | _stdin[0] = os.fdopen(_stdin_fd)
39 | sys.stdin = _stdin[0]
40 | self.cmdloop()
41 | finally:
42 | sys.stdin = stdin_bak
43 |
44 |
45 | def set_trace():
46 | pdb = MultiprocessingPdb()
47 | pdb.set_trace(sys._getframe().f_back)
48 |
--------------------------------------------------------------------------------
/training_wmt16_en2ro.sh:
--------------------------------------------------------------------------------
1 | TEXT=data/en-ro/
2 | python preprocess.py --source-lang en --target-lang ro --trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test --destdir data-bin/en2ro --joined-dictionary --thresholdtgt 0 --thresholdsrc 0
3 | DATA_DIR=data-bin/en2ro/
4 | python train.py ${DATA_DIR} --task translation \
5 | --arch transformer_wmt_en_de --share-all-embeddings --dropout 0.1 \
6 | --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
7 | --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 4000 \
8 | --lr 0.0007 --min-lr 1e-09 \
9 | --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --weight-decay 0.0 \
10 | --max-tokens 4096\
11 | --update-freq 2 --no-progress-bar --log-format json --log-interval 50 \
12 | --save-interval-updates 1000 --keep-interval-updates 500 --max-update 100000 --source-lang en --target-lang ro \
13 | --save-dir checkpoints/base-en2ro \
14 | --save_src_dict data/src_dict_en2ro.txt \
15 | --cap2image_file data/cap2image_en2ro.pickle \
16 | --image_embedding_file features_resnet50/train-resnet50-avgpool.npy \
17 | --encoder-type TransformerAvgEncoder \
18 | --L2norm true --image_emb_fix --total_num_img 5 --per_num_img 1 --find-unused-parameters --merge_option att-gate --gate_type neural-gate
--------------------------------------------------------------------------------
/fairseq/optim/adagrad.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.optim
7 |
8 | from . import FairseqOptimizer, register_optimizer
9 |
10 |
11 | @register_optimizer('adagrad')
12 | class Adagrad(FairseqOptimizer):
13 | def __init__(self, args, params):
14 | super().__init__(args)
15 | self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
16 |
17 | @staticmethod
18 | def add_args(parser):
19 | """Add optimizer-specific arguments to the parser."""
20 | # fmt: off
21 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
22 | help='weight decay')
23 | # fmt: on
24 |
25 | @property
26 | def optimizer_config(self):
27 | """
28 | Return a kwarg dictionary that will be used to override optimizer
29 | args stored in checkpoints. This allows us to load a checkpoint and
30 | resume training using a different set of optimizer args, e.g., with a
31 | different learning rate.
32 | """
33 | return {
34 | 'lr': self.args.lr[0],
35 | 'weight_decay': self.args.weight_decay,
36 | }
37 |
--------------------------------------------------------------------------------
/training_wmt14_en2de.sh:
--------------------------------------------------------------------------------
1 | TEXT=wmt14_en_de
2 | python preprocess.py --source-lang en --target-lang de --trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test --destdir data-bin/wmt14_en2de --joined-dictionary --thresholdtgt 0 --thresholdsrc 0 --workers 20
3 | DATA_DIR=data-bin/wmt14_en2de/
4 | python train.py ${DATA_DIR} --task translation \
5 | --arch transformer_wmt_en_de --share-all-embeddings --dropout 0.15 \
6 | --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
7 | --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 8000 \
8 | --lr 0.0007 --min-lr 1e-09 \
9 | --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --weight-decay 0.0 \
10 | --max-tokens 4096\
11 | --update-freq 1 --no-progress-bar --log-format json --log-interval 100 \
12 | --save-interval-updates 1000 --keep-interval-updates 1000 --max-update 300000 --source-lang en --target-lang de \
13 | --save-dir checkpoints/base-wmt-en2de \
14 | --save_src_dict data/src_dict_wmt_en2de.txt \
15 | --cap2image_file data/cap2image_en2de.pickle \
16 | --image_embedding_file features_resnet50/train-resnet50-avgpool.npy \
17 | --encoder-type TransformerAvgEncoder --L2norm true --image_emb_fix --total_num_img 5 --per_num_img 1 --find-unused-parameters --merge_option att-gate --gate_type neural-gate
--------------------------------------------------------------------------------
/fairseq/modules/positional_embedding.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn as nn
7 |
8 | from .learned_positional_embedding import LearnedPositionalEmbedding
9 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
10 |
11 |
12 | def PositionalEmbedding(
13 | num_embeddings: int,
14 | embedding_dim: int,
15 | padding_idx: int,
16 | learned: bool = False,
17 | ):
18 | if learned:
19 | # if padding_idx is specified then offset the embedding ids by
20 | # this index and adjust num_embeddings appropriately
21 | # TODO: The right place for this offset would be inside
22 | # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
23 | if padding_idx is not None:
24 | num_embeddings = num_embeddings + padding_idx + 1
25 | m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
26 | nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
27 | if padding_idx is not None:
28 | nn.init.constant_(m.weight[padding_idx], 0)
29 | else:
30 | m = SinusoidalPositionalEmbedding(
31 | embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1,
32 | )
33 | return m
34 |
--------------------------------------------------------------------------------
/fairseq/modules/conv_tbc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | from torch.nn.modules.utils import _single
8 |
9 |
10 | class ConvTBC(torch.nn.Module):
11 | """1D convolution over an input of shape (time x batch x channel)
12 |
13 | The implementation uses gemm to perform the convolution. This implementation
14 | is faster than cuDNN for small kernel sizes.
15 | """
16 | def __init__(self, in_channels, out_channels, kernel_size, padding=0):
17 | super(ConvTBC, self).__init__()
18 | self.in_channels = in_channels
19 | self.out_channels = out_channels
20 | self.kernel_size = _single(kernel_size)
21 | self.padding = _single(padding)
22 |
23 | self.weight = torch.nn.Parameter(torch.Tensor(
24 | self.kernel_size[0], in_channels, out_channels))
25 | self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
26 |
27 | def forward(self, input):
28 | return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0])
29 |
30 | def __repr__(self):
31 | s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
32 | ', padding={padding}')
33 | if self.bias is None:
34 | s += ', bias=False'
35 | s += ')'
36 | return s.format(name=self.__class__.__name__, **self.__dict__)
37 |
--------------------------------------------------------------------------------
/fairseq/optim/sgd.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.optim
7 |
8 | from . import FairseqOptimizer, register_optimizer
9 |
10 |
11 | @register_optimizer('sgd')
12 | class SGD(FairseqOptimizer):
13 | def __init__(self, args, params):
14 | super().__init__(args)
15 | self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
16 |
17 | @staticmethod
18 | def add_args(parser):
19 | """Add optimizer-specific arguments to the parser."""
20 | # fmt: off
21 | parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
22 | help='momentum factor')
23 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
24 | help='weight decay')
25 | # fmt: on
26 |
27 | @property
28 | def optimizer_config(self):
29 | """
30 | Return a kwarg dictionary that will be used to override optimizer
31 | args stored in checkpoints. This allows us to load a checkpoint and
32 | resume training using a different set of optimizer args, e.g., with a
33 | different learning rate.
34 | """
35 | return {
36 | 'lr': self.args.lr[0],
37 | 'momentum': self.args.momentum,
38 | 'weight_decay': self.args.weight_decay,
39 | }
40 |
--------------------------------------------------------------------------------
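
A small sketch of how the registered optimizer is driven entirely by an args namespace; the attribute names mirror add_args() above, and the Linear model is a stand-in.

import argparse
import torch
from fairseq.optim.sgd import SGD

args = argparse.Namespace(lr=[0.1], momentum=0.9, weight_decay=1e-4)
model = torch.nn.Linear(4, 2)

opt = SGD(args, model.parameters())
print(opt.optimizer_config)   # {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}
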
/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .. import FairseqOptimizer
7 |
8 |
9 | class FairseqLRScheduler(object):
10 |
11 | def __init__(self, args, optimizer):
12 | super().__init__()
13 | if not isinstance(optimizer, FairseqOptimizer):
14 | raise ValueError('optimizer must be an instance of FairseqOptimizer')
15 | self.args = args
16 | self.optimizer = optimizer
17 | self.best = None
18 |
19 | @staticmethod
20 | def add_args(parser):
21 | """Add arguments to the parser for this LR scheduler."""
22 | pass
23 |
24 | def state_dict(self):
25 | """Return the LR scheduler state dict."""
26 | return {'best': self.best}
27 |
28 | def load_state_dict(self, state_dict):
29 | """Load an LR scheduler state dict."""
30 | self.best = state_dict['best']
31 |
32 | def step(self, epoch, val_loss=None):
33 | """Update the learning rate at the end of the given epoch."""
34 | if val_loss is not None:
35 | if self.best is None:
36 | self.best = val_loss
37 | else:
38 | self.best = min(self.best, val_loss)
39 |
40 | def step_update(self, num_updates):
41 | """Update the learning rate after each update."""
42 | return self.optimizer.get_lr()
43 |
--------------------------------------------------------------------------------
/fairseq/criterions/fairseq_criterion.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from torch.nn.modules.loss import _Loss
7 |
8 |
9 | class FairseqCriterion(_Loss):
10 |
11 | def __init__(self, args, task):
12 | super().__init__()
13 | self.args = args
14 | self.task = task
15 | self.padding_idx = task.target_dictionary.pad() if task.target_dictionary is not None else -100
16 |
17 | @staticmethod
18 | def add_args(parser):
19 | """Add criterion-specific arguments to the parser."""
20 | pass
21 |
22 | @classmethod
23 | def build_criterion(cls, args, task):
24 | return cls(args, task)
25 |
26 | def forward(self, model, sample, reduce=True):
27 | """Compute the loss for the given sample.
28 |
29 | Returns a tuple with three elements:
30 | 1) the loss
31 | 2) the sample size, which is used as the denominator for the gradient
32 | 3) logging outputs to display while training
33 | """
34 | raise NotImplementedError
35 |
36 | @staticmethod
37 | def aggregate_logging_outputs(logging_outputs):
38 | """Aggregate logging outputs from data parallel training."""
39 | raise NotImplementedError
40 |
41 | @staticmethod
42 | def grad_denom(sample_sizes):
43 | """Compute the gradient denominator for a set of sample sizes."""
44 | return sum(sample_sizes)
45 |
--------------------------------------------------------------------------------
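
A hedged sketch of a minimal subclass implementing the three-element contract of forward(); the 'toy_l2' name and its squared-error loss are illustrative only, not part of fairseq.

from fairseq.criterions import FairseqCriterion, register_criterion

@register_criterion('toy_l2')
class ToyL2Criterion(FairseqCriterion):

    def forward(self, model, sample, reduce=True):
        net_output = model(**sample['net_input'])
        loss = ((net_output[0] - sample['target']) ** 2).sum()
        sample_size = sample['target'].numel()
        logging_output = {'loss': loss.item(), 'sample_size': sample_size}
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        return {'loss': sum(log['loss'] for log in logging_outputs)}
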
/fairseq/data/base_wrapper_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from torch.utils.data.dataloader import default_collate
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class BaseWrapperDataset(FairseqDataset):
12 |
13 | def __init__(self, dataset):
14 | super().__init__()
15 | self.dataset = dataset
16 |
17 | def __getitem__(self, index):
18 | return self.dataset[index]
19 |
20 | def __len__(self):
21 | return len(self.dataset)
22 |
23 | def collater(self, samples):
24 | if hasattr(self.dataset, 'collater'):
25 | return self.dataset.collater(samples)
26 | else:
27 | return default_collate(samples)
28 |
29 | @property
30 | def sizes(self):
31 | return self.dataset.sizes
32 |
33 | def num_tokens(self, index):
34 | return self.dataset.num_tokens(index)
35 |
36 | def size(self, index):
37 | return self.dataset.size(index)
38 |
39 | def ordered_indices(self):
40 | return self.dataset.ordered_indices()
41 |
42 | @property
43 | def supports_prefetch(self):
44 | return getattr(self.dataset, 'supports_prefetch', False)
45 |
46 | def prefetch(self, indices):
47 | self.dataset.prefetch(indices)
48 |
49 | def set_epoch(self, epoch):
50 | super().set_epoch(epoch)
51 | if hasattr(self.dataset, 'set_epoch'):
52 | self.dataset.set_epoch(epoch)
53 |
--------------------------------------------------------------------------------
/fairseq/models/fairseq_encoder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn as nn
7 |
8 |
9 | class FairseqEncoder(nn.Module):
10 | """Base class for encoders."""
11 |
12 | def __init__(self, dictionary):
13 | super().__init__()
14 | self.dictionary = dictionary
15 |
16 | def forward(self, src_tokens, src_lengths=None, **kwargs):
17 | """
18 | Args:
19 | src_tokens (LongTensor): tokens in the source language of shape
20 | `(batch, src_len)`
21 | src_lengths (LongTensor): lengths of each source sentence of shape
22 | `(batch)`
23 | """
24 | raise NotImplementedError
25 |
26 | def reorder_encoder_out(self, encoder_out, new_order):
27 | """
28 | Reorder encoder output according to `new_order`.
29 |
30 | Args:
31 | encoder_out: output from the ``forward()`` method
32 | new_order (LongTensor): desired order
33 |
34 | Returns:
35 | `encoder_out` rearranged according to `new_order`
36 | """
37 | raise NotImplementedError
38 |
39 | def max_positions(self):
40 | """Maximum input length supported by the encoder."""
41 | return 1e6 # an arbitrary large number
42 |
43 | def upgrade_state_dict(self, state_dict):
44 | """Upgrade a (possibly old) state dict for new versions of fairseq."""
45 | return state_dict
46 |
--------------------------------------------------------------------------------
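
As an illustration of the interface (not a fairseq model), a bag-of-words encoder that mean-pools token embeddings and supports beam-search reordering:

import torch
import torch.nn as nn
from fairseq.data import Dictionary
from fairseq.models import FairseqEncoder

class BagOfWordsEncoder(FairseqEncoder):
    def __init__(self, dictionary, embed_dim=8):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, padding_idx=dictionary.pad())

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        return {'encoder_out': self.embed(src_tokens).mean(dim=1)}  # (batch, embed_dim)

    def reorder_encoder_out(self, encoder_out, new_order):
        encoder_out['encoder_out'] = encoder_out['encoder_out'].index_select(0, new_order)
        return encoder_out

d = Dictionary()
for w in ['hello', 'world']:
    d.add_symbol(w)
enc = BagOfWordsEncoder(d)
out = enc(torch.tensor([[d.index('hello'), d.index('world')]]))
print(out['encoder_out'].shape)   # torch.Size([1, 8])
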
/fairseq/data/concat_sentences_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class ConcatSentencesDataset(FairseqDataset):
12 |
13 | def __init__(self, *datasets):
14 | super().__init__()
15 | self.datasets = datasets
16 | assert all(len(ds) == len(datasets[0]) for ds in datasets), \
17 | 'datasets must have the same length'
18 |
19 | def __getitem__(self, index):
20 | return torch.cat([ds[index] for ds in self.datasets])
21 |
22 | def __len__(self):
23 | return len(self.datasets[0])
24 |
25 | def collater(self, samples):
26 | return self.datasets[0].collater(samples)
27 |
28 | @property
29 | def sizes(self):
30 | return sum(ds.sizes for ds in self.datasets)
31 |
32 | def num_tokens(self, index):
33 | return sum(ds.num_tokens(index) for ds in self.datasets)
34 |
35 | def size(self, index):
36 | return sum(ds.size(index) for ds in self.datasets)
37 |
38 | def ordered_indices(self):
39 | return self.datasets[0].ordered_indices()
40 |
41 | @property
42 | def supports_prefetch(self):
43 | return any(
44 | getattr(ds, 'supports_prefetch', False) for ds in self.datasets
45 | )
46 |
47 | def prefetch(self, indices):
48 | for ds in self.datasets:
49 | if getattr(ds, 'supports_prefetch', False):
50 | ds.prefetch(indices)
51 |
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | #include
9 | #include
10 |
11 | #include
12 | #include
13 | #include
14 |
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 |
22 | #include
23 | #include
24 | #include
25 |
26 | #define SHFL_MASK 0xffffffff
27 |
28 | template
29 | __global__
30 | void dynamicconv_forward_kernel(const scalar_t* input,
31 | const scalar_t* weight,
32 | int minibatch,
33 | int sequenceLength,
34 | int numFeatures,
35 | int numFiltersInBlock,
36 | int numHeads,
37 | scalar_t* output);
38 |
39 | template
40 | __global__
41 | void dynamicconv_backward_kernel(
42 | const scalar_t* gradOutput, // B * C * T
43 | const scalar_t* input, // B * C * T
44 | const scalar_t* weight,
45 | int minibatch,
46 | int sequenceLength,
47 | int numFeatures,
48 | int numFiltersInBlock,
49 | int numHeads,
50 | scalar_t* gradWeight,
51 | scalar_t* gradInput); // B * H * k * T
52 |
--------------------------------------------------------------------------------
/fairseq/modules/lightconv_layer/lightconv_cuda.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | #include <torch/extension.h>
9 | #include <vector>
10 |
11 | std::vector<at::Tensor> lightconv_cuda_forward(
12 | at::Tensor input,
13 | at::Tensor filters,
14 | int padding_l);
15 |
16 | std::vector<at::Tensor> lightconv_cuda_backward(
17 | at::Tensor gradOutput,
18 | int padding_l,
19 | at::Tensor input,
20 | at::Tensor filters);
21 |
22 |
23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
26 |
27 | std::vector<at::Tensor> lightconv_forward(
28 | at::Tensor input,
29 | at::Tensor filters,
30 | int padding_l) {
31 |
32 | CHECK_INPUT(input);
33 | CHECK_INPUT(filters);
34 |
35 | return lightconv_cuda_forward(input, filters, padding_l);
36 | }
37 |
38 | std::vector<at::Tensor> lightconv_backward(
39 | at::Tensor gradOutput,
40 | int padding_l,
41 | at::Tensor input,
42 | at::Tensor filters) {
43 |
44 | CHECK_INPUT(gradOutput);
45 | CHECK_INPUT(input);
46 | CHECK_INPUT(filters);
47 |
48 | return lightconv_cuda_backward(gradOutput, padding_l, input, filters);
49 | }
50 |
51 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
52 | m.def("forward", &lightconv_forward, "lightconv forward (CUDA)");
53 | m.def("backward", &lightconv_backward, "lightconv backward (CUDA)");
54 | }
55 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/gpt2_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq import file_utils
7 | from fairseq.data.encoders import register_bpe
8 |
9 | from .gpt2_bpe_utils import get_encoder
10 |
11 |
12 | DEFAULT_ENCODER_JSON = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json'
13 | DEFAULT_VOCAB_BPE = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe'
14 |
15 |
16 | @register_bpe('gpt2')
17 | class GPT2BPE(object):
18 |
19 | @staticmethod
20 | def add_args(parser):
21 | # fmt: off
22 | parser.add_argument('--gpt2-encoder-json', type=str,
23 | default=DEFAULT_ENCODER_JSON,
24 | help='path to encoder.json')
25 | parser.add_argument('--gpt2-vocab-bpe', type=str,
26 | default=DEFAULT_VOCAB_BPE,
27 | help='path to vocab.bpe')
28 | # fmt: on
29 |
30 | def __init__(self, args):
31 | encoder_json = file_utils.cached_path(
32 | getattr(args, 'gpt2_encoder_json', DEFAULT_ENCODER_JSON)
33 | )
34 | vocab_bpe = file_utils.cached_path(
35 | getattr(args, 'gpt2_vocab_bpe', DEFAULT_VOCAB_BPE)
36 | )
37 | self.bpe = get_encoder(encoder_json, vocab_bpe)
38 |
39 | def encode(self, x: str) -> str:
40 | return ' '.join(map(str, self.bpe.encode(x)))
41 |
42 | def decode(self, x: str) -> str:
43 | return self.bpe.decode(map(int, x.split()))
44 |
45 | def is_beginning_of_word(self, x: str) -> bool:
46 | return self.decode(x).startswith(' ')
47 |
--------------------------------------------------------------------------------
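
A usage sketch (network access assumed: the default encoder.json and vocab.bpe are downloaded and cached on first use, and the regex package must be installed).

import argparse
from fairseq.data.encoders.gpt2_bpe import GPT2BPE

bpe = GPT2BPE(argparse.Namespace())   # falls back to the DEFAULT_* URLs above
ids = bpe.encode('Hello world!')      # space-separated GPT-2 token ids as a string
print(bpe.decode(ids))                # 'Hello world!'
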
/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | #include <torch/extension.h>
9 | #include <vector>
10 |
11 | std::vector<at::Tensor> dynamicconv_cuda_forward(
12 | at::Tensor input,
13 | at::Tensor filters,
14 | int padding_l);
15 |
16 | std::vector<at::Tensor> dynamicconv_cuda_backward(
17 | at::Tensor gradOutput,
18 | int padding_l,
19 | at::Tensor input,
20 | at::Tensor filters);
21 |
22 |
23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
26 |
27 | std::vector<at::Tensor> dynamicconv_forward(
28 | at::Tensor input,
29 | at::Tensor filters,
30 | int padding_l) {
31 |
32 | CHECK_INPUT(input);
33 | CHECK_INPUT(filters);
34 |
35 | return dynamicconv_cuda_forward(input, filters,
36 | padding_l);
37 | }
38 |
39 | std::vector<at::Tensor> dynamicconv_backward(
40 | at::Tensor gradOutput,
41 | int padding_l,
42 | at::Tensor input,
43 | at::Tensor filters) {
44 |
45 | CHECK_INPUT(gradOutput);
46 | CHECK_INPUT(input);
47 | CHECK_INPUT(filters);
48 |
49 | return dynamicconv_cuda_backward(gradOutput, padding_l,
50 | input, filters);
51 | }
52 |
53 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
54 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)");
55 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)");
56 | }
57 |
--------------------------------------------------------------------------------
/fairseq/data/legacy/masked_lm_dictionary.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data import Dictionary
7 |
8 |
9 | class MaskedLMDictionary(Dictionary):
10 | """
11 | Dictionary for Masked Language Modelling tasks. This extends Dictionary by
12 | adding the mask symbol.
13 | """
14 | def __init__(
15 | self,
16 |         pad='<pad>',
17 |         eos='</s>',
18 |         unk='<unk>',
19 |         mask='<mask>',
20 | ):
21 | super().__init__(pad, eos, unk)
22 | self.mask_word = mask
23 | self.mask_index = self.add_symbol(mask)
24 | self.nspecial = len(self.symbols)
25 |
26 | def mask(self):
27 | """Helper to get index of mask symbol"""
28 | return self.mask_index
29 |
30 |
31 | class BertDictionary(MaskedLMDictionary):
32 | """
33 | Dictionary for BERT task. This extends MaskedLMDictionary by adding support
34 | for cls and sep symbols.
35 | """
36 | def __init__(
37 | self,
38 |         pad='<pad>',
39 |         eos='</s>',
40 |         unk='<unk>',
41 |         mask='<mask>',
42 |         cls='<cls>',
43 |         sep='<sep>'
44 | ):
45 | super().__init__(pad, eos, unk, mask)
46 | self.cls_word = cls
47 | self.sep_word = sep
48 | self.cls_index = self.add_symbol(cls)
49 | self.sep_index = self.add_symbol(sep)
50 | self.nspecial = len(self.symbols)
51 |
52 | def cls(self):
53 | """Helper to get index of cls symbol"""
54 | return self.cls_index
55 |
56 | def sep(self):
57 | """Helper to get index of sep symbol"""
58 | return self.sep_index
59 |
--------------------------------------------------------------------------------
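
A quick sketch of the extra symbols these dictionaries register (indices follow the order in which the special symbols are added):

from fairseq.data.legacy.masked_lm_dictionary import BertDictionary

d = BertDictionary()
print(d.pad(), d.eos(), d.unk())    # base special-symbol indices
print(d.mask(), d.cls(), d.sep())   # extra symbols added by the subclasses above
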
/fairseq/data/encoders/subword_nmt_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq import file_utils
7 | from fairseq.data.encoders import register_bpe
8 |
9 |
10 | @register_bpe('subword_nmt')
11 | class SubwordNMTBPE(object):
12 |
13 | @staticmethod
14 | def add_args(parser):
15 | # fmt: off
16 | parser.add_argument('--bpe-codes', type=str,
17 | help='path to subword NMT BPE')
18 | parser.add_argument('--bpe-separator', default='@@',
19 | help='BPE separator')
20 | # fmt: on
21 |
22 | def __init__(self, args):
23 | if args.bpe_codes is None:
24 | raise ValueError('--bpe-codes is required for --bpe=subword_nmt')
25 | codes = file_utils.cached_path(args.bpe_codes)
26 | try:
27 | from subword_nmt import apply_bpe
28 | bpe_parser = apply_bpe.create_parser()
29 | bpe_args = bpe_parser.parse_args([
30 | '--codes', codes,
31 | '--separator', args.bpe_separator,
32 | ])
33 | self.bpe = apply_bpe.BPE(
34 | bpe_args.codes,
35 | bpe_args.merges,
36 | bpe_args.separator,
37 | None,
38 | bpe_args.glossaries,
39 | )
40 | self.bpe_symbol = bpe_args.separator + ' '
41 | except ImportError:
42 | raise ImportError('Please install subword_nmt with: pip install subword-nmt')
43 |
44 | def encode(self, x: str) -> str:
45 | return self.bpe.process_line(x)
46 |
47 | def decode(self, x: str) -> str:
48 | return (x + ' ').replace(self.bpe_symbol, '').rstrip()
49 |
--------------------------------------------------------------------------------
/fairseq/modules/sparse_transformer_sentence_encoder_layer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.modules import TransformerSentenceEncoderLayer
7 | from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
8 |
9 |
10 | class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
11 | """
12 |     Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
13 | """
14 |
15 | def __init__(
16 | self,
17 | embedding_dim: float = 768,
18 | ffn_embedding_dim: float = 3072,
19 | num_attention_heads: float = 8,
20 | dropout: float = 0.1,
21 | attention_dropout: float = 0.1,
22 | activation_dropout: float = 0.1,
23 | activation_fn: str = 'relu',
24 | add_bias_kv: bool = False,
25 | add_zero_attn: bool = False,
26 | export: bool = False,
27 | is_bidirectional: bool = True,
28 | stride: int = 32,
29 | expressivity: int = 8,
30 | ) -> None:
31 |
32 | super().__init__(
33 | embedding_dim, ffn_embedding_dim, num_attention_heads, dropout,
34 | attention_dropout, activation_dropout, activation_fn, add_bias_kv,
35 | add_zero_attn, export
36 | )
37 |
38 | self.self_attn = SparseMultiheadAttention(
39 | self.embedding_dim,
40 | num_attention_heads,
41 | dropout=attention_dropout,
42 | add_bias_kv=add_bias_kv,
43 | add_zero_attn=add_zero_attn,
44 | self_attention=True,
45 | is_bidirectional=is_bidirectional,
46 | stride=stride,
47 | expressivity=expressivity,
48 | )
49 |
--------------------------------------------------------------------------------
/fairseq/optim/adadelta.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.optim
7 |
8 | from . import FairseqOptimizer, register_optimizer
9 |
10 |
11 | @register_optimizer('adadelta')
12 | class Adadelta(FairseqOptimizer):
13 | def __init__(self, args, params):
14 | super().__init__(args)
15 | self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)
16 |
17 | @staticmethod
18 | def add_args(parser):
19 | """Add optimizer-specific arguments to the parser."""
20 | # fmt: off
21 | parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO',
22 | help='coefficient used for computing a running average of squared gradients')
23 | parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS',
24 | help='term added to the denominator to improve numerical stability')
25 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
26 | help='weight decay')
27 | parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')
28 | # fmt: on
29 |
30 | @property
31 | def optimizer_config(self):
32 | """
33 | Return a kwarg dictionary that will be used to override optimizer
34 | args stored in checkpoints. This allows us to load a checkpoint and
35 | resume training using a different set of optimizer args, e.g., with a
36 | different learning rate.
37 | """
38 | return {
39 | 'lr': self.args.lr[0],
40 | 'rho': self.args.adadelta_rho,
41 | 'eps': self.args.adadelta_eps,
42 | 'weight_decay': self.args.weight_decay,
43 | }
44 |
--------------------------------------------------------------------------------
/fairseq/meters.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import time
7 |
8 |
9 | class AverageMeter(object):
10 | """Computes and stores the average and current value"""
11 | def __init__(self):
12 | self.reset()
13 |
14 | def reset(self):
15 | self.val = 0
16 | self.avg = 0
17 | self.sum = 0
18 | self.count = 0
19 |
20 | def update(self, val, n=1):
21 | self.val = val
22 | self.sum += val * n
23 | self.count += n
24 | self.avg = self.sum / self.count
25 |
26 |
27 | class TimeMeter(object):
28 | """Computes the average occurrence of some event per second"""
29 | def __init__(self, init=0):
30 | self.reset(init)
31 |
32 | def reset(self, init=0):
33 | self.init = init
34 | self.start = time.time()
35 | self.n = 0
36 |
37 | def update(self, val=1):
38 | self.n += val
39 |
40 | @property
41 | def avg(self):
42 | return self.n / self.elapsed_time
43 |
44 | @property
45 | def elapsed_time(self):
46 | return self.init + (time.time() - self.start)
47 |
48 |
49 | class StopwatchMeter(object):
50 | """Computes the sum/avg duration of some event in seconds"""
51 | def __init__(self):
52 | self.reset()
53 |
54 | def start(self):
55 | self.start_time = time.time()
56 |
57 | def stop(self, n=1):
58 | if self.start_time is not None:
59 | delta = time.time() - self.start_time
60 | self.sum += delta
61 | self.n += n
62 | self.start_time = None
63 |
64 | def reset(self):
65 | self.sum = 0
66 | self.n = 0
67 | self.start_time = None
68 |
69 | @property
70 | def avg(self):
71 | return self.sum / self.n
72 |
--------------------------------------------------------------------------------
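
A short sketch of the meters in isolation; the loss values and batch size are made up.

from fairseq.meters import AverageMeter, StopwatchMeter

meter = AverageMeter()
for loss in [4.0, 3.0, 2.0]:
    meter.update(loss, n=32)   # each update covers a batch of 32 samples
print(meter.avg)               # 3.0

timer = StopwatchMeter()
timer.start()
# ... run one training step ...
timer.stop(n=1)
print(timer.sum)               # elapsed seconds for the timed region
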
/fairseq/data/fairseq_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch.utils.data
8 |
9 |
10 | class FairseqDataset(torch.utils.data.Dataset):
11 | """A dataset that provides helpers for batching."""
12 |
13 | def __getitem__(self, index):
14 | raise NotImplementedError
15 |
16 | def __len__(self):
17 | raise NotImplementedError
18 |
19 | def collater(self, samples):
20 | """Merge a list of samples to form a mini-batch.
21 |
22 | Args:
23 | samples (List[dict]): samples to collate
24 |
25 | Returns:
26 | dict: a mini-batch suitable for forwarding with a Model
27 | """
28 | raise NotImplementedError
29 |
30 | def num_tokens(self, index):
31 | """Return the number of tokens in a sample. This value is used to
32 | enforce ``--max-tokens`` during batching."""
33 | raise NotImplementedError
34 |
35 | def size(self, index):
36 | """Return an example's size as a float or tuple. This value is used when
37 | filtering a dataset with ``--max-positions``."""
38 | raise NotImplementedError
39 |
40 | def ordered_indices(self):
41 | """Return an ordered list of indices. Batches will be constructed based
42 | on this order."""
43 | return np.arange(len(self))
44 |
45 | @property
46 | def supports_prefetch(self):
47 | """Whether this dataset supports prefetching."""
48 | return False
49 |
50 | def attr(self, attr: str, index: int):
51 | return getattr(self, attr, None)
52 |
53 | def prefetch(self, indices):
54 | """Prefetch the data required for this epoch."""
55 | raise NotImplementedError
56 |
57 | def set_epoch(self, epoch):
58 | pass
59 |
--------------------------------------------------------------------------------
/fairseq/data/subsample_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class SubsampleDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, size_ratio):
13 | super().__init__(dataset)
14 | assert size_ratio < 1
15 | self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
16 | self.indices = np.random.choice(
17 | range(len(self.dataset)), self.actual_size, replace=False
18 | )
19 | print(
20 | "subsampled dataset from {} to {} (ratio={})".format(len(self.dataset), self.actual_size, size_ratio)
21 | )
22 |
23 | def __getitem__(self, index):
24 | return self.dataset[self.indices[index]]
25 |
26 | def __len__(self):
27 | return self.actual_size
28 |
29 | def collater(self, samples):
30 | return self.dataset.collater(samples)
31 |
32 | @property
33 | def sizes(self):
34 | return self.dataset.sizes[self.indices]
35 |
36 | @property
37 | def name(self):
38 | return self.dataset.name
39 |
40 | def num_tokens(self, index):
41 | return self.dataset.num_tokens(self.indices[index])
42 |
43 | def size(self, index):
44 | return self.dataset.size(self.indices[index])
45 |
46 | def ordered_indices(self):
47 | """Return an ordered list of indices. Batches will be constructed based
48 | on this order."""
49 |         if getattr(self, 'shuffle', False):  # 'shuffle' is never set in __init__; default to False
50 | order = [np.random.permutation(len(self))]
51 | else:
52 | order = [np.arange(len(self))]
53 | order.append(self.sizes)
54 | return np.lexsort(order)
55 |
56 | def prefetch(self, indices):
57 | self.dataset.prefetch(self.indices[indices])
58 |
--------------------------------------------------------------------------------
/fairseq/modules/highway.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from torch import nn
9 |
10 |
11 | class Highway(torch.nn.Module):
12 | """
13 |     A `Highway layer <https://arxiv.org/abs/1505.00387>`_.
14 | Adopted from the AllenNLP implementation.
15 | """
16 |
17 | def __init__(
18 | self,
19 | input_dim: int,
20 | num_layers: int = 1
21 | ):
22 | super(Highway, self).__init__()
23 | self.input_dim = input_dim
24 | self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2)
25 | for _ in range(num_layers)])
26 | self.activation = nn.ReLU()
27 |
28 | self.reset_parameters()
29 |
30 | def reset_parameters(self):
31 | for layer in self.layers:
32 | # As per comment in AllenNLP:
33 | # We should bias the highway layer to just carry its input forward. We do that by
34 | # setting the bias on `B(x)` to be positive, because that means `g` will be biased to
35 | # be high, so we will carry the input forward. The bias on `B(x)` is the second half
36 | # of the bias vector in each Linear layer.
37 | nn.init.constant_(layer.bias[self.input_dim:], 1)
38 |
39 | nn.init.constant_(layer.bias[:self.input_dim], 0)
40 | nn.init.xavier_normal_(layer.weight)
41 |
42 | def forward(
43 | self,
44 | x: torch.Tensor
45 | ):
46 | for layer in self.layers:
47 | projection = layer(x)
48 | proj_x, gate = projection.chunk(2, dim=-1)
49 | proj_x = self.activation(proj_x)
50 | gate = torch.sigmoid(gate)
51 | x = gate * x + (gate.new_tensor([1]) - gate) * proj_x
52 | return x
53 |
--------------------------------------------------------------------------------
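
Usage sketch: each layer computes a sigmoid gate g and mixes the carried input with a ReLU projection, g * x + (1 - g) * relu(W x).

import torch
from fairseq.modules import Highway

layer = Highway(input_dim=64, num_layers=2)
x = torch.randn(8, 64)
y = layer(x)        # shape preserved; freshly initialized gates are biased towards carrying x
print(y.shape)      # torch.Size([8, 64])
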
/fairseq/modules/beamable_mm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | import torch.nn as nn
8 |
9 |
10 | class BeamableMM(nn.Module):
11 | """This module provides an optimized MM for beam decoding with attention.
12 |
13 |     It leverages the fact that the source-side of the input is replicated beam
14 | times and the target-side of the input is of width one. This layer speeds up
15 | inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
16 | with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
17 | """
18 | def __init__(self, beam_size=None):
19 | super(BeamableMM, self).__init__()
20 | self.beam_size = beam_size
21 |
22 | def forward(self, input1, input2):
23 | if (
24 | not self.training and # test mode
25 | self.beam_size is not None and # beam size is set
26 | input1.dim() == 3 and # only support batched input
27 | input1.size(1) == 1 # single time step update
28 | ):
29 | bsz, beam = input1.size(0), self.beam_size
30 |
31 | # bsz x 1 x nhu --> bsz/beam x beam x nhu
32 | input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
33 |
34 | # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
35 | input2 = input2.unfold(0, beam, beam)[:, :, :, 0]
36 |
37 | # use non batched operation if bsz = beam
38 | if input1.size(0) == 1:
39 | output = torch.mm(input1[0, :, :], input2[0, :, :])
40 | else:
41 | output = input1.bmm(input2)
42 | return output.view(bsz, 1, -1)
43 | else:
44 | return input1.bmm(input2)
45 |
46 | def set_beam_size(self, beam_size):
47 | self.beam_size = beam_size
48 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/hf_bert_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data.encoders import register_bpe
7 |
8 |
9 | @register_bpe('bert')
10 | class BertBPE(object):
11 |
12 | @staticmethod
13 | def add_args(parser):
14 | # fmt: off
15 | parser.add_argument('--bpe-cased', action='store_true',
16 | help='set for cased BPE',
17 | default=False)
18 | parser.add_argument('--bpe-vocab-file', type=str,
19 | help='bpe vocab file.')
20 | # fmt: on
21 |
22 | def __init__(self, args):
23 | try:
24 | from pytorch_transformers import BertTokenizer
25 | from pytorch_transformers.tokenization_utils import clean_up_tokenization
26 | except ImportError:
27 | raise ImportError(
28 |                 'Please install 1.0.0 version of pytorch_transformers '
29 | 'with: pip install pytorch-transformers'
30 | )
31 |
32 | if 'bpe_vocab_file' in args:
33 | self.bert_tokenizer = BertTokenizer(
34 | args.bpe_vocab_file,
35 | do_lower_case=not args.bpe_cased
36 | )
37 | else:
38 | vocab_file_name = 'bert-base-cased' if args.bpe_cased else 'bert-base-uncased'
39 | self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name)
40 | self.clean_up_tokenization = clean_up_tokenization
41 |
42 | def encode(self, x: str) -> str:
43 | return ' '.join(self.bert_tokenizer.tokenize(x))
44 |
45 | def decode(self, x: str) -> str:
46 | return self.clean_up_tokenization(
47 | self.bert_tokenizer.convert_tokens_to_string(x.split(' '))
48 | )
49 |
50 | def is_beginning_of_word(self, x: str) -> bool:
51 | return not x.startswith('##')
52 |
--------------------------------------------------------------------------------
/fairseq/data/sharded_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import itertools
7 | import os
8 | import random
9 |
10 | from . import BaseWrapperDataset
11 | from fairseq.data import data_utils
12 |
13 |
14 | class ShardedDataset(BaseWrapperDataset):
15 |     """A :class:`~fairseq.data.FairseqDataset` wrapper over data sharded into multiple files.
16 | 
17 |     Only one shard is loaded for any given epoch; for the train split the shard is chosen deterministically from the seed and epoch.
18 | 
19 |     """
20 |
21 | def __init__(
22 | self,
23 | dictionary,
24 | dataset_impl: str,
25 | path: str,
26 | split: str,
27 | epoch: int,
28 | name: str = None,
29 | combine: bool = False,
30 | seed: int = 0,
31 | ):
32 | self._name = name if name is not None else os.path.basename(path)
33 | num_shards = 0
34 | for i in itertools.count():
35 | if not os.path.exists(os.path.join(path, "shard" + str(i))):
36 | break
37 | num_shards += 1
38 |
39 | if num_shards > 0 and split == "train":
40 | random.seed(seed ^ epoch)
41 | shard = random.randint(0, num_shards - 1)
42 | split_path = os.path.join(path, "shard" + str(shard), split)
43 | else:
44 | split_path = os.path.join(path, split)
45 | if os.path.isdir(split_path):
46 | split_path = os.path.join(split_path, split)
47 |
48 | dataset = data_utils.load_indexed_dataset(
49 | split_path, dictionary, dataset_impl, combine=combine
50 | )
51 | if dataset is None:
52 | raise FileNotFoundError(
53 | "Dataset not found: {} ({})".format(split, split_path)
54 | )
55 |
56 | super().__init__(dataset)
57 |
58 | @property
59 | def name(self):
60 | return self._name
61 |
--------------------------------------------------------------------------------
/fairseq/modules/learned_positional_embedding.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn as nn
7 |
8 | from fairseq import utils
9 |
10 |
11 | class LearnedPositionalEmbedding(nn.Embedding):
12 | """
13 | This module learns positional embeddings up to a fixed maximum size.
14 | Padding ids are ignored by either offsetting based on padding_idx
15 | or by setting padding_idx to None and ensuring that the appropriate
16 | position ids are passed to the forward function.
17 | """
18 |
19 | def __init__(
20 | self,
21 | num_embeddings: int,
22 | embedding_dim: int,
23 | padding_idx: int,
24 | ):
25 | super().__init__(num_embeddings, embedding_dim, padding_idx)
26 | self.onnx_trace = False
27 |
28 | def forward(self, input, incremental_state=None, positions=None):
29 | """Input is expected to be of size [bsz x seqlen]."""
30 | assert (
31 | (positions is None) or (self.padding_idx is None)
32 | ), "If positions is pre-computed then padding_idx should not be set."
33 |
34 | if positions is None:
35 | if incremental_state is not None:
36 | # positions is the same for every token when decoding a single step
37 | # Without the int() cast, it doesn't work in some cases when exporting to ONNX
38 | positions = input.data.new(1, 1).fill_(int(self.padding_idx + input.size(1)))
39 | else:
40 | positions = utils.make_positions(
41 | input.data, self.padding_idx, onnx_trace=self.onnx_trace,
42 | )
43 | return super().forward(positions)
44 |
45 | def max_positions(self):
46 | """Maximum number of supported positions."""
47 | if self.padding_idx is not None:
48 | return self.num_embeddings - self.padding_idx - 1
49 | else:
50 | return self.num_embeddings
51 |
--------------------------------------------------------------------------------
/fairseq/models/composite_encoder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.models import FairseqEncoder
7 |
8 |
9 | class CompositeEncoder(FairseqEncoder):
10 | """
11 | A wrapper around a dictionary of :class:`FairseqEncoder` objects.
12 |
13 | We run forward on each encoder and return a dictionary of outputs. The first
14 | encoder's dictionary is used for initialization.
15 |
16 | Args:
17 | encoders (dict): a dictionary of :class:`FairseqEncoder` objects.
18 | """
19 |
20 | def __init__(self, encoders):
21 | super().__init__(next(iter(encoders.values())).dictionary)
22 | self.encoders = encoders
23 | for key in self.encoders:
24 | self.add_module(key, self.encoders[key])
25 |
26 | def forward(self, src_tokens, src_lengths):
27 | """
28 | Args:
29 | src_tokens (LongTensor): tokens in the source language of shape
30 | `(batch, src_len)`
31 | src_lengths (LongTensor): lengths of each source sentence of shape
32 | `(batch)`
33 |
34 | Returns:
35 | dict:
36 | the outputs from each Encoder
37 | """
38 | encoder_out = {}
39 | for key in self.encoders:
40 | encoder_out[key] = self.encoders[key](src_tokens, src_lengths)
41 | return encoder_out
42 |
43 | def reorder_encoder_out(self, encoder_out, new_order):
44 | """Reorder encoder output according to new_order."""
45 | for key in self.encoders:
46 | encoder_out[key] = self.encoders[key].reorder_encoder_out(encoder_out[key], new_order)
47 | return encoder_out
48 |
49 | def max_positions(self):
50 | return min([self.encoders[key].max_positions() for key in self.encoders])
51 |
52 | def upgrade_state_dict(self, state_dict):
53 | for key in self.encoders:
54 | self.encoders[key].upgrade_state_dict(state_dict)
55 | return state_dict
56 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/moses_tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data.encoders import register_tokenizer
7 |
8 |
9 | @register_tokenizer('moses')
10 | class MosesTokenizer(object):
11 |
12 | @staticmethod
13 | def add_args(parser):
14 | # fmt: off
15 | parser.add_argument('--moses-source-lang', metavar='SRC',
16 | help='source language')
17 | parser.add_argument('--moses-target-lang', metavar='TARGET',
18 | help='target language')
19 | parser.add_argument('--moses-no-dash-splits', action='store_true', default=False,
20 | help='don\'t apply dash split rules')
21 | parser.add_argument('--moses-no-escape', action='store_true', default=False,
22 |                             help='don\'t perform HTML escaping on apostrophe, quotes, etc.')
23 | # fmt: on
24 |
25 | def __init__(self, args):
26 | self.args = args
27 |
28 | if getattr(args, 'moses_source_lang', None) is None:
29 | args.moses_source_lang = getattr(args, 'source_lang', 'en')
30 | if getattr(args, 'moses_target_lang', None) is None:
31 | args.moses_target_lang = getattr(args, 'target_lang', 'en')
32 |
33 | try:
34 | from sacremoses import MosesTokenizer, MosesDetokenizer
35 | self.tok = MosesTokenizer(args.moses_source_lang)
36 | self.detok = MosesDetokenizer(args.moses_target_lang)
37 | except ImportError:
38 | raise ImportError('Please install Moses tokenizer with: pip install sacremoses')
39 |
40 | def encode(self, x: str) -> str:
41 | return self.tok.tokenize(
42 | x,
43 | aggressive_dash_splits=(not self.args.moses_no_dash_splits),
44 | return_str=True,
45 | escape=(not self.args.moses_no_escape),
46 | )
47 |
48 | def decode(self, x: str) -> str:
49 | return self.detok.detokenize(x.split())
50 |
--------------------------------------------------------------------------------
/fairseq/data/data_utils_fast.pyx:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 |
8 | cimport cython
9 | cimport numpy as np
10 |
11 | DTYPE = np.int64
12 | ctypedef np.int64_t DTYPE_t
13 |
14 |
15 | cdef _is_batch_full(list batch, long num_tokens, long max_tokens, long max_sentences):
16 | if len(batch) == 0:
17 | return 0
18 | if len(batch) == max_sentences:
19 | return 1
20 | if num_tokens > max_tokens:
21 | return 1
22 | return 0
23 |
24 |
25 | @cython.cdivision(True)
26 | cpdef list batch_by_size_fast(
27 | np.ndarray[DTYPE_t, ndim=1] indices,
28 | num_tokens_fn,
29 | long max_tokens,
30 | long max_sentences,
31 | int bsz_mult,
32 | ):
33 | cdef long sample_len = 0
34 | cdef list sample_lens = []
35 | cdef list batch = []
36 | cdef list batches = []
37 | cdef long mod_len
38 | cdef long i
39 | cdef long idx
40 | cdef long num_tokens
41 | cdef DTYPE_t[:] indices_view = indices
42 |
43 | for i in range(len(indices_view)):
44 | idx = indices_view[i]
45 | num_tokens = num_tokens_fn(idx)
46 | sample_lens.append(num_tokens)
47 | sample_len = max(sample_len, num_tokens)
48 |
49 | assert sample_len <= max_tokens, (
50 | "sentence at index {} of size {} exceeds max_tokens "
51 | "limit of {}!".format(idx, sample_len, max_tokens)
52 | )
53 | num_tokens = (len(batch) + 1) * sample_len
54 |
55 | if _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
56 | mod_len = max(
57 | bsz_mult * (len(batch) // bsz_mult),
58 | len(batch) % bsz_mult,
59 | )
60 | batches.append(batch[:mod_len])
61 | batch = batch[mod_len:]
62 | sample_lens = sample_lens[mod_len:]
63 | sample_len = max(sample_lens) if len(sample_lens) > 0 else 0
64 | batch.append(idx)
65 | if len(batch) > 0:
66 | batches.append(batch)
67 | return batches
68 |
--------------------------------------------------------------------------------
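
A pure-Python restatement of the batching rule above (ignoring the bsz_mult rounding and the remainder carry-over): a batch is closed once it reaches max_sentences, or once batch size times the longest sample in it would exceed max_tokens.

def batch_by_size_slow(indices, num_tokens_fn, max_tokens=16, max_sentences=4):
    batches, batch, longest = [], [], 0
    for idx in indices:
        longest = max(longest, num_tokens_fn(idx))
        if batch and (len(batch) == max_sentences or
                      (len(batch) + 1) * longest > max_tokens):
            batches.append(batch)
            batch, longest = [], num_tokens_fn(idx)
        batch.append(idx)
    if batch:
        batches.append(batch)
    return batches

lengths = [3, 4, 5, 9, 2, 2]
print(batch_by_size_slow(range(6), lambda i: lengths[i]))   # [[0, 1, 2], [3], [4, 5]]
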
/fairseq/modules/mean_pool_gating_network.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | import torch.nn.functional as F
8 |
9 |
10 | class MeanPoolGatingNetwork(torch.nn.Module):
11 | """A simple mean-pooling gating network for selecting experts.
12 |
13 | This module applies mean pooling over an encoder's output and returns
14 |     responsibilities for each expert. The encoder format is expected to match
15 | :class:`fairseq.models.transformer.TransformerEncoder`.
16 | """
17 |
18 | def __init__(self, embed_dim, num_experts, dropout=None):
19 | super().__init__()
20 | self.embed_dim = embed_dim
21 | self.num_experts = num_experts
22 |
23 | self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
24 | self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
25 | self.fc2 = torch.nn.Linear(embed_dim, num_experts)
26 |
27 | def forward(self, encoder_out):
28 | if not (
29 | isinstance(encoder_out, dict)
30 | and 'encoder_out' in encoder_out
31 | and 'encoder_padding_mask' in encoder_out
32 | and encoder_out['encoder_out'].size(2) == self.embed_dim
33 | ):
34 | raise ValueError('Unexpected format for encoder_out')
35 |
36 | # mean pooling over time
37 | encoder_padding_mask = encoder_out['encoder_padding_mask'] # B x T
38 | encoder_out = encoder_out['encoder_out'].transpose(0, 1) # B x T x C
39 | if encoder_padding_mask is not None:
40 | encoder_out = encoder_out.clone() # required because of transpose above
41 | encoder_out[encoder_padding_mask] = 0
42 | ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True)
43 | x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out)
44 | else:
45 | x = torch.mean(encoder_out, dim=1)
46 |
47 | x = torch.tanh(self.fc1(x))
48 | if self.dropout is not None:
49 | x = self.dropout(x)
50 | x = self.fc2(x)
51 | return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
52 |
--------------------------------------------------------------------------------
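
A sketch with a fabricated encoder_out dict in the expected T x B x C / B x T layout:

import torch
from fairseq.modules import MeanPoolGatingNetwork

gate = MeanPoolGatingNetwork(embed_dim=512, num_experts=4, dropout=0.1)
encoder_out = {
    'encoder_out': torch.randn(10, 2, 512),                        # T x B x C
    'encoder_padding_mask': torch.zeros(2, 10, dtype=torch.bool),  # B x T, no padding here
}
log_probs = gate(encoder_out)   # (batch, num_experts) log-responsibilities
print(log_probs.shape)          # torch.Size([2, 4])
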
/fairseq/tasks/audio_pretraining.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import os
7 |
8 | from fairseq.data import FileAudioDataset
9 | from . import FairseqTask, register_task
10 |
11 |
12 | @register_task('audio_pretraining')
13 | class AudioPretrainingTask(FairseqTask):
14 |     """
15 |     Task for pre-training on raw, unlabelled audio via :class:`FileAudioDataset`.
16 |     """
17 |
18 | @staticmethod
19 | def add_args(parser):
20 | """Add task-specific arguments to the parser."""
21 | parser.add_argument('data', help='path to data directory')
22 | parser.add_argument('--sample-rate', default=16000, type=int,
23 | help='target sample rate. audio files will be up/down sampled to this rate')
24 | parser.add_argument('--max-sample-size', default=None, type=int,
25 | help='max sample size to crop to for batching. default = min sample length')
26 | parser.add_argument('--min-sample-size', default=None, type=int,
27 | help='min sample size to crop to for batching. default = same as --max-sample-size')
28 |
29 | def __init__(self, args):
30 | super().__init__(args)
31 |
32 | @classmethod
33 | def setup_task(cls, args, **kwargs):
34 | """Setup the task (e.g., load dictionaries).
35 |
36 | Args:
37 | args (argparse.Namespace): parsed command-line arguments
38 | """
39 | return cls(args)
40 |
41 | def load_dataset(self, split, **kwargs):
42 | """Load a given dataset split.
43 |
44 | Args:
45 | split (str): name of the split (e.g., train, valid, test)
46 | """
47 |
48 | manifest = os.path.join(self.args.data, '{}.tsv'.format(split))
49 | self.datasets[split] = FileAudioDataset(manifest,
50 | sample_rate=self.args.sample_rate,
51 | max_sample_size=self.args.max_sample_size,
52 | min_sample_size=self.args.min_sample_size)
53 |
54 | @property
55 | def target_dictionary(self):
56 | """Return the :class:`~fairseq.data.Dictionary` for the language
57 | model."""
58 | return None
59 |
--------------------------------------------------------------------------------
/fairseq/binarizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from collections import Counter
7 | import os
8 |
9 | from fairseq.tokenizer import tokenize_line
10 |
11 |
12 | def safe_readline(f):
13 | pos = f.tell()
14 | while True:
15 | try:
16 | return f.readline()
17 | except UnicodeDecodeError:
18 | pos -= 1
19 | f.seek(pos) # search where this character begins
20 |
21 |
22 | class Binarizer:
23 |
24 | @staticmethod
25 | def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False,
26 | offset=0, end=-1):
27 | nseq, ntok = 0, 0
28 | replaced = Counter()
29 |
30 | def replaced_consumer(word, idx):
31 | if idx == dict.unk_index and word != dict.unk_word:
32 | replaced.update([word])
33 |
34 | with open(filename, 'r', encoding='utf-8') as f:
35 | f.seek(offset)
36 | # next(f) breaks f.tell(), hence readline() must be used
37 | line = safe_readline(f)
38 | while line:
39 | if end > 0 and f.tell() > end:
40 | break
41 | ids = dict.encode_line(
42 | line=line,
43 | line_tokenizer=tokenize,
44 | add_if_not_exist=False,
45 | consumer=replaced_consumer,
46 | append_eos=append_eos,
47 | reverse_order=reverse_order,
48 | )
49 | nseq += 1
50 | ntok += len(ids)
51 | consumer(ids)
52 | line = f.readline()
53 | return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}
54 |
55 | @staticmethod
56 | def find_offsets(filename, num_chunks):
57 | with open(filename, 'r', encoding='utf-8') as f:
58 | size = os.fstat(f.fileno()).st_size
59 | chunk_size = size // num_chunks
60 | offsets = [0 for _ in range(num_chunks + 1)]
61 | for i in range(1, num_chunks):
62 | f.seek(chunk_size * i)
63 | safe_readline(f)
64 | offsets[i] = f.tell()
65 | return offsets
66 |
--------------------------------------------------------------------------------
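
A sketch of the intended flow, assuming a small plain-text file named corpus.txt (hypothetical): populate a Dictionary first, then stream the binarized tensors into a consumer.

from fairseq.binarizer import Binarizer
from fairseq.data import Dictionary

d = Dictionary()
with open('corpus.txt', encoding='utf-8') as f:   # hypothetical input file
    for line in f:
        d.encode_line(line, add_if_not_exist=True)

binarized = []
stats = Binarizer.binarize('corpus.txt', d, consumer=binarized.append)
print(stats['nseq'], stats['ntok'], stats['nunk'])
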
/fairseq/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .adaptive_input import AdaptiveInput
7 | from .adaptive_softmax import AdaptiveSoftmax
8 | from .beamable_mm import BeamableMM
9 | from .character_token_embedder import CharacterTokenEmbedder
10 | from .conv_tbc import ConvTBC
11 | from .downsampled_multihead_attention import DownsampledMultiHeadAttention
12 | from .dynamic_convolution import DynamicConv, DynamicConv1dTBC
13 | from .gelu import gelu, gelu_accurate
14 | from .grad_multiply import GradMultiply
15 | from .highway import Highway
16 | from .layer_norm import LayerNorm
17 | from .learned_positional_embedding import LearnedPositionalEmbedding
18 | from .lightweight_convolution import LightweightConv, LightweightConv1dTBC
19 | from .linearized_convolution import LinearizedConvolution
20 | from .logsumexp_moe import LogSumExpMoE
21 | from .mean_pool_gating_network import MeanPoolGatingNetwork
22 | from .multihead_attention import MultiheadAttention
23 | from .positional_embedding import PositionalEmbedding
24 | from .scalar_bias import ScalarBias
25 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
26 | from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer
27 | from .transformer_sentence_encoder import TransformerSentenceEncoder
28 | from .unfold import unfold1d
29 | from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer
30 | from .vggblock import VGGBlock
31 |
32 | __all__ = [
33 | 'AdaptiveInput',
34 | 'AdaptiveSoftmax',
35 | 'BeamableMM',
36 | 'CharacterTokenEmbedder',
37 | 'ConvTBC',
38 | 'DownsampledMultiHeadAttention',
39 | 'DynamicConv1dTBC',
40 | 'DynamicConv',
41 | 'gelu',
42 | 'gelu_accurate',
43 | 'GradMultiply',
44 | 'Highway',
45 | 'LayerNorm',
46 | 'LearnedPositionalEmbedding',
47 | 'LightweightConv1dTBC',
48 | 'LightweightConv',
49 | 'LinearizedConvolution',
50 | 'LogSumExpMoE',
51 | 'MeanPoolGatingNetwork',
52 | 'MultiheadAttention',
53 | 'PositionalEmbedding',
54 | 'ScalarBias',
55 | 'SinusoidalPositionalEmbedding',
56 | 'TransformerSentenceEncoderLayer',
57 | 'TransformerSentenceEncoder',
58 | 'TransformerDecoderLayer',
59 | 'TransformerEncoderLayer',
60 | 'VGGBlock',
61 | 'unfold1d',
62 | ]
63 |
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.optim.lr_scheduler
7 |
8 | from . import FairseqLRScheduler, register_lr_scheduler
9 |
10 |
11 | @register_lr_scheduler('reduce_lr_on_plateau')
12 | class ReduceLROnPlateau(FairseqLRScheduler):
13 | """Decay the LR by a factor every time the validation loss plateaus."""
14 |
15 | def __init__(self, args, optimizer):
16 | super().__init__(args, optimizer)
17 | if len(args.lr) > 1:
18 | raise ValueError(
19 | 'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.'
20 | ' Consider --lr-scheduler=fixed instead.'
21 | )
22 | self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
23 | self.optimizer.optimizer, patience=0, factor=args.lr_shrink,
24 | threshold=args.lr_threshold)
25 |
26 | @staticmethod
27 | def add_args(parser):
28 | """Add arguments to the parser for this LR scheduler."""
29 | # fmt: off
30 | parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
31 | help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
32 | parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT',
33 | help='Threshold for measuring the new optimum, \
34 | to only focus on significant changes')
35 | # fmt: on
36 |
37 | def state_dict(self):
38 | """Return the LR scheduler state dict."""
39 | return {
40 | 'best': self.lr_scheduler.best,
41 | 'last_epoch': self.lr_scheduler.last_epoch,
42 | }
43 |
44 | def load_state_dict(self, state_dict):
45 | """Load an LR scheduler state dict."""
46 | self.lr_scheduler.best = state_dict['best']
47 | if 'last_epoch' in state_dict:
48 | self.lr_scheduler.last_epoch = state_dict['last_epoch']
49 |
50 | def step(self, epoch, val_loss=None):
51 | """Update the learning rate at the end of the given epoch."""
52 | if val_loss is not None:
53 | self.lr_scheduler.step(val_loss, epoch)
54 | else:
55 | self.lr_scheduler.last_epoch = epoch
56 | return self.optimizer.get_lr()
57 |
--------------------------------------------------------------------------------
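
A sketch wiring this scheduler to the SGD wrapper shown earlier; the args namespace stands in for the parsed command line.

import argparse
import torch
from fairseq.optim.sgd import SGD
from fairseq.optim.lr_scheduler.reduce_lr_on_plateau import ReduceLROnPlateau

args = argparse.Namespace(lr=[0.1], momentum=0.0, weight_decay=0.0,
                          lr_shrink=0.5, lr_threshold=1e-4)
opt = SGD(args, torch.nn.Linear(4, 2).parameters())
sched = ReduceLROnPlateau(args, opt)

for epoch, val_loss in enumerate([1.0, 0.9, 0.9, 0.9]):
    lr = sched.step(epoch, val_loss)   # lr halves (lr_shrink=0.5) once the loss stops improving
    print(epoch, lr)
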
/fairseq/modules/lightconv_layer/lightconv_cuda.cuh:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | #include
9 | #include
10 |
11 | #include
12 | #include
13 |
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 |
21 | #include
22 | #include
23 |
24 | #define SHFL_MASK 0xffffffff
25 |
26 | template
27 | __global__
28 | void lightconv_forward_kernel(const scalar_t* input,
29 | const scalar_t* filters,
30 | int minibatch, int sequenceLength,
31 | int numFeatures, int numFiltersInBlock,
32 | scalar_t* output);
33 |
34 | template
35 | __global__
36 | void lightconv_grad_wrt_input_kernel(
37 | const scalar_t* input,
38 | const scalar_t* filters,
39 | int minibatch,
40 | int sequenceLength,
41 | int numFeatures,
42 | int numFiltersInBlock,
43 | scalar_t* output);
44 |
45 | template
46 | __global__
47 | void lightconv_grad_wrt_weights_firstpass_short_kernel(
48 | const scalar_t* input,
49 | const scalar_t* gradInput,
50 | int minibatch,
51 | int sequenceLength,
52 | int numFeatures,
53 | int numFiltersInBlock,
54 | int numHeads,
55 | float* output);
56 |
57 | template
58 | __global__
59 | void lightconv_grad_wrt_weights_secondpass_short_kernel(
60 | const float* input,
61 | const int minibatch,
62 | const int numFiltersInBlock,
63 | scalar_t* output);
64 |
65 | template
66 | __global__
67 | void lightconv_grad_wrt_weights_firstpass_kernel(
68 | const scalar_t* input,
69 | const scalar_t* gradInput,
70 | int minibatch,
71 | int sequenceLength,
72 | int numFeatures,
73 | int numFiltersInBlock,
74 | float* output);
75 |
76 | template<int FS, typename scalar_t>
77 | __global__
78 | void lightconv_grad_wrt_weights_secondpass_kernel(
79 | const float* input,
80 | const int minibatch,
81 | const int numFiltersInBlock,
82 | scalar_t* output);
83 |
84 |
--------------------------------------------------------------------------------
/fairseq/modules/adaptive_input.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | import torch
8 | from torch import nn
9 |
10 | from typing import List
11 |
12 |
13 | class AdaptiveInput(nn.Module):
14 |
15 | def __init__(
16 | self,
17 | vocab_size: int,
18 | padding_idx: int,
19 | initial_dim: int,
20 | factor: float,
21 | output_dim: int,
22 | cutoff: List[int],
23 | ):
24 | super().__init__()
25 |
26 | if vocab_size > cutoff[-1]:
27 | cutoff = cutoff + [vocab_size]
28 | else:
29 |             assert vocab_size == cutoff[-1], \
30 |                 'cannot specify cutoff larger than vocab size'
31 |
32 | self.cutoff = cutoff
33 | self.embedding_dim = output_dim
34 | self.padding_idx = padding_idx
35 |
36 | self.embeddings = nn.ModuleList()
37 | for i in range(len(self.cutoff)):
38 | prev = self.cutoff[i - 1] if i > 0 else 0
39 | size = self.cutoff[i] - prev
40 | dim = int(initial_dim // (factor ** i))
41 | seq = nn.Sequential(
42 | nn.Embedding(size, dim, padding_idx),
43 | nn.Linear(dim, output_dim, bias=False)
44 | )
45 | self.embeddings.append(seq)
46 |
47 | def init_weights(m):
48 | if isinstance(m, nn.Embedding):
49 | nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5)
50 | nn.init.constant_(m.weight[padding_idx], 0)
51 | elif hasattr(m, 'weight'):
52 | nn.init.xavier_uniform_(m.weight)
53 |
54 | self.apply(init_weights)
55 |
56 | self.register_buffer('_float_tensor', torch.FloatTensor(1))
57 |
58 | def weights_for_band(self, band: int):
59 | return self.embeddings[band][0].weight, self.embeddings[band][1].weight
60 |
61 | def forward(self, input: torch.Tensor):
62 | result = self._float_tensor.new(input.shape + (self.embedding_dim,))
63 | for i in range(len(self.cutoff)):
64 | mask = input.lt(self.cutoff[i])
65 | if i > 0:
66 | mask.mul_(input.ge(self.cutoff[i - 1]))
67 | chunk_input = input[mask] - self.cutoff[i - 1]
68 | else:
69 | chunk_input = input[mask]
70 | if mask.any():
71 | result[mask] = self.embeddings[i](chunk_input)
72 | return result
73 |
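The cutoff list splits the vocabulary into frequency bands; band i gets an embedding of dimension initial_dim // (factor ** i) plus a bias-free projection back up to output_dim, so frequent words get wide embeddings and rare words get narrow ones. A usage sketch with made-up sizes, assuming fairseq is importable:

import torch
from fairseq.modules.adaptive_input import AdaptiveInput

# a 10k-word vocab split into bands [0, 2000), [2000, 6000), [6000, 10000);
# the band dimensions are 512, 256 and 128 here, each projected to output_dim=512
emb = AdaptiveInput(
    vocab_size=10000, padding_idx=0, initial_dim=512,
    factor=2.0, output_dim=512, cutoff=[2000, 6000],
)

tokens = torch.randint(0, 10000, (2, 7))   # (batch, seq_len) of token ids
out = emb(tokens)
print(out.shape)                           # torch.Size([2, 7, 512])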
--------------------------------------------------------------------------------
/fairseq/criterions/masked_lm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import math
7 |
8 | import torch
9 | import torch.nn.functional as F
10 |
11 | from fairseq import utils
12 |
13 | from . import FairseqCriterion, register_criterion
14 |
15 |
16 | @register_criterion('masked_lm')
17 | class MaskedLmLoss(FairseqCriterion):
18 | """
19 | Implementation for the loss used in masked language model (MLM) training.
20 | """
21 |
22 | def __init__(self, args, task):
23 | super().__init__(args, task)
24 |
25 | def forward(self, model, sample, reduce=True):
26 | """Compute the loss for the given sample.
27 | Returns a tuple with three elements:
28 | 1) the loss
29 | 2) the sample size, which is used as the denominator for the gradient
30 | 3) logging outputs to display while training
31 | """
32 | # compute MLM loss
33 | logits = model(**sample['net_input'], return_all_hiddens=False)[0]
34 | targets = model.get_targets(sample, [logits])
35 | loss = F.nll_loss(
36 | F.log_softmax(
37 | logits.view(-1, logits.size(-1)),
38 | dim=-1,
39 | dtype=torch.float32,
40 | ),
41 | targets.view(-1),
42 | reduction='sum',
43 | ignore_index=self.padding_idx,
44 | )
45 |
46 | sample_size = targets.ne(self.padding_idx).int().sum().item()
47 |
48 | logging_output = {
49 | 'loss': utils.item(loss.data) if reduce else loss.data,
50 | 'ntokens': sample['ntokens'],
51 | 'nsentences': sample['nsentences'],
52 | 'sample_size': sample_size,
53 | }
54 | return loss, sample_size, logging_output
55 |
56 | @staticmethod
57 | def aggregate_logging_outputs(logging_outputs):
58 | """Aggregate logging outputs from data parallel training."""
59 | loss = sum(log.get('loss', 0) for log in logging_outputs)
60 | ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
61 | nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
62 | sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
63 |
64 | agg_output = {
65 | 'loss': loss / sample_size / math.log(2),
66 | 'ntokens': ntokens,
67 | 'nsentences': nsentences,
68 | 'sample_size': sample_size,
69 | }
70 | return agg_output
71 |
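The loss is a plain token-level NLL over the target positions, padding excluded, and aggregate_logging_outputs divides by math.log(2) so the reported number is bits per predicted token. A standalone sketch of the same computation on toy tensors (not fairseq-specific):

import math
import torch
import torch.nn.functional as F

pad_idx = 1
logits = torch.randn(2, 5, 100)            # (batch, seq_len, vocab)
targets = torch.randint(2, 100, (2, 5))    # token ids; padded positions would hold pad_idx

loss = F.nll_loss(
    F.log_softmax(logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32),
    targets.view(-1),
    reduction='sum',
    ignore_index=pad_idx,
)
sample_size = targets.ne(pad_idx).int().sum().item()
print('bits per token:', loss.item() / sample_size / math.log(2))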
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/fixed_schedule.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import FairseqLRScheduler, register_lr_scheduler
7 |
8 |
9 | @register_lr_scheduler('fixed')
10 | class FixedSchedule(FairseqLRScheduler):
11 | """Decay the LR on a fixed schedule."""
12 |
13 | def __init__(self, args, optimizer):
14 | super().__init__(args, optimizer)
15 |
16 | # set defaults
17 | args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0
18 |
19 | self.lr = args.lr[0]
20 | if args.warmup_updates > 0:
21 | self.warmup_factor = 1. / args.warmup_updates
22 | else:
23 | self.warmup_factor = 1
24 |
25 | @staticmethod
26 | def add_args(parser):
27 | """Add arguments to the parser for this LR scheduler."""
28 | # fmt: off
29 | parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
30 | help='force annealing at specified epoch')
31 | parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
32 | help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
33 | parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
34 | help='warmup the learning rate linearly for the first N updates')
35 | # fmt: on
36 |
37 | def get_next_lr(self, epoch):
38 | lrs = self.args.lr
39 | if self.args.force_anneal is None or epoch < self.args.force_anneal:
40 | # use fixed LR schedule
41 | next_lr = lrs[min(epoch, len(lrs) - 1)]
42 | else:
43 |             # anneal based on lr_shrink
44 | next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal)
45 | return next_lr
46 |
47 | def step(self, epoch, val_loss=None):
48 | """Update the learning rate at the end of the given epoch."""
49 | super().step(epoch, val_loss)
50 | self.lr = self.get_next_lr(epoch)
51 | self.optimizer.set_lr(self.warmup_factor * self.lr)
52 | return self.optimizer.get_lr()
53 |
54 | def step_update(self, num_updates):
55 | """Update the learning rate after each update."""
56 | if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates:
57 | self.warmup_factor = num_updates / float(self.args.warmup_updates)
58 | self.optimizer.set_lr(self.warmup_factor * self.lr)
59 | return self.optimizer.get_lr()
60 |
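With --lr given as a comma-separated list, the schedule indexes into that list per epoch, and once --force-anneal is reached it keeps shrinking the last value by --lr-shrink. A tiny pure-Python sketch of get_next_lr with illustrative numbers:

lrs = [0.5, 0.25, 0.125]   # --lr 0.5,0.25,0.125
lr_shrink = 0.1            # --lr-shrink
force_anneal = 5           # --force-anneal

def next_lr(epoch):
    if epoch < force_anneal:
        return lrs[min(epoch, len(lrs) - 1)]                   # fixed part of the schedule
    return lrs[-1] * lr_shrink ** (epoch + 1 - force_anneal)   # annealing part

print([round(next_lr(e), 6) for e in range(8)])
# [0.5, 0.25, 0.125, 0.125, 0.125, 0.0125, 0.00125, 0.000125]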
--------------------------------------------------------------------------------
/fairseq/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import argparse
7 | import importlib
8 | import os
9 |
10 | from .fairseq_task import FairseqTask
11 |
12 | TASK_REGISTRY = {}
13 | TASK_CLASS_NAMES = set()
14 |
15 |
16 | def setup_task(args, **kwargs):
17 | return TASK_REGISTRY[args.task].setup_task(args, **kwargs)
18 |
19 |
20 | def register_task(name):
21 | """
22 | New tasks can be added to fairseq with the
23 | :func:`~fairseq.tasks.register_task` function decorator.
24 |
25 | For example::
26 |
27 | @register_task('classification')
28 | class ClassificationTask(FairseqTask):
29 | (...)
30 |
31 | .. note::
32 |
33 | All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
34 | interface.
35 |
36 | Please see the
37 |
38 | Args:
39 | name (str): the name of the task
40 | """
41 |
42 | def register_task_cls(cls):
43 | if name in TASK_REGISTRY:
44 | raise ValueError('Cannot register duplicate task ({})'.format(name))
45 | if not issubclass(cls, FairseqTask):
46 | raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
47 | if cls.__name__ in TASK_CLASS_NAMES:
48 | raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
49 | TASK_REGISTRY[name] = cls
50 | TASK_CLASS_NAMES.add(cls.__name__)
51 | return cls
52 |
53 | return register_task_cls
54 |
55 |
56 | # automatically import any Python files in the tasks/ directory
57 | for file in os.listdir(os.path.dirname(__file__)):
58 | if file.endswith('.py') and not file.startswith('_'):
59 | task_name = file[:file.find('.py')]
60 | importlib.import_module('fairseq.tasks.' + task_name)
61 |
62 | # expose `task_parser` for sphinx
63 | if task_name in TASK_REGISTRY:
64 | parser = argparse.ArgumentParser(add_help=False)
65 | group_task = parser.add_argument_group('Task name')
66 | # fmt: off
67 | group_task.add_argument('--task', metavar=task_name,
68 | help='Enable this task with: ``--task=' + task_name + '``')
69 | # fmt: on
70 | group_args = parser.add_argument_group('Additional command-line arguments')
71 | TASK_REGISTRY[task_name].add_args(group_args)
72 | globals()[task_name + '_parser'] = parser
73 |
74 |
75 | def get_task(name):
76 | return TASK_REGISTRY[name]
77 |
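Registration is a dictionary insert keyed by the task name, so a task becomes available to --task as soon as its module is imported. A sketch of registering and looking up a custom task; the task name and class below are hypothetical, not part of fairseq:

from fairseq.tasks import FairseqTask, register_task, get_task

@register_task('toy_classification')          # hypothetical task name
class ToyClassificationTask(FairseqTask):
    @classmethod
    def setup_task(cls, args, **kwargs):
        return cls(args)

assert get_task('toy_classification') is ToyClassificationTask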
--------------------------------------------------------------------------------
/fairseq/models/distributed_fairseq_model.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import inspect
7 |
8 | import torch.nn as nn
9 |
10 | from fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel
11 | from fairseq.models import BaseFairseqModel
12 |
13 |
14 | def DistributedFairseqModel(args, model):
15 | """
16 | Wrap a *model* to support distributed data parallel training.
17 |
18 | This is similar to the built-in DistributedDataParallel, but allows
19 | additional configuration of the DistributedDataParallel class to
20 | use, and also provides easier access to the wrapped model by
21 |     forwarding requests for missing attributes to it.
22 |
23 | Args:
24 | args (argparse.Namespace): fairseq args
25 | model (BaseFairseqModel): model to wrap
26 | """
27 | # determine which DDP class to extend
28 | assert isinstance(model, nn.Module)
29 | if args.ddp_backend == 'c10d':
30 | ddp_class = nn.parallel.DistributedDataParallel
31 | init_kwargs = dict(
32 | module=model,
33 | device_ids=[args.device_id],
34 | output_device=args.device_id,
35 | broadcast_buffers=False,
36 | bucket_cap_mb=args.bucket_cap_mb,
37 | )
38 | # Maintain backward compatibility
39 | if 'check_reduction' in inspect.getargspec(ddp_class)[0]:
40 | init_kwargs['check_reduction'] = True
41 | if 'find_unused_parameters' in inspect.getargspec(ddp_class)[0]:
42 | init_kwargs['find_unused_parameters'] = args.find_unused_parameters
43 | elif args.ddp_backend == 'no_c10d':
44 | ddp_class = LegacyDistributedDataParallel
45 | init_kwargs = dict(
46 | module=model,
47 | world_size=args.distributed_world_size,
48 | buffer_size=2**28,
49 | )
50 | else:
51 | raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)
52 |
53 | class _DistributedFairseqModel(ddp_class):
54 | """Extend DistributedDataParallel to check for missing
55 | attributes in the wrapped module."""
56 |
57 | def __init__(self, *args, **kwargs):
58 | super().__init__(*args, **kwargs)
59 |
60 | def __getattr__(self, name):
61 | wrapped_module = super().__getattr__('module')
62 | if hasattr(wrapped_module, name):
63 | return getattr(wrapped_module, name)
64 | return super().__getattr__(name)
65 |
66 | return _DistributedFairseqModel(**init_kwargs)
67 |
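The generated subclass only overrides __getattr__, so any attribute that DistributedDataParallel itself does not define falls through to the wrapped module. A minimal standalone sketch of that forwarding pattern (a plain nn.Module wrapper, not the fairseq class itself):

import torch.nn as nn

class Wrapper(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module

    def __getattr__(self, name):
        wrapped = super().__getattr__('module')   # nn.Module keeps submodules in _modules
        if hasattr(wrapped, name):
            return getattr(wrapped, name)
        return super().__getattr__(name)

class Inner(nn.Module):
    def custom_method(self):
        return 'hello from the wrapped module'

print(Wrapper(Inner()).custom_method())           # forwarded to Inner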
--------------------------------------------------------------------------------
/fairseq/criterions/binary_cross_entropy.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import math
7 | import torch
8 | import torch.nn.functional as F
9 |
10 | from fairseq import utils
11 |
12 | from . import FairseqCriterion, register_criterion
13 |
14 |
15 | @register_criterion('binary_cross_entropy')
16 | class BinaryCrossEntropyCriterion(FairseqCriterion):
17 |
18 | def __init__(self, args, task):
19 | super().__init__(args, task)
20 |
21 | def forward(self, model, sample, reduce=True):
22 | """Compute the loss for the given sample.
23 |
24 | Returns a tuple with three elements:
25 | 1) the loss
26 | 2) the sample size, which is used as the denominator for the gradient
27 | 3) logging outputs to display while training
28 | """
29 | net_output = model(**sample['net_input'])
30 | logits = model.get_logits(net_output).float()
31 | target = model.get_targets(sample, net_output, expand_steps=False).float()
32 |
33 | if hasattr(model, 'get_target_weights'):
34 | weights = model.get_target_weights(target, net_output)
35 | if torch.is_tensor(weights):
36 | weights = weights.float()
37 | else:
38 | weights = 1.
39 |
40 | loss = F.binary_cross_entropy_with_logits(logits, target, reduce=False)
41 |
42 | loss = loss * weights
43 |
44 | if reduce:
45 | loss = loss.sum()
46 |
47 | sample_size = target.numel()
48 | logging_output = {
49 | 'loss': utils.item(loss.data) if reduce else loss.data,
50 | 'ntokens': sample_size,
51 | 'nsentences': logits.size(0),
52 | 'sample_size': sample_size,
53 | }
54 | return loss, sample_size, logging_output
55 |
56 | @staticmethod
57 | def aggregate_logging_outputs(logging_outputs):
58 | """Aggregate logging outputs from data parallel training."""
59 | loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
60 | ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
61 | nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
62 | sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
63 | agg_output = {
64 | 'loss': loss_sum / sample_size / math.log(2),
65 | 'ntokens': ntokens,
66 | 'nsentences': nsentences,
67 | 'sample_size': sample_size,
68 | }
69 | if sample_size != ntokens:
70 | agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
71 | return agg_output
72 |
--------------------------------------------------------------------------------
/fairseq/criterions/cross_entropy.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import math
7 | import torch.nn.functional as F
8 |
9 | from fairseq import utils
10 |
11 | from . import FairseqCriterion, register_criterion
12 |
13 |
14 | @register_criterion('cross_entropy')
15 | class CrossEntropyCriterion(FairseqCriterion):
16 |
17 | def __init__(self, args, task):
18 | super().__init__(args, task)
19 |
20 | def forward(self, model, sample, reduce=True):
21 | """Compute the loss for the given sample.
22 |
23 | Returns a tuple with three elements:
24 | 1) the loss
25 | 2) the sample size, which is used as the denominator for the gradient
26 | 3) logging outputs to display while training
27 | """
28 | net_output = model(**sample['net_input'])
29 | loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
30 | sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
31 | logging_output = {
32 | 'loss': utils.item(loss.data) if reduce else loss.data,
33 | 'ntokens': sample['ntokens'],
34 | 'nsentences': sample['target'].size(0),
35 | 'sample_size': sample_size,
36 | }
37 | return loss, sample_size, logging_output
38 |
39 | def compute_loss(self, model, net_output, sample, reduce=True):
40 | lprobs = model.get_normalized_probs(net_output, log_probs=True)
41 | lprobs = lprobs.view(-1, lprobs.size(-1))
42 | target = model.get_targets(sample, net_output).view(-1)
43 | loss = F.nll_loss(
44 | lprobs,
45 | target,
46 | ignore_index=self.padding_idx,
47 | reduction='sum' if reduce else 'none',
48 | )
49 | return loss, loss
50 |
51 | @staticmethod
52 | def aggregate_logging_outputs(logging_outputs):
53 | """Aggregate logging outputs from data parallel training."""
54 | loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
55 | ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
56 | nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
57 | sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
58 | agg_output = {
59 | 'loss': loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.,
60 | 'ntokens': ntokens,
61 | 'nsentences': nsentences,
62 | 'sample_size': sample_size,
63 | }
64 | if sample_size != ntokens:
65 | agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
66 | return agg_output
67 |
--------------------------------------------------------------------------------
/fairseq/data/transform_eos_lang_pair_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | from . import FairseqDataset
8 | from typing import Optional
9 |
10 |
11 | class TransformEosLangPairDataset(FairseqDataset):
12 |     """A :class:`~fairseq.data.FairseqDataset` wrapper that transforms the eos/bos
13 |     symbols on collated samples of a language pair dataset.
14 |
15 | Note that the transformation is applied in :func:`collater`.
16 |
17 | Args:
18 | dataset (~fairseq.data.FairseqDataset): dataset that collates sample into
19 | LanguagePairDataset schema
20 | src_eos (int): original source end-of-sentence symbol index to be replaced
21 | new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol
22 | tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced
23 | new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the
24 | beginning of 'prev_output_tokens'
25 | """
26 |
27 | def __init__(
28 | self,
29 | dataset: FairseqDataset,
30 | src_eos: int,
31 | new_src_eos: Optional[int] = None,
32 | tgt_bos: Optional[int] = None,
33 | new_tgt_bos: Optional[int] = None,
34 | ):
35 | self.dataset = dataset
36 | self.src_eos = src_eos
37 | self.new_src_eos = new_src_eos
38 | self.tgt_bos = tgt_bos
39 | self.new_tgt_bos = new_tgt_bos
40 |
41 | def __getitem__(self, index):
42 | return self.dataset[index]
43 |
44 | def __len__(self):
45 | return len(self.dataset)
46 |
47 | def collater(self, samples):
48 | samples = self.dataset.collater(samples)
49 |
50 | # TODO: support different padding direction
51 | if self.new_src_eos is not None:
52 |             assert (samples['net_input']['src_tokens'][:, -1] != self.src_eos).sum() == 0
53 | samples['net_input']['src_tokens'][:, -1] = self.new_src_eos
54 |
55 | if self.new_tgt_bos is not None:
56 | assert (samples['net_input']['prev_output_tokens'][:, 0] != self.tgt_bos).sum() == 0
57 | samples['net_input']['prev_output_tokens'][:, 0] = self.new_tgt_bos
58 |
59 | return samples
60 |
61 | def num_tokens(self, index):
62 | return self.dataset.num_tokens(index)
63 |
64 | def size(self, index):
65 | return self.dataset.size(index)
66 |
67 | def ordered_indices(self):
68 | return self.dataset.ordered_indices()
69 |
70 | @property
71 | def supports_prefetch(self):
72 | return getattr(self.dataset, 'supports_prefetch', False)
73 |
74 | def prefetch(self, indices):
75 | return self.dataset.prefetch(indices)
76 |
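For batches where the source side is padded on the left (the fairseq default), the source eos sits in the last column of src_tokens and the target bos in the first column of prev_output_tokens, which is all the collater rewrites. A toy tensor-level illustration of the same replacement, with made-up symbol indices:

import torch

src_eos, new_src_eos = 2, 7
tgt_bos, new_tgt_bos = 0, 9

src_tokens = torch.tensor([[4, 5, 2],
                           [1, 6, 2]])          # eos (2) is the last source token
prev_output_tokens = torch.tensor([[0, 8, 3],
                                   [0, 4, 5]])  # bos (0) starts the shifted target

assert (src_tokens[:, -1] != src_eos).sum() == 0
src_tokens[:, -1] = new_src_eos

assert (prev_output_tokens[:, 0] != tgt_bos).sum() == 0
prev_output_tokens[:, 0] = new_tgt_bos

print(src_tokens)              # last column is now 7
print(prev_output_tokens)      # first column is now 9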
--------------------------------------------------------------------------------
/fairseq/registry.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import argparse
7 |
8 |
9 | REGISTRIES = {}
10 |
11 |
12 | def setup_registry(
13 | registry_name: str,
14 | base_class=None,
15 | default=None,
16 | ):
17 | assert registry_name.startswith('--')
18 | registry_name = registry_name[2:].replace('-', '_')
19 |
20 | REGISTRY = {}
21 | REGISTRY_CLASS_NAMES = set()
22 |
23 | # maintain a registry of all registries
24 | if registry_name in REGISTRIES:
25 | return # registry already exists
26 | REGISTRIES[registry_name] = {
27 | 'registry': REGISTRY,
28 | 'default': default,
29 | }
30 |
31 | def build_x(args, *extra_args, **extra_kwargs):
32 | choice = getattr(args, registry_name, None)
33 | if choice is None:
34 | return None
35 | cls = REGISTRY[choice]
36 | if hasattr(cls, 'build_' + registry_name):
37 | builder = getattr(cls, 'build_' + registry_name)
38 | else:
39 | builder = cls
40 | set_defaults(args, cls)
41 | return builder(args, *extra_args, **extra_kwargs)
42 |
43 | def register_x(name):
44 |
45 | def register_x_cls(cls):
46 | if name in REGISTRY:
47 | raise ValueError('Cannot register duplicate {} ({})'.format(registry_name, name))
48 | if cls.__name__ in REGISTRY_CLASS_NAMES:
49 | raise ValueError(
50 | 'Cannot register {} with duplicate class name ({})'.format(
51 | registry_name, cls.__name__,
52 | )
53 | )
54 | if base_class is not None and not issubclass(cls, base_class):
55 | raise ValueError('{} must extend {}'.format(cls.__name__, base_class.__name__))
56 | REGISTRY[name] = cls
57 | REGISTRY_CLASS_NAMES.add(cls.__name__)
58 | return cls
59 |
60 | return register_x_cls
61 |
62 | return build_x, register_x, REGISTRY
63 |
64 |
65 | def set_defaults(args, cls):
66 | """Helper to set default arguments based on *add_args*."""
67 | if not hasattr(cls, 'add_args'):
68 | return
69 | parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, allow_abbrev=False)
70 | cls.add_args(parser)
71 | # copied from argparse.py:
72 | defaults = argparse.Namespace()
73 | for action in parser._actions:
74 | if action.dest is not argparse.SUPPRESS:
75 | if not hasattr(defaults, action.dest):
76 | if action.default is not argparse.SUPPRESS:
77 | setattr(defaults, action.dest, action.default)
78 | for key, default_value in vars(defaults).items():
79 | if not hasattr(args, key):
80 | setattr(args, key, default_value)
81 |
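setup_registry returns a (build_x, register_x, REGISTRY) triple; fairseq calls it once per pluggable component (criterions, optimizers, LR schedulers, and so on). A sketch of wiring up and using a new registry; the '--tokenizer-backend' name and the class below are hypothetical:

import argparse
from fairseq.registry import setup_registry

build_backend, register_backend, BACKENDS = setup_registry(
    '--tokenizer-backend', base_class=None, default=None)

@register_backend('whitespace')                  # hypothetical choice name
class WhitespaceBackend(object):
    def __init__(self, args):
        self.args = args

args = argparse.Namespace(tokenizer_backend='whitespace')
backend = build_backend(args)
print(type(backend).__name__)                    # WhitespaceBackend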
--------------------------------------------------------------------------------
/fairseq/data/plasma_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import subprocess
7 | import tempfile
8 |
9 |
10 | class PlasmaArray(object):
11 | """
12 | Wrapper around numpy arrays that automatically moves the data to shared
13 | memory upon serialization. This is particularly helpful when passing numpy
14 | arrays through multiprocessing, so that data is not unnecessarily
15 | duplicated or pickled.
16 | """
17 |
18 | def __init__(self, array):
19 | super().__init__()
20 | self.array = array
21 | self.disable = array.nbytes < 134217728 # disable for arrays <128MB
22 | self.object_id = None
23 | self.path = None
24 |
25 | # variables with underscores shouldn't be pickled
26 | self._client = None
27 | self._server = None
28 | self._server_tmp = None
29 | self._plasma = None
30 |
31 | @property
32 | def plasma(self):
33 | if self._plasma is None and not self.disable:
34 | try:
35 | import pyarrow.plasma as plasma
36 | self._plasma = plasma
37 | except ImportError:
38 | self._plasma = None
39 | return self._plasma
40 |
41 | def start_server(self):
42 | if self.plasma is None or self._server is not None:
43 | return
44 | assert self.object_id is None
45 | assert self.path is None
46 | self._server_tmp = tempfile.NamedTemporaryFile()
47 | self.path = self._server_tmp.name
48 | self._server = subprocess.Popen([
49 | 'plasma_store',
50 | '-m', str(int(1.05 * self.array.nbytes)),
51 | '-s', self.path,
52 | ])
53 |
54 | @property
55 | def client(self):
56 | if self._client is None:
57 | assert self.path is not None
58 | self._client = self.plasma.connect(self.path)
59 | return self._client
60 |
61 | def __getstate__(self):
62 | if self.plasma is None:
63 | return self.__dict__
64 | if self.object_id is None:
65 | self.start_server()
66 | self.object_id = self.client.put(self.array)
67 | state = self.__dict__.copy()
68 | del state['array']
69 | state['_client'] = None
70 | state['_server'] = None
71 | state['_server_tmp'] = None
72 | state['_plasma'] = None
73 | return state
74 |
75 | def __setstate__(self, state):
76 | self.__dict__.update(state)
77 | if self.plasma is None:
78 | return
79 | self.array = self.client.get(self.object_id)
80 |
81 | def __del__(self):
82 | if self._server is not None:
83 | self._server.kill()
84 | self._server = None
85 | self._server_tmp.close()
86 | self._server_tmp = None
87 |
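The point of the wrapper is that pickling it ships only a plasma object id, with the array itself held by a plasma_store shared-memory server, so DataLoader workers do not each get their own copy. A sketch, assuming pyarrow is installed and the plasma_store binary is on PATH; without them (or for arrays under 128MB) the wrapper falls back to ordinary pickling:

import pickle
import numpy as np
from fairseq.data.plasma_utils import PlasmaArray

# the array just needs to exceed the 128MB threshold for plasma to be used
arr = PlasmaArray(np.zeros(200 * 1024 * 1024, dtype=np.uint8))
payload = pickle.dumps(arr)        # serializes an object id, not the 200MB buffer
restored = pickle.loads(payload)   # re-attaches to the shared-memory copy
print(restored.array.shape)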
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import math
7 |
8 | from . import FairseqLRScheduler, register_lr_scheduler
9 |
10 |
11 | @register_lr_scheduler('triangular')
12 | class TriangularSchedule(FairseqLRScheduler):
13 | """Assign LR based on a triangular cyclical schedule.
14 |
15 | See https://arxiv.org/pdf/1506.01186.pdf for details.
16 | """
17 |
18 | def __init__(self, args, optimizer):
19 | super().__init__(args, optimizer)
20 | if len(args.lr) > 1:
21 | raise ValueError(
22 | 'Cannot use a fixed learning rate schedule with triangular.'
23 | ' Consider --lr-scheduler=fixed instead.'
24 | )
25 |
26 | lr = args.lr[0]
27 |
28 | assert args.max_lr > lr, 'max_lr must be more than lr'
29 | self.min_lr = lr
30 | self.max_lr = args.max_lr
31 | self.stepsize = args.lr_period_updates // 2
32 | self.lr_shrink = args.lr_shrink
33 | self.shrink_min = args.shrink_min
34 |
35 | # initial learning rate
36 | self.lr = self.min_lr
37 | self.optimizer.set_lr(self.lr)
38 |
39 | @staticmethod
40 | def add_args(parser):
41 | """Add arguments to the parser for this LR scheduler."""
42 | # fmt: off
43 | parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
44 | help='max learning rate, must be more than args.lr')
45 | parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
46 | help='initial number of updates per period (cycle length)')
47 | parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
48 | help='shrink factor for annealing')
49 | parser.add_argument('--shrink-min', action='store_true',
50 | help='if set, also shrinks min lr')
51 | # fmt: on
52 |
53 | def step(self, epoch, val_loss=None):
54 | """Update the learning rate at the end of the given epoch."""
55 | super().step(epoch, val_loss)
56 | # we don't change the learning rate at epoch boundaries
57 | return self.optimizer.get_lr()
58 |
59 | def step_update(self, num_updates):
60 | """Update the learning rate after each update."""
61 | cycle = math.floor(num_updates / (2 * self.stepsize))
62 |
63 | lr_shrink = self.lr_shrink ** cycle
64 | max_lr = self.max_lr * lr_shrink
65 | if self.shrink_min:
66 | min_lr = self.min_lr * lr_shrink
67 | else:
68 | min_lr = self.min_lr
69 |
70 | x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1)
71 | self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x))
72 |
73 | self.optimizer.set_lr(self.lr)
74 | return self.lr
75 |
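step_update implements the triangular waveform from the paper linked above: within each cycle of 2 * stepsize updates the LR rises linearly from min_lr to max_lr and back, and after every completed cycle the peak (and, with --shrink-min, the floor) is multiplied by lr_shrink. A tiny pure-Python sketch of the formula with illustrative numbers:

def triangular_lr(num_updates, min_lr=0.01, max_lr=0.1, stepsize=1000, lr_shrink=0.5):
    cycle = num_updates // (2 * stepsize)
    peak = max_lr * lr_shrink ** cycle             # peak decays once per full cycle
    x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)
    return min_lr + (peak - min_lr) * max(0, 1 - x)

print(round(triangular_lr(0), 6))      # 0.01  (start of the cycle, at the floor)
print(round(triangular_lr(1000), 6))   # 0.1   (mid-cycle, at the peak)
print(round(triangular_lr(2000), 6))   # 0.01  (cycle boundary, back at the floor)
print(round(triangular_lr(3000), 6))   # 0.05  (next peak, shrunk by lr_shrink)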
--------------------------------------------------------------------------------
/fairseq/models/fairseq_decoder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn as nn
7 |
8 | from fairseq import utils
9 |
10 |
11 | class FairseqDecoder(nn.Module):
12 | """Base class for decoders."""
13 |
14 | def __init__(self, dictionary):
15 | super().__init__()
16 | self.dictionary = dictionary
17 | self.onnx_trace = False
18 |
19 | def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
20 | """
21 | Args:
22 | prev_output_tokens (LongTensor): shifted output tokens of shape
23 | `(batch, tgt_len)`, for teacher forcing
24 | encoder_out (dict, optional): output from the encoder, used for
25 | encoder-side attention
26 |
27 | Returns:
28 | tuple:
29 | - the decoder's output of shape `(batch, tgt_len, vocab)`
30 | - a dictionary with any model-specific outputs
31 | """
32 | x, extra = self.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs)
33 | x = self.output_layer(x)
34 | return x, extra
35 |
36 | def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
37 | """
38 | Returns:
39 | tuple:
40 | - the decoder's features of shape `(batch, tgt_len, embed_dim)`
41 | - a dictionary with any model-specific outputs
42 | """
43 | raise NotImplementedError
44 |
45 | def output_layer(self, features, **kwargs):
46 | """
47 | Project features to the default output size, e.g., vocabulary size.
48 |
49 | Args:
50 | features (Tensor): features returned by *extract_features*.
51 | """
52 | raise NotImplementedError
53 |
54 | def get_normalized_probs(self, net_output, log_probs, sample):
55 | """Get normalized probabilities (or log probs) from a net's output."""
56 |
57 | if hasattr(self, 'adaptive_softmax') and self.adaptive_softmax is not None:
58 | if sample is not None:
59 | assert 'target' in sample
60 | target = sample['target']
61 | else:
62 | target = None
63 | out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
64 | return out.exp_() if not log_probs else out
65 |
66 | logits = net_output[0]
67 | if log_probs:
68 | return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
69 | else:
70 | return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
71 |
72 | def max_positions(self):
73 | """Maximum input length supported by the decoder."""
74 | return 1e6 # an arbitrary large number
75 |
76 | def upgrade_state_dict(self, state_dict):
77 | """Upgrade a (possibly old) state dict for new versions of fairseq."""
78 | return state_dict
79 |
80 | def prepare_for_onnx_export_(self):
81 | self.onnx_trace = True
82 |
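FairseqDecoder.forward is just extract_features followed by output_layer, so a subclass only has to provide those two pieces; get_normalized_probs and the rest come from the base class. A toy sketch of the minimum a subclass needs (the class below is hypothetical and ignores the encoder output entirely):

import torch.nn as nn
from fairseq.models import FairseqDecoder

class TinyDecoder(FairseqDecoder):
    """Toy decoder: embed the previous output tokens and project to the vocab."""

    def __init__(self, dictionary, embed_dim=32):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim)
        self.proj = nn.Linear(embed_dim, len(dictionary))

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        return self.embed(prev_output_tokens), {}   # (batch, tgt_len, embed_dim), extra dict

    def output_layer(self, features, **kwargs):
        return self.proj(features)                  # (batch, tgt_len, vocab)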
--------------------------------------------------------------------------------
/fairseq/data/lm_context_window_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from fairseq.data.monolingual_dataset import MonolingualDataset
10 |
11 | from . import FairseqDataset
12 |
13 |
14 | class LMContextWindowDataset(FairseqDataset):
15 | """Wraps a MonolingualDataset and provides more context for evaluation."""
16 |
17 | def __init__(self, dataset, tokens_per_sample, context_window, pad_idx):
18 | assert isinstance(dataset, MonolingualDataset)
19 | assert context_window > 0
20 | self.dataset = dataset
21 | self.tokens_per_sample = tokens_per_sample
22 | self.context_window = context_window
23 | self.pad_idx = pad_idx
24 | self.prev_tokens = np.empty([0])
25 |
26 | def __getitem__(self, index):
27 | return self.dataset[index]
28 |
29 | def __len__(self):
30 | return len(self.dataset)
31 |
32 | def collater(self, samples):
33 | sample = self.dataset.collater(samples)
34 |
35 | pad = self.pad_idx
36 | max_sample_len = self.tokens_per_sample + self.context_window
37 |
38 | bsz, tsz = sample['net_input']['src_tokens'].shape
39 | start_idxs = [0] * bsz
40 | toks = sample['net_input']['src_tokens']
41 | lengths = sample['net_input']['src_lengths']
42 | tgt = sample['target']
43 | new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64)
44 | new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64)
45 | sample_lens = toks.ne(pad).long().sum(dim=1).cpu()
46 | for i in range(bsz):
47 | sample_len = sample_lens[i]
48 | extra = len(self.prev_tokens) + sample_len - max_sample_len
49 | if extra > 0:
50 | self.prev_tokens = self.prev_tokens[extra:]
51 | pads = np.full(self.context_window - len(self.prev_tokens), pad)
52 | new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads])
53 | new_tgt[i, len(self.prev_tokens):len(self.prev_tokens) + len(tgt[i])] = tgt[i]
54 | start_idxs[i] = len(self.prev_tokens)
55 | lengths[i] += len(self.prev_tokens)
56 | self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window:]
57 | sample['net_input']['src_tokens'] = torch.from_numpy(new_toks)
58 | sample['target'] = torch.from_numpy(new_tgt)
59 | sample['start_indices'] = start_idxs
60 |
61 | return sample
62 |
63 | def num_tokens(self, index):
64 | return self.dataset.num_tokens(index)
65 |
66 | def size(self, index):
67 | return self.dataset.size(index)
68 |
69 | def ordered_indices(self):
70 | # NOTE we don't shuffle the data to retain access to the previous dataset elements
71 | return np.arange(len(self.dataset))
72 |
73 | @property
74 | def supports_prefetch(self):
75 | return getattr(self.dataset, 'supports_prefetch', False)
76 |
77 | def prefetch(self, indices):
78 | return self.dataset.prefetch(indices)
79 |
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import FairseqLRScheduler, register_lr_scheduler
7 |
8 |
9 | @register_lr_scheduler('inverse_sqrt')
10 | class InverseSquareRootSchedule(FairseqLRScheduler):
11 | """Decay the LR based on the inverse square root of the update number.
12 |
13 | We also support a warmup phase where we linearly increase the learning rate
14 | from some initial learning rate (``--warmup-init-lr``) until the configured
15 |     learning rate (``--lr``). Thereafter we decay the LR proportionally to the inverse
16 |     square root of the update number, with the decay factor chosen so the decay starts at ``--lr``.
17 |
18 | During warmup::
19 |
20 | lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
21 | lr = lrs[update_num]
22 |
23 | After warmup::
24 |
25 | decay_factor = args.lr * sqrt(args.warmup_updates)
26 | lr = decay_factor / sqrt(update_num)
27 | """
28 |
29 | def __init__(self, args, optimizer):
30 | super().__init__(args, optimizer)
31 | if len(args.lr) > 1:
32 | raise ValueError(
33 | 'Cannot use a fixed learning rate schedule with inverse_sqrt.'
34 | ' Consider --lr-scheduler=fixed instead.'
35 | )
36 | warmup_end_lr = args.lr[0]
37 | if args.warmup_init_lr < 0:
38 | args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr
39 |
40 | # linearly warmup for the first args.warmup_updates
41 | self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
42 |
43 | # then, decay prop. to the inverse square root of the update number
44 | self.decay_factor = warmup_end_lr * args.warmup_updates**0.5
45 |
46 | # initial learning rate
47 | self.lr = args.warmup_init_lr
48 | self.optimizer.set_lr(self.lr)
49 |
50 | @staticmethod
51 | def add_args(parser):
52 | """Add arguments to the parser for this LR scheduler."""
53 | # fmt: off
54 | parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
55 | help='warmup the learning rate linearly for the first N updates')
56 | parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
57 | help='initial learning rate during warmup phase; default is args.lr')
58 | # fmt: on
59 |
60 | def step(self, epoch, val_loss=None):
61 | """Update the learning rate at the end of the given epoch."""
62 | super().step(epoch, val_loss)
63 | # we don't change the learning rate at epoch boundaries
64 | return self.optimizer.get_lr()
65 |
66 | def step_update(self, num_updates):
67 | """Update the learning rate after each update."""
68 | if num_updates < self.args.warmup_updates:
69 | self.lr = self.args.warmup_init_lr + num_updates*self.lr_step
70 | else:
71 | self.lr = self.decay_factor * num_updates**-0.5
72 | self.optimizer.set_lr(self.lr)
73 | return self.lr
74 |
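The decay factor is chosen so the two phases join smoothly: at update warmup_updates, decay_factor / sqrt(warmup_updates) equals the configured --lr. A short numeric check with illustrative values:

lr, warmup_init_lr, warmup_updates = 5e-4, 1e-7, 4000

lr_step = (lr - warmup_init_lr) / warmup_updates
decay_factor = lr * warmup_updates ** 0.5

def lr_at(update):
    if update < warmup_updates:
        return warmup_init_lr + update * lr_step   # linear warmup
    return decay_factor * update ** -0.5           # inverse square root decay

print(round(lr_at(0), 8))        # 1e-07
print(round(lr_at(4000), 8))     # 0.0005  (end of warmup equals the configured lr)
print(round(lr_at(16000), 8))    # 0.00025 (quadrupling the updates halves the lr)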
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import FairseqLRScheduler, register_lr_scheduler
7 |
8 |
9 | @register_lr_scheduler('polynomial_decay')
10 | class PolynomialDecaySchedule(FairseqLRScheduler):
11 | """Decay the LR on a fixed schedule."""
12 |
13 | def __init__(self, args, optimizer):
14 | super().__init__(args, optimizer)
15 |
16 | # set defaults
17 | args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0
18 |
19 | self.lr = args.lr[0]
20 | if args.warmup_updates > 0:
21 | self.warmup_factor = 1. / args.warmup_updates
22 | else:
23 | self.warmup_factor = 1
24 | self.end_learning_rate = args.end_learning_rate
25 | self.total_num_update = args.total_num_update
26 | self.power = args.power
27 | self.optimizer.set_lr(self.warmup_factor * self.lr)
28 |
29 | @staticmethod
30 | def add_args(parser):
31 | """Add arguments to the parser for this LR scheduler."""
32 | parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
33 | help='force annealing at specified epoch')
34 | parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
35 | help='warmup the learning rate linearly for the first N updates')
36 | parser.add_argument('--end-learning-rate', default=0.0, type=float)
37 | parser.add_argument('--power', default=1.0, type=float)
38 | parser.add_argument('--total-num-update', default=1000000, type=int)
39 |
40 | def get_next_lr(self, epoch):
41 | lrs = self.args.lr
42 | if self.args.force_anneal is None or epoch < self.args.force_anneal:
43 | # use fixed LR schedule
44 | next_lr = lrs[min(epoch, len(lrs) - 1)]
45 | else:
46 |             # anneal based on lr_shrink
47 | next_lr = self.optimizer.get_lr()
48 | return next_lr
49 |
50 | def step(self, epoch, val_loss=None):
51 | """Update the learning rate at the end of the given epoch."""
52 | super().step(epoch, val_loss)
53 | self.lr = self.get_next_lr(epoch)
54 | self.optimizer.set_lr(self.warmup_factor * self.lr)
55 | return self.optimizer.get_lr()
56 |
57 | def step_update(self, num_updates):
58 | """Update the learning rate after each update."""
59 | if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates:
60 | self.warmup_factor = num_updates / float(self.args.warmup_updates)
61 | lr = self.warmup_factor * self.lr
62 | elif num_updates >= self.total_num_update:
63 | lr = self.end_learning_rate
64 | else:
65 | warmup = self.args.warmup_updates
66 | lr_range = self.lr - self.end_learning_rate
67 | pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup)
68 | lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate
69 | self.optimizer.set_lr(lr)
70 | return self.optimizer.get_lr()
71 |
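After warmup the LR follows (lr - end_lr) * pct_remaining ** power + end_lr, so power=1.0 is a straight line from the peak down to --end-learning-rate at --total-num-update. A tiny numeric sketch with illustrative values:

def poly_lr(num_updates, lr=1e-4, end_lr=0.0, warmup=1000, total=10000, power=1.0):
    if num_updates <= warmup:
        return lr * num_updates / warmup                   # linear warmup
    if num_updates >= total:
        return end_lr
    pct_remaining = 1 - (num_updates - warmup) / (total - warmup)
    return (lr - end_lr) * pct_remaining ** power + end_lr

print(poly_lr(500))      # 5e-05   (halfway through warmup)
print(poly_lr(1000))     # 0.0001  (peak)
print(poly_lr(5500))     # 5e-05   (halfway through the linear decay)
print(poly_lr(10000))    # 0.0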
--------------------------------------------------------------------------------
/fairseq/modules/transformer_sentence_encoder_layer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 | from fairseq import utils
11 | from fairseq.modules import (
12 | LayerNorm,
13 | MultiheadAttention,
14 | )
15 |
16 |
17 | class TransformerSentenceEncoderLayer(nn.Module):
18 | """
19 | Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
20 | models.
21 | """
22 |
23 | def __init__(
24 | self,
25 | embedding_dim: float = 768,
26 | ffn_embedding_dim: float = 3072,
27 | num_attention_heads: float = 8,
28 | dropout: float = 0.1,
29 | attention_dropout: float = 0.1,
30 | activation_dropout: float = 0.1,
31 | activation_fn: str = 'relu',
32 | add_bias_kv: bool = False,
33 | add_zero_attn: bool = False,
34 | export: bool = False,
35 | ) -> None:
36 |
37 | super().__init__()
38 | # Initialize parameters
39 | self.embedding_dim = embedding_dim
40 | self.dropout = dropout
41 | self.activation_dropout = activation_dropout
42 |
43 | # Initialize blocks
44 | self.activation_fn = utils.get_activation_fn(activation_fn)
45 | self.self_attn = MultiheadAttention(
46 | self.embedding_dim,
47 | num_attention_heads,
48 | dropout=attention_dropout,
49 | add_bias_kv=add_bias_kv,
50 | add_zero_attn=add_zero_attn,
51 | self_attention=True
52 | )
53 |
54 | # layer norm associated with the self attention layer
55 | self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
56 | self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
57 | self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
58 |
59 | # layer norm associated with the position wise feed-forward NN
60 | self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
61 |
62 | def forward(
63 | self,
64 | x: torch.Tensor,
65 | self_attn_mask: torch.Tensor = None,
66 | self_attn_padding_mask: torch.Tensor = None,
67 | ):
68 | """
69 | LayerNorm is applied either before or after the self-attention/ffn
70 |         modules, similar to the original Transformer implementation.
71 | """
72 | residual = x
73 | x, attn = self.self_attn(
74 | query=x,
75 | key=x,
76 | value=x,
77 | key_padding_mask=self_attn_padding_mask,
78 | need_weights=False,
79 | attn_mask=self_attn_mask,
80 | )
81 | x = F.dropout(x, p=self.dropout, training=self.training)
82 | x = residual + x
83 | x = self.self_attn_layer_norm(x)
84 |
85 | residual = x
86 | x = self.activation_fn(self.fc1(x))
87 | x = F.dropout(x, p=self.activation_dropout, training=self.training)
88 | x = self.fc2(x)
89 | x = F.dropout(x, p=self.dropout, training=self.training)
90 | x = residual + x
91 | x = self.final_layer_norm(x)
92 | return x, attn
93 |
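The layer is a post-norm Transformer block: self-attention, residual, LayerNorm, then the two-layer FFN, residual, LayerNorm. A usage sketch, assuming fairseq is importable; note that, as with MultiheadAttention, the input is laid out as (seq_len, batch, embed_dim):

import torch
from fairseq.modules.transformer_sentence_encoder_layer import (
    TransformerSentenceEncoderLayer,
)

layer = TransformerSentenceEncoderLayer(
    embedding_dim=768, ffn_embedding_dim=3072, num_attention_heads=8)

x = torch.randn(10, 2, 768)     # (seq_len, batch, embed_dim)
out, attn = layer(x)            # attn is None because need_weights=False
print(out.shape)                # torch.Size([10, 2, 768])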
--------------------------------------------------------------------------------
/score.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | """
7 | BLEU scoring of generated translations against reference translations.
8 | """
9 |
10 | import argparse
11 | import os
12 | import sys
13 |
14 | from fairseq import bleu
15 | from fairseq.data import dictionary
16 |
17 |
18 | def get_parser():
19 | parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
20 | # fmt: off
21 | parser.add_argument('-s', '--sys', default='-', help='system output')
22 | parser.add_argument('-r', '--ref', required=True, help='references')
23 | parser.add_argument('-o', '--order', default=4, metavar='N',
24 | type=int, help='consider ngrams up to this order')
25 | parser.add_argument('--ignore-case', action='store_true',
26 | help='case-insensitive scoring')
27 | parser.add_argument('--sacrebleu', action='store_true',
28 | help='score with sacrebleu')
29 | parser.add_argument('--sentence-bleu', action='store_true',
30 | help='report sentence-level BLEUs (i.e., with +1 smoothing)')
31 | # fmt: on
32 | return parser
33 |
34 |
35 | def main():
36 | parser = get_parser()
37 | args = parser.parse_args()
38 | print(args)
39 |
40 | assert args.sys == '-' or os.path.exists(args.sys), \
41 | "System output file {} does not exist".format(args.sys)
42 | assert os.path.exists(args.ref), \
43 | "Reference file {} does not exist".format(args.ref)
44 |
45 | dict = dictionary.Dictionary()
46 |
47 | def readlines(fd):
48 | for line in fd.readlines():
49 | if args.ignore_case:
50 | yield line.lower()
51 | else:
52 | yield line
53 |
54 | if args.sacrebleu:
55 | import sacrebleu
56 |
57 | def score(fdsys):
58 | with open(args.ref) as fdref:
59 | print(sacrebleu.corpus_bleu(fdsys, [fdref]))
60 | elif args.sentence_bleu:
61 | def score(fdsys):
62 | with open(args.ref) as fdref:
63 | scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
64 | for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))):
65 | scorer.reset(one_init=True)
66 | sys_tok = dict.encode_line(sys_tok)
67 | ref_tok = dict.encode_line(ref_tok)
68 | scorer.add(ref_tok, sys_tok)
69 | print(i, scorer.result_string(args.order))
70 | else:
71 | def score(fdsys):
72 | with open(args.ref) as fdref:
73 | scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
74 | for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
75 | sys_tok = dict.encode_line(sys_tok)
76 | ref_tok = dict.encode_line(ref_tok)
77 | scorer.add(ref_tok, sys_tok)
78 | print(scorer.result_string(args.order))
79 |
80 | if args.sys == '-':
81 | score(sys.stdin)
82 | else:
83 | with open(args.sys, 'r') as f:
84 | score(f)
85 |
86 |
87 | if __name__ == '__main__':
88 | main()
89 |
--------------------------------------------------------------------------------
/fairseq/clib/libbleu/libbleu.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2017-present, Facebook, Inc.
3 | * All rights reserved.
4 | *
5 | * This source code is licensed under the license found in the
6 | * LICENSE file in the root directory of this source tree.
7 | */
8 |
9 | #include