├── .DS_Store
├── .gitignore
├── README.md
├── assets
├── .DS_Store
└── intro.jpg
├── examples
├── .DS_Store
├── .gitignore
├── __init__.py
├── adaptive_span
│ ├── README.md
│ ├── __init__.py
│ ├── adagrad_with_grad_clip.py
│ ├── adaptive_span_attention.py
│ ├── adaptive_span_loss.py
│ ├── adaptive_span_model.py
│ ├── adaptive_span_model_wrapper.py
│ └── truncated_bptt_lm_task.py
├── backtranslation
│ ├── README.md
│ ├── deduplicate_lines.py
│ ├── extract_bt_data.py
│ ├── prepare-de-monolingual.sh
│ ├── prepare-wmt18en2de.sh
│ ├── sacrebleu.sh
│ └── tokenized_bleu.sh
├── bart
│ ├── README.glue.md
│ ├── README.md
│ └── README.summarization.md
├── byte_level_bpe
│ ├── README.md
│ ├── get_bitext.py
│ ├── get_data.sh
│ └── gru_transformer.py
├── camembert
│ └── README.md
├── constrained_decoding
│ ├── README.md
│ ├── normalize.py
│ └── tok.py
├── conv_seq2seq
│ └── README.md
├── criss
│ ├── .DS_Store
│ ├── README.md
│ ├── download_and_preprocess_flores_test.sh
│ ├── download_and_preprocess_tatoeba.sh
│ ├── mining
│ │ ├── mine.py
│ │ └── mine_example.sh
│ ├── save_encoder.py
│ ├── sentence_retrieval
│ │ ├── encoder_analysis.py
│ │ └── sentence_retrieval_tatoeba.sh
│ └── unsupervised_mt
│ │ └── eval.sh
├── cross_lingual_language_model
│ └── README.md
├── fast_noisy_channel
│ ├── README.md
│ ├── __init__.py
│ ├── noisy_channel_beam_search.py
│ ├── noisy_channel_sequence_generator.py
│ └── noisy_channel_translation.py
├── gottbert
│ └── README.md
├── joint_alignment_translation
│ ├── README.md
│ └── prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh
├── language_model
│ ├── README.adaptive_inputs.md
│ ├── README.conv.md
│ ├── README.md
│ └── prepare-wikitext-103.sh
├── latent_depth
│ ├── .DS_Store
│ ├── README.md
│ └── latent_depth_src
│ │ ├── .DS_Store
│ │ ├── __init__.py
│ │ ├── loss
│ │ ├── __init__.py
│ │ └── latent_depth.py
│ │ ├── models
│ │ ├── __init__.py
│ │ ├── latent_multilingual_transformer.py
│ │ └── latent_transformer.py
│ │ ├── modules
│ │ ├── __init__.py
│ │ └── latent_layers.py
│ │ └── multilingual_translation_latent_depth.py
├── layerdrop
│ └── README.md
├── linformer
│ ├── .DS_Store
│ ├── README.md
│ └── linformer_src
│ │ ├── .DS_Store
│ │ ├── __init__.py
│ │ ├── models
│ │ ├── __init__.py
│ │ └── linformer_roberta.py
│ │ └── modules
│ │ ├── __init__.py
│ │ ├── linformer_sentence_encoder.py
│ │ ├── linformer_sentence_encoder_layer.py
│ │ └── multihead_linear_attention.py
├── m2m_100
│ ├── .DS_Store
│ ├── README.md
│ ├── install_dependecies.sh
│ ├── process_data
│ │ ├── clean_histogram.py
│ │ ├── dedup_data.py
│ │ └── remove_too_much_punc.py
│ ├── tok.sh
│ └── tokenizers
│ │ ├── README.md
│ │ ├── seg_ja.sh
│ │ ├── seg_ko.sh
│ │ ├── thirdparty
│ │ └── .gitignore
│ │ ├── tokenize_indic.py
│ │ ├── tokenize_thai.py
│ │ ├── tokenize_zh.py
│ │ └── tokenizer_ar.sh
├── mbart
│ └── README.md
├── megatron_11b
│ ├── README.md
│ └── detok.py
├── multilingual
│ ├── ML50_langs.txt
│ ├── README.md
│ ├── data_scripts
│ │ ├── README.md
│ │ ├── binarize.py
│ │ ├── check_iswlt_test_data.py
│ │ ├── check_self_overlaps.py
│ │ ├── check_valid_test_overlaps.py
│ │ ├── dedup_all.py
│ │ ├── download_ML50_v1.sh
│ │ ├── download_af_xh.sh
│ │ ├── download_flores_data.sh
│ │ ├── download_iitb.sh
│ │ ├── download_iwslt_and_extract.sh
│ │ ├── download_lotus.sh
│ │ ├── download_ted_and_extract.py
│ │ ├── download_wat19_my.sh
│ │ ├── download_wmt19_and_before.py
│ │ ├── download_wmt20.sh
│ │ ├── preprocess_ML50_v1.sh
│ │ ├── remove_valid_test_in_train.py
│ │ ├── requirement.txt
│ │ └── utils
│ │ │ ├── dedup.py
│ │ │ ├── fasttext_multi_filter.py
│ │ │ └── strip_sgm.sh
│ ├── finetune_multilingual_model.sh
│ ├── multilingual_fairseq_gen.sh
│ └── train_multilingual_model.sh
├── noisychannel
│ ├── README.md
│ ├── __init__.py
│ ├── rerank.py
│ ├── rerank_generate.py
│ ├── rerank_options.py
│ ├── rerank_score_bw.py
│ ├── rerank_score_lm.py
│ ├── rerank_tune.py
│ └── rerank_utils.py
├── nonautoregressive_translation
│ ├── README.md
│ └── scripts.md
├── paraphraser
│ ├── README.md
│ └── paraphrase.py
├── pay_less_attention_paper
│ └── README.md
├── pointer_generator
│ ├── README.md
│ ├── README.xsum.md
│ ├── pointer_generator_src
│ │ ├── __init__.py
│ │ └── transformer_pg.py
│ ├── postprocess.py
│ └── preprocess.py
├── quant_noise
│ ├── README.md
│ └── transformer_quantization_config.yaml
├── roberta
│ ├── .DS_Store
│ ├── README.custom_classification.md
│ ├── README.glue.md
│ ├── README.md
│ ├── README.pretraining.md
│ ├── README.race.md
│ ├── commonsense_qa
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── commonsense_qa_task.py
│ │ └── download_cqa_data.sh
│ ├── multiprocessing_bpe_encoder.py
│ ├── preprocess_GLUE_tasks.sh
│ ├── preprocess_RACE.py
│ ├── preprocess_RACE.sh
│ └── wsc
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── wsc_criterion.py
│ │ ├── wsc_task.py
│ │ └── wsc_utils.py
├── rxf
│ ├── README.md
│ ├── __init__.py
│ └── rxf_src
│ │ ├── __init__.py
│ │ ├── label_smoothed_cross_entropy_r3f.py
│ │ └── sentence_prediction_r3f.py
├── scaling_nmt
│ └── README.md
├── simultaneous_translation
│ ├── .DS_Store
│ ├── README.md
│ ├── __init__.py
│ ├── criterions
│ │ ├── __init__.py
│ │ └── label_smoothed_cross_entropy_latency_augmented.py
│ ├── docs
│ │ ├── baseline.md
│ │ └── evaluation.md
│ ├── eval
│ │ ├── .DS_Store
│ │ ├── __init__.py
│ │ ├── agents
│ │ │ ├── __init__.py
│ │ │ ├── agent.py
│ │ │ ├── simul_trans_agent.py
│ │ │ ├── simul_trans_text_agent.py
│ │ │ └── word_splitter.py
│ │ ├── client.py
│ │ ├── eval_latency.py
│ │ ├── evaluate.py
│ │ ├── scorers
│ │ │ ├── __init__.py
│ │ │ ├── scorer.py
│ │ │ └── text_scorer.py
│ │ └── server.py
│ ├── models
│ │ ├── __init__.py
│ │ └── transformer_monotonic_attention.py
│ ├── modules
│ │ ├── __init__.py
│ │ ├── monotonic_multihead_attention.py
│ │ └── monotonic_transformer_layer.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── functions.py
│ │ └── latency.py
├── speech_recognition
│ ├── .DS_Store
│ ├── README.md
│ ├── __init__.py
│ ├── criterions
│ │ ├── ASG_loss.py
│ │ ├── __init__.py
│ │ └── cross_entropy_acc.py
│ ├── data
│ │ ├── __init__.py
│ │ ├── asr_dataset.py
│ │ ├── collaters.py
│ │ ├── data_utils.py
│ │ └── replabels.py
│ ├── datasets
│ │ ├── asr_prep_json.py
│ │ └── prepare-librispeech.sh
│ ├── infer.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── vggtransformer.py
│ │ └── w2l_conv_glu_enc.py
│ ├── tasks
│ │ ├── __init__.py
│ │ └── speech_recognition.py
│ ├── utils
│ │ └── wer_utils.py
│ └── w2l_decoder.py
├── speech_to_text
│ ├── README.md
│ ├── data_utils.py
│ ├── docs
│ │ ├── covost_example.md
│ │ ├── librispeech_example.md
│ │ └── mustc_example.md
│ ├── prep_covost_data.py
│ ├── prep_librispeech_data.py
│ └── prep_mustc_data.py
├── stories
│ └── README.md
├── translation
│ ├── README.md
│ ├── prepare-iwslt14.sh
│ ├── prepare-iwslt17-multilingual.sh
│ ├── prepare-wmt14en2de.sh
│ └── prepare-wmt14en2fr.sh
├── translation_moe
│ ├── README.md
│ ├── score.py
│ └── translation_moe_src
│ │ ├── __init__.py
│ │ ├── logsumexp_moe.py
│ │ ├── mean_pool_gating_network.py
│ │ └── translation_moe.py
├── truncated_bptt
│ ├── README.md
│ ├── __init__.py
│ ├── transformer_xl_model.py
│ └── truncated_bptt_lm_task.py
├── unsupervised_quality_estimation
│ ├── README.md
│ ├── aggregate_scores.py
│ ├── meteor.py
│ └── repeat_lines.py
├── wav2vec
│ ├── .DS_Store
│ ├── README.md
│ ├── config
│ │ ├── .DS_Store
│ │ ├── finetuning
│ │ │ ├── base_100h.yaml
│ │ │ ├── base_10h.yaml
│ │ │ ├── base_10m.yaml
│ │ │ ├── base_1h.yaml
│ │ │ ├── base_960h.yaml
│ │ │ ├── vox_100h.yaml
│ │ │ ├── vox_10h.yaml
│ │ │ ├── vox_10m.yaml
│ │ │ ├── vox_1h.yaml
│ │ │ └── vox_960h.yaml
│ │ └── pretraining
│ │ │ ├── wav2vec2_base_librispeech.yaml
│ │ │ └── wav2vec2_large_librivox.yaml
│ ├── libri_labels.py
│ ├── vq-wav2vec_featurize.py
│ ├── wav2vec_featurize.py
│ └── wav2vec_manifest.py
├── wmt19
│ └── README.md
├── wmt20
│ └── README.md
└── xlmr
│ └── README.md
├── fairseq.egg-info
├── PKG-INFO
├── SOURCES.txt
├── dependency_links.txt
├── entry_points.txt
├── not-zip-safe
├── requires.txt
└── top_level.txt
├── fairseq
├── .DS_Store
├── __init__.py
├── benchmark
│ ├── __init__.py
│ ├── dummy_lm.py
│ ├── dummy_masked_lm.py
│ ├── dummy_model.py
│ └── dummy_mt.py
├── binarizer.py
├── checkpoint_utils.py
├── clib
│ ├── .DS_Store
│ ├── cuda
│ │ ├── ngram_repeat_block_cuda.cpp
│ │ └── ngram_repeat_block_cuda_kernel.cu
│ ├── libbleu
│ │ ├── libbleu.cpp
│ │ └── module.cpp
│ ├── libnat
│ │ └── edit_dist.cpp
│ └── libnat_cuda
│ │ ├── binding.cpp
│ │ ├── edit_dist.cu
│ │ └── edit_dist.h
├── config
│ ├── .DS_Store
│ ├── __init__.py
│ ├── config.yaml
│ └── model
│ │ ├── .DS_Store
│ │ ├── transformer_lm
│ │ ├── transformer_lm_baevski_gbw.yaml
│ │ ├── transformer_lm_baevski_wiki103.yaml
│ │ ├── transformer_lm_big.yaml
│ │ ├── transformer_lm_gbw.yaml
│ │ ├── transformer_lm_gpt.yaml
│ │ ├── transformer_lm_gpt2_big.yaml
│ │ ├── transformer_lm_gpt2_medium.yaml
│ │ ├── transformer_lm_gpt2_small.yaml
│ │ └── transformer_lm_wiki103.yaml
│ │ ├── wav2vec
│ │ └── vq_wav2vec_gumbel.yaml
│ │ └── wav2vec2
│ │ ├── wav2vec2_base.yaml
│ │ └── wav2vec2_large.yaml
├── criterions
│ ├── __init__.py
│ ├── adaptive_loss.py
│ ├── composite_loss.py
│ ├── cross_entropy.py
│ ├── ctc.py
│ ├── fairseq_criterion.py
│ ├── glat_loss.py
│ ├── label_smoothed_cross_entropy.py
│ ├── label_smoothed_cross_entropy_with_alignment.py
│ ├── legacy_masked_lm.py
│ ├── masked_lm.py
│ ├── model_criterion.py
│ ├── nat_deep_supervision_loss.py
│ ├── nat_imitation_loss.py
│ ├── nat_loss.py
│ ├── sentence_prediction.py
│ ├── sentence_ranking.py
│ └── wav2vec_criterion.py
├── data
│ ├── .DS_Store
│ ├── __init__.py
│ ├── add_target_dataset.py
│ ├── append_token_dataset.py
│ ├── audio
│ │ ├── __init__.py
│ │ ├── audio_utils.py
│ │ ├── feature_transforms
│ │ │ ├── __init__.py
│ │ │ ├── global_cmvn.py
│ │ │ ├── specaugment.py
│ │ │ └── utterance_cmvn.py
│ │ ├── raw_audio_dataset.py
│ │ └── speech_to_text_dataset.py
│ ├── backtranslation_dataset.py
│ ├── base_wrapper_dataset.py
│ ├── bucket_pad_length_dataset.py
│ ├── colorize_dataset.py
│ ├── concat_dataset.py
│ ├── concat_sentences_dataset.py
│ ├── data_utils.py
│ ├── data_utils_fast.cpp
│ ├── data_utils_fast.cpython-36m-x86_64-linux-gnu.so
│ ├── data_utils_fast.cpython-37m-x86_64-linux-gnu.so
│ ├── data_utils_fast.cpython-38-x86_64-linux-gnu.so
│ ├── data_utils_fast.pyx
│ ├── denoising_dataset.py
│ ├── dictionary.py
│ ├── encoders
│ │ ├── __init__.py
│ │ ├── byte_bpe.py
│ │ ├── byte_utils.py
│ │ ├── bytes.py
│ │ ├── characters.py
│ │ ├── fastbpe.py
│ │ ├── gpt2_bpe.py
│ │ ├── gpt2_bpe_utils.py
│ │ ├── hf_bert_bpe.py
│ │ ├── hf_byte_bpe.py
│ │ ├── moses_tokenizer.py
│ │ ├── nltk_tokenizer.py
│ │ ├── sentencepiece_bpe.py
│ │ ├── space_tokenizer.py
│ │ ├── subword_nmt_bpe.py
│ │ └── utils.py
│ ├── fairseq_dataset.py
│ ├── fasta_dataset.py
│ ├── id_dataset.py
│ ├── indexed_dataset.py
│ ├── iterators.py
│ ├── language_pair_dataset.py
│ ├── legacy
│ │ ├── __init__.py
│ │ ├── block_pair_dataset.py
│ │ ├── masked_lm_dataset.py
│ │ └── masked_lm_dictionary.py
│ ├── list_dataset.py
│ ├── lm_context_window_dataset.py
│ ├── lru_cache_dataset.py
│ ├── mask_tokens_dataset.py
│ ├── monolingual_dataset.py
│ ├── multi_corpus_dataset.py
│ ├── multi_corpus_sampled_dataset.py
│ ├── multilingual
│ │ ├── __init__.py
│ │ ├── multilingual_data_manager.py
│ │ ├── multilingual_utils.py
│ │ ├── sampled_multi_dataset.py
│ │ ├── sampled_multi_epoch_dataset.py
│ │ └── sampling_method.py
│ ├── nested_dictionary_dataset.py
│ ├── noising.py
│ ├── num_samples_dataset.py
│ ├── numel_dataset.py
│ ├── offset_tokens_dataset.py
│ ├── pad_dataset.py
│ ├── plasma_utils.py
│ ├── prepend_dataset.py
│ ├── prepend_token_dataset.py
│ ├── raw_label_dataset.py
│ ├── replace_dataset.py
│ ├── resampling_dataset.py
│ ├── roll_dataset.py
│ ├── round_robin_zip_datasets.py
│ ├── shorten_dataset.py
│ ├── sort_dataset.py
│ ├── strip_token_dataset.py
│ ├── subsample_dataset.py
│ ├── token_block_dataset.py
│ ├── token_block_utils_fast.cpp
│ ├── token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so
│ ├── token_block_utils_fast.cpython-37m-x86_64-linux-gnu.so
│ ├── token_block_utils_fast.cpython-38-x86_64-linux-gnu.so
│ ├── token_block_utils_fast.pyx
│ ├── transform_eos_dataset.py
│ └── transform_eos_lang_pair_dataset.py
├── dataclass
│ ├── __init__.py
│ ├── configs.py
│ ├── constants.py
│ ├── initialize.py
│ └── utils.py
├── distributed_utils.py
├── file_io.py
├── file_utils.py
├── hub_utils.py
├── incremental_decoding_utils.py
├── iterative_refinement_generator.py
├── legacy_distributed_data_parallel.py
├── libbleu.cpython-36m-x86_64-linux-gnu.so
├── libbleu.cpython-37m-x86_64-linux-gnu.so
├── libbleu.cpython-38-x86_64-linux-gnu.so
├── libnat.cpython-36m-x86_64-linux-gnu.so
├── libnat.cpython-38-x86_64-linux-gnu.so
├── logging
│ ├── __init__.py
│ ├── meters.py
│ ├── metrics.py
│ └── progress_bar.py
├── model_parallel
│ ├── .DS_Store
│ ├── __init__.py
│ ├── criterions
│ │ ├── __init__.py
│ │ └── vocab_parallel_cross_entropy.py
│ ├── megatron_trainer.py
│ ├── models
│ │ ├── .DS_Store
│ │ ├── __init__.py
│ │ ├── pipeline_parallel_transformer
│ │ │ ├── __init__.py
│ │ │ ├── layers.py
│ │ │ └── model.py
│ │ ├── roberta
│ │ │ ├── __init__.py
│ │ │ └── model.py
│ │ ├── transformer.py
│ │ └── transformer_lm.py
│ └── modules
│ │ ├── __init__.py
│ │ ├── multihead_attention.py
│ │ ├── transformer_layer.py
│ │ ├── transformer_sentence_encoder.py
│ │ └── transformer_sentence_encoder_layer.py
├── models
│ ├── .DS_Store
│ ├── __init__.py
│ ├── archived
│ │ ├── __init__.py
│ │ ├── nonautoregressive_transformer_ctc.py
│ │ ├── nonautoregressive_transformer_ctc_deep_supervision_recur_input.py
│ │ ├── nonautoregressive_transformer_ctc_deep_supervision_recur_input_masked_sample.py
│ │ ├── nonautoregressive_transformer_ctc_deep_supervision_recur_input_random_mask.py
│ │ ├── nonautoregressive_transformer_ctc_deep_supervision_recur_input_sample.py
│ │ └── nonautoregressive_transformer_ctc_deep_supervision_sample.py
│ ├── bart
│ │ ├── __init__.py
│ │ ├── hub_interface.py
│ │ └── model.py
│ ├── composite_encoder.py
│ ├── distributed_fairseq_model.py
│ ├── fairseq_decoder.py
│ ├── fairseq_encoder.py
│ ├── fairseq_incremental_decoder.py
│ ├── fairseq_model.py
│ ├── fconv.py
│ ├── fconv_lm.py
│ ├── fconv_self_att.py
│ ├── future_work
│ │ ├── __init__.py
│ │ └── nat_subnet_global.py
│ ├── huggingface
│ │ ├── __init__.py
│ │ └── hf_gpt2.py
│ ├── lightconv.py
│ ├── lightconv_lm.py
│ ├── lstm.py
│ ├── lstm_lm.py
│ ├── masked_lm.py
│ ├── model_utils.py
│ ├── multilingual_transformer.py
│ ├── nat
│ │ ├── cmlm_transformer.py
│ │ ├── fairseq_nat_model.py
│ │ ├── insertion_transformer.py
│ │ ├── iterative_nonautoregressive_transformer.py
│ │ ├── levenshtein_transformer.py
│ │ ├── levenshtein_utils.py
│ │ ├── nat.py
│ │ ├── nat_crf_transformer.py
│ │ ├── nat_glat.py
│ │ └── nonautoregressive_ensembles.py
│ ├── roberta
│ │ ├── __init__.py
│ │ ├── alignment_utils.py
│ │ ├── hub_interface.py
│ │ ├── model.py
│ │ ├── model_camembert.py
│ │ ├── model_gottbert.py
│ │ └── model_xlmr.py
│ ├── speech_to_text
│ │ ├── __init__.py
│ │ ├── berard.py
│ │ └── s2t_transformer.py
│ ├── transformer.py
│ ├── transformer_align.py
│ ├── transformer_from_pretrained_xlm.py
│ ├── transformer_lm.py
│ └── wav2vec
│ │ ├── __init__.py
│ │ ├── wav2vec.py
│ │ ├── wav2vec2.py
│ │ └── wav2vec2_asr.py
├── modules
│ ├── .DS_Store
│ ├── __init__.py
│ ├── adaptive_input.py
│ ├── adaptive_softmax.py
│ ├── beamable_mm.py
│ ├── character_token_embedder.py
│ ├── checkpoint_activations.py
│ ├── conv_tbc.py
│ ├── cross_entropy.py
│ ├── cuda_utils.cu
│ ├── downsampled_multihead_attention.py
│ ├── dynamic_convolution.py
│ ├── dynamic_crf_layer.py
│ ├── dynamicconv_layer
│ │ ├── __init__.py
│ │ ├── cuda_function_gen.py
│ │ ├── dynamicconv_cuda.cpp
│ │ ├── dynamicconv_cuda.cuh
│ │ ├── dynamicconv_cuda_kernel.cu
│ │ ├── dynamicconv_layer.py
│ │ ├── dynamiconv_cpu.cpp
│ │ └── setup.py
│ ├── fairseq_dropout.py
│ ├── fp32_group_norm.py
│ ├── gelu.py
│ ├── grad_multiply.py
│ ├── gumbel_vector_quantizer.py
│ ├── kmeans_vector_quantizer.py
│ ├── layer_drop.py
│ ├── layer_norm.py
│ ├── learned_positional_embedding.py
│ ├── lightconv_layer
│ │ ├── __init__.py
│ │ ├── cuda_function_gen.py
│ │ ├── lightconv_cuda.cpp
│ │ ├── lightconv_cuda.cuh
│ │ ├── lightconv_cuda_kernel.cu
│ │ ├── lightconv_layer.py
│ │ └── setup.py
│ ├── lightweight_convolution.py
│ ├── linearized_convolution.py
│ ├── multihead_attention.py
│ ├── positional_embedding.py
│ ├── quant_noise.py
│ ├── quantization
│ │ ├── .DS_Store
│ │ ├── __init__.py
│ │ ├── pq
│ │ │ ├── __init__.py
│ │ │ ├── em.py
│ │ │ ├── modules
│ │ │ │ ├── __init__.py
│ │ │ │ ├── qconv.py
│ │ │ │ ├── qemb.py
│ │ │ │ └── qlinear.py
│ │ │ ├── pq.py
│ │ │ └── utils.py
│ │ ├── quantization_options.py
│ │ └── scalar
│ │ │ ├── __init__.py
│ │ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── qact.py
│ │ │ ├── qconv.py
│ │ │ ├── qemb.py
│ │ │ └── qlinear.py
│ │ │ ├── ops.py
│ │ │ └── utils.py
│ ├── same_pad.py
│ ├── scalar_bias.py
│ ├── sinusoidal_positional_embedding.py
│ ├── sparse_multihead_attention.py
│ ├── sparse_transformer_sentence_encoder.py
│ ├── sparse_transformer_sentence_encoder_layer.py
│ ├── transformer_layer.py
│ ├── transformer_sentence_encoder.py
│ ├── transformer_sentence_encoder_layer.py
│ ├── transpose_last.py
│ ├── unfold.py
│ └── vggblock.py
├── nan_detector.py
├── ngram_repeat_block.py
├── optim
│ ├── __init__.py
│ ├── adadelta.py
│ ├── adafactor.py
│ ├── adagrad.py
│ ├── adam.py
│ ├── adamax.py
│ ├── bmuf.py
│ ├── composite.py
│ ├── dynamic_loss_scaler.py
│ ├── fairseq_optimizer.py
│ ├── fp16_optimizer.py
│ ├── fused_adam.py
│ ├── fused_lamb.py
│ ├── lr_scheduler
│ │ ├── __init__.py
│ │ ├── cosine_lr_scheduler.py
│ │ ├── fairseq_lr_scheduler.py
│ │ ├── fixed_schedule.py
│ │ ├── inverse_square_root_schedule.py
│ │ ├── manual_lr_scheduler.py
│ │ ├── pass_through.py
│ │ ├── polynomial_decay_schedule.py
│ │ ├── reduce_lr_on_plateau.py
│ │ ├── tri_stage_lr_scheduler.py
│ │ └── triangular_lr_scheduler.py
│ ├── nag.py
│ ├── sgd.py
│ └── shard.py
├── options.py
├── pdb.py
├── quantization_utils.py
├── registry.py
├── scoring
│ ├── __init__.py
│ ├── bleu.py
│ ├── chrf.py
│ ├── tokenizer.py
│ └── wer.py
├── search.py
├── sequence_generator.py
├── sequence_scorer.py
├── tasks
│ ├── __init__.py
│ ├── audio_pretraining.py
│ ├── cross_lingual_lm.py
│ ├── denoising.py
│ ├── fairseq_task.py
│ ├── language_modeling.py
│ ├── legacy_masked_lm.py
│ ├── masked_lm.py
│ ├── multilingual_denoising.py
│ ├── multilingual_masked_lm.py
│ ├── multilingual_translation.py
│ ├── semisupervised_translation.py
│ ├── sentence_prediction.py
│ ├── sentence_ranking.py
│ ├── speech_to_text.py
│ ├── translation.py
│ ├── translation_contrastive.py
│ ├── translation_deep_supervision.py
│ ├── translation_from_pretrained_bart.py
│ ├── translation_from_pretrained_xlm.py
│ ├── translation_glat.py
│ ├── translation_imitation.py
│ ├── translation_imitation_glat.py
│ ├── translation_imitation_with_ranked_generator.py
│ ├── translation_lev.py
│ ├── translation_multi_simple_epoch.py
│ └── translation_selfrank.py
├── token_generation_constraints.py
├── tokenizer.py
├── torch_imputer
│ ├── __init__.py
│ ├── best_alignment.cu
│ ├── imputer.cpp
│ ├── imputer.cu
│ └── imputer.py
├── trainer.py
├── utils.py
├── version.py
└── version.txt
├── fairseq_cli
├── __init__.py
├── eval_lm.py
├── generate.py
├── hydra_train.py
├── interactive.py
├── preprocess.py
├── score.py
├── train.py
└── validate.py
├── fs_plugins
├── .DS_Store
├── __init__.py
├── models
│ ├── .DS_Store
│ ├── __init__.py
│ └── at_tree_attn_nonshare.py
└── tasks
│ ├── .DS_Store
│ ├── __init__.py
│ ├── syntactic_generator.py
│ ├── translation_syntactic_gen.py
│ └── tree_utils.py
├── generate.py
├── hubconf.py
├── prepare_training_data.py
├── setup.py
├── setup_env.sh
├── shell
├── apply-bpe.sh
├── infer.sh
├── prepare_data.sh
└── train.sh
├── toy_data
├── code.6000
├── inference
│ ├── dict.src.txt
│ ├── dict.tgt.txt
│ ├── test.src-tgt.src
│ └── test.src-tgt.tgt
├── training
│ ├── train.src
│ └── train.tgt.merge.parse
└── training_triplets
│ ├── dict.src.txt
│ ├── dict.tgt.txt
│ ├── test.src-tgt.src
│ ├── test.src-tgt.tgt
│ ├── train.src
│ ├── train.src-tgt.src
│ ├── train.src-tgt.tgt
│ ├── train.tgt
│ ├── valid.src-tgt.src
│ └── valid.src-tgt.tgt
└── train.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 |
--------------------------------------------------------------------------------
/assets/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/assets/.DS_Store
--------------------------------------------------------------------------------
/assets/intro.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/assets/intro.jpg
--------------------------------------------------------------------------------
/examples/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/.DS_Store
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | !*/*.sh
2 | !*/*.md
3 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | try:
7 | from fairseq.version import __version__ # noqa
8 | except ImportError:
9 | pass
10 |
--------------------------------------------------------------------------------
/examples/adaptive_span/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | # automatically import any Python files in the current directory
10 | cur_dir = os.path.dirname(__file__)
11 | for file in os.listdir(cur_dir):
12 | path = os.path.join(cur_dir, file)
13 | if (
14 | not file.startswith("_")
15 | and not file.startswith(".")
16 | and (file.endswith(".py") or os.path.isdir(path))
17 | ):
18 | mod_name = file[: file.find(".py")] if file.endswith(".py") else file
19 | module = importlib.import_module(__name__ + "." + mod_name)
20 |
--------------------------------------------------------------------------------
/examples/backtranslation/deduplicate_lines.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import argparse
8 | import fileinput
9 | import hashlib
10 | import sys
11 | from multiprocessing import Pool
12 |
13 |
14 | def get_hashes_and_lines(raw_line):
15 | hash = hashlib.md5(raw_line).hexdigest()
16 | return hash, raw_line
17 |
18 |
19 | def main():
20 | parser = argparse.ArgumentParser()
21 | parser.add_argument("--workers", type=int, default=10)
22 | parser.add_argument("files", nargs="*", help="input files")
23 | args = parser.parse_args()
24 |
25 | seen = set()
26 | with fileinput.input(args.files, mode="rb") as h:
27 | pool = Pool(args.workers)
28 | results = pool.imap_unordered(get_hashes_and_lines, h, 1000)
29 | for i, (hash, raw_line) in enumerate(results):
30 | if hash not in seen:
31 | seen.add(hash)
32 | sys.stdout.buffer.write(raw_line)
33 | if i % 1000000 == 0:
34 | print(i, file=sys.stderr, end="", flush=True)
35 | elif i % 100000 == 0:
36 | print(".", file=sys.stderr, end="", flush=True)
37 | print(file=sys.stderr, flush=True)
38 |
39 |
40 | if __name__ == "__main__":
41 | main()
42 |
--------------------------------------------------------------------------------
/examples/backtranslation/sacrebleu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 5 ]; then
4 | echo "usage: $0 [dataset=wmt14/full] [langpair=en-de] [databin] [bpecode] [model]"
5 | exit
6 | fi
7 |
8 |
9 | DATASET=$1
10 | LANGPAIR=$2
11 | DATABIN=$3
12 | BPECODE=$4
13 | MODEL=$5
14 |
15 | SRCLANG=$(echo $LANGPAIR | cut -d '-' -f 1)
16 | TGTLANG=$(echo $LANGPAIR | cut -d '-' -f 2)
17 |
18 |
19 | BPEROOT=examples/backtranslation/subword-nmt/subword_nmt
20 | if [ ! -e $BPEROOT ]; then
21 | BPEROOT=subword-nmt/subword_nmt
22 | if [ ! -e $BPEROOT ]; then
23 | echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
24 | git clone https://github.com/rsennrich/subword-nmt.git
25 | fi
26 | fi
27 |
28 |
29 | sacrebleu -t $DATASET -l $LANGPAIR --echo src \
30 | | sacremoses tokenize -a -l $SRCLANG -q \
31 | | python $BPEROOT/apply_bpe.py -c $BPECODE \
32 | | fairseq-interactive $DATABIN --path $MODEL \
33 | -s $SRCLANG -t $TGTLANG \
34 | --beam 5 --remove-bpe --buffer-size 1024 --max-tokens 8000 \
35 | | grep ^H- | cut -f 3- \
36 | | sacremoses detokenize -l $TGTLANG -q \
37 | | sacrebleu -t $DATASET -l $LANGPAIR
38 |
--------------------------------------------------------------------------------
/examples/backtranslation/tokenized_bleu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 5 ]; then
4 | echo "usage: $0 [dataset=wmt14/full] [langpair=en-de] [databin] [bpecode] [model]"
5 | exit
6 | fi
7 |
8 |
9 | DATASET=$1
10 | LANGPAIR=$2
11 | DATABIN=$3
12 | BPECODE=$4
13 | MODEL=$5
14 |
15 | SRCLANG=$(echo $LANGPAIR | cut -d '-' -f 1)
16 | TGTLANG=$(echo $LANGPAIR | cut -d '-' -f 2)
17 |
18 |
19 | BPEROOT=examples/backtranslation/subword-nmt/subword_nmt
20 | if [ ! -e $BPEROOT ]; then
21 | BPEROOT=subword-nmt/subword_nmt
22 | if [ ! -e $BPEROOT ]; then
23 | echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
24 | git clone https://github.com/rsennrich/subword-nmt.git
25 | fi
26 | fi
27 |
28 |
29 | TMP_REF=$(mktemp)
30 |
31 | sacrebleu -t $DATASET -l $LANGPAIR --echo ref -q \
32 | | sacremoses normalize -l $TGTLANG -q \
33 | | sacremoses tokenize -a -l $TGTLANG -q \
34 | > $TMP_REF
35 |
36 | sacrebleu -t $DATASET -l $LANGPAIR --echo src -q \
37 | | sacremoses normalize -l $SRCLANG -q \
38 | | sacremoses tokenize -a -l $SRCLANG -q \
39 | | python $BPEROOT/apply_bpe.py -c $BPECODE \
40 | | fairseq-interactive $DATABIN --path $MODEL \
41 | -s $SRCLANG -t $TGTLANG \
42 | --beam 5 --remove-bpe --buffer-size 1024 --max-tokens 8000 \
43 | | grep ^H- | cut -f 3- \
44 | | fairseq-score --ref $TMP_REF
45 |
46 | rm -f $TMP_REF
47 |
--------------------------------------------------------------------------------
/examples/constrained_decoding/normalize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) Facebook, Inc. and its affiliates.
4 | #
5 | # This source code is licensed under the MIT license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | import sys
9 |
10 | from sacremoses.normalize import MosesPunctNormalizer
11 |
12 |
13 | def main(args):
14 | normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
15 | for line in sys.stdin:
16 | print(normalizer.normalize(line.rstrip()), flush=True)
17 |
18 |
19 | if __name__ == "__main__":
20 | import argparse
21 |
22 | parser = argparse.ArgumentParser()
23 | parser.add_argument("--lang", "-l", default="en")
24 | parser.add_argument("--penn", "-p", action="store_true")
25 | args = parser.parse_args()
26 |
27 | main(args)
28 |
--------------------------------------------------------------------------------
/examples/constrained_decoding/tok.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) Facebook, Inc. and its affiliates.
4 | #
5 | # This source code is licensed under the MIT license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | import sys
9 |
10 | import sacremoses
11 |
12 |
13 | def main(args):
14 | """Tokenizes, preserving tabs"""
15 | mt = sacremoses.MosesTokenizer(lang=args.lang)
16 |
17 | def tok(s):
18 | return mt.tokenize(s, return_str=True)
19 |
20 | for line in sys.stdin:
21 | parts = list(map(tok, line.split("\t")))
22 | print(*parts, sep="\t", flush=True)
23 |
24 |
25 | if __name__ == "__main__":
26 | import argparse
27 |
28 | parser = argparse.ArgumentParser()
29 | parser.add_argument("--lang", "-l", default="en")
30 | parser.add_argument("--penn", "-p", action="store_true")
31 | parser.add_argument("--fields", "-f", help="fields to tokenize")
32 | args = parser.parse_args()
33 |
34 | main(args)
35 |
--------------------------------------------------------------------------------
/examples/criss/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/criss/.DS_Store
--------------------------------------------------------------------------------
/examples/criss/download_and_preprocess_tatoeba.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | SPM_ENCODE=flores/scripts/spm_encode.py
9 | DATA=data_tmp
10 | SPM_MODEL=criss_checkpoints/sentence.bpe.model
11 | DICT=criss_checkpoints/dict.txt
12 |
13 | if [[ -d flores ]]; then
14 | echo "flores already cloned"
15 | else
16 | git clone https://github.com/facebookresearch/flores
17 | fi
18 | if [[ -d LASER ]]; then
19 | echo "LASER already cloned"
20 | else
21 | git clone https://github.com/facebookresearch/LASER
22 | fi
23 | mkdir -p data_tmp
24 | declare -A lang_tatoeba_map=( ["ar_AR"]="ara" ["de_DE"]="deu" ["es_XX"]="spa" ["et_EE"]="est" ["fi_FI"]="fin" ["fr_XX"]="fra" ["hi_IN"]="hin" ["it_IT"]="ita" ["ja_XX"]="jpn" ["ko_KR"]="kor" ["kk_KZ"]="kaz" ["nl_XX"]="nld" ["ru_RU"]="rus" ["tr_TR"]="tur" ["vi_VN"]="vie" ["zh_CN"]="cmn")
25 | for lang in ar_AR de_DE es_XX et_EE fi_FI fr_XX hi_IN it_IT ja_XX kk_KZ ko_KR nl_XX ru_RU tr_TR vi_VN zh_CN; do
26 | lang_tatoeba=${lang_tatoeba_map[$lang]}
27 | echo $lang_tatoeba
28 | datadir=$DATA/${lang}-en_XX-tatoeba
29 | rm -rf $datadir
30 | mkdir -p $datadir
31 | TEST_PREFIX=LASER/data/tatoeba/v1/tatoeba
32 | python $SPM_ENCODE \
33 | --model ${SPM_MODEL} \
34 | --output_format=piece \
35 | --inputs ${TEST_PREFIX}.${lang_tatoeba}-eng.${lang_tatoeba} ${TEST_PREFIX}.${lang_tatoeba}-eng.eng \
36 | --outputs $datadir/test.bpe.${lang}-en_XX.${lang} $datadir/test.bpe.${lang}-en_XX.en_XX
37 |
38 | # binarize data
39 | fairseq-preprocess \
40 | --source-lang ${lang} --target-lang en_XX \
41 | --testpref $datadir/test.bpe.${lang}-en_XX \
42 | --destdir $datadir \
43 | --srcdict ${DICT} \
44 | --joined-dictionary \
45 | --workers 4
46 | done
47 |
--------------------------------------------------------------------------------
/examples/fast_noisy_channel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import noisy_channel_translation # noqa
7 | from . import noisy_channel_sequence_generator # noqa
8 | from . import noisy_channel_beam_search # noqa
9 |
--------------------------------------------------------------------------------
/examples/language_model/README.conv.md:
--------------------------------------------------------------------------------
1 | # Language Modeling with Gated Convolutional Networks (Dauphin et al., 2017)
2 |
3 | ## Example usage
4 |
5 | First download and preprocess the data following the main [language modeling README](README.md).
6 |
7 | Then to train a convolutional LM using the `fconv_lm_dauphin_wikitext103`
8 | architecture:
9 | ```bash
10 | fairseq-train --task language_modeling \
11 | data-bin/wikitext-103 \
12 | --save-dir checkpoints/fconv_wikitext-103 \
13 | --arch fconv_lm_dauphin_wikitext103 \
14 | --adaptive-softmax-cutoff 10000,20000,200000 \
15 | --dropout 0.2 \
16 | --criterion adaptive_loss \
17 | --optimizer nag --clip-norm 0.1 --weight-decay 5e-06 \
18 | --lr 1.0 --lr-scheduler reduce_lr_on_plateau --lr-shrink 0.5 \
19 | --max-tokens 1024 --tokens-per-sample 1024 \
20 | --ddp-backend no_c10d \
21 | --max-epoch 35
22 | ```
23 |
24 | And evaluate with:
25 | ```bash
26 | fairseq-eval-lm data-bin/wikitext-103 --path checkpoints/fconv_wikitext-103/checkpoint_best.pt
27 | ```
28 |
29 | ## Citation
30 |
31 | ```bibtex
32 | @inproceedings{dauphin2017language,
33 | title={Language Modeling with Gated Convolutional Networks},
34 | author={Dauphin, Yann N and Fan, Angela and Auli, Michael and Grangier, David},
35 | booktitle={Proceedings of the 34th International Conference on Machine Learning-Volume 70},
36 | pages={933--941},
37 | year={2017},
38 | organization={JMLR}
39 | }
40 | ```
41 |
--------------------------------------------------------------------------------
/examples/language_model/prepare-wikitext-103.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
3 |
4 | URLS=(
5 | "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip"
6 | )
7 | FILES=(
8 | "wikitext-103-v1.zip"
9 | )
10 |
11 | for ((i=0;i<${#URLS[@]};++i)); do
12 | file=${FILES[i]}
13 | if [ -f $file ]; then
14 | echo "$file already exists, skipping download"
15 | else
16 | url=${URLS[i]}
17 | wget "$url"
18 | if [ -f $file ]; then
19 | echo "$url successfully downloaded."
20 | else
21 | echo "$url not successfully downloaded."
22 | exit -1
23 | fi
24 | if [ ${file: -4} == ".tgz" ]; then
25 | tar zxvf $file
26 | elif [ ${file: -4} == ".tar" ]; then
27 | tar xvf $file
28 | elif [ ${file: -4} == ".zip" ]; then
29 | unzip $file
30 | fi
31 | fi
32 | done
33 | cd ..
34 |
--------------------------------------------------------------------------------
/examples/latent_depth/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/latent_depth/.DS_Store
--------------------------------------------------------------------------------
/examples/latent_depth/latent_depth_src/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/latent_depth/latent_depth_src/.DS_Store
--------------------------------------------------------------------------------
/examples/latent_depth/latent_depth_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import multilingual_translation_latent_depth # noqa
7 | from .loss import latent_depth # noqa
8 | from .models import latent_multilingual_transformer # noqa
9 | from .modules import latent_layers # noqa
10 |
--------------------------------------------------------------------------------
/examples/latent_depth/latent_depth_src/loss/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/latent_depth/latent_depth_src/loss/__init__.py
--------------------------------------------------------------------------------
/examples/latent_depth/latent_depth_src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/latent_depth/latent_depth_src/models/__init__.py
--------------------------------------------------------------------------------
/examples/latent_depth/latent_depth_src/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/latent_depth/latent_depth_src/modules/__init__.py
--------------------------------------------------------------------------------
/examples/linformer/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/linformer/.DS_Store
--------------------------------------------------------------------------------
/examples/linformer/README.md:
--------------------------------------------------------------------------------
1 | # Linformer: Self-Attention with Linear Complexity (Wang et al., 2020)
2 |
3 | This example contains code to train Linformer models as described in our paper
4 | [Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768).
5 |
6 | ## Training a new Linformer RoBERTa model
7 |
8 | You can mostly follow the [RoBERTa pretraining README](/examples/roberta/README.pretraining.md),
9 | updating your training command with `--user-dir examples/linformer/linformer_src --arch linformer_roberta_base`.
10 |
11 | ## Citation
12 |
13 | If you use our work, please cite:
14 |
15 | ```bibtex
16 | @article{wang2020linformer,
17 | title={Linformer: Self-Attention with Linear Complexity},
18 | author={Wang, Sinong and Li, Belinda and Khabsa, Madian and Fang, Han and Ma, Hao},
19 | journal={arXiv preprint arXiv:2006.04768},
20 | year={2020}
21 | }
22 | ```
23 |
--------------------------------------------------------------------------------
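The Linformer README above defers to the RoBERTa pretraining instructions for the actual command. As a rough, untested sketch (the data path, batch size, schedule, and learning rate below are illustrative placeholders rather than values taken from this repository), the adapted command could look like the following; only `--user-dir` and `--arch` are Linformer-specific:

```bash
# Hedged sketch only -- adapt the full command in examples/roberta/README.pretraining.md.
# DATA_DIR and all hyperparameters below are illustrative placeholders.
DATA_DIR=data-bin/your_pretraining_corpus

fairseq-train $DATA_DIR \
    --user-dir examples/linformer/linformer_src \
    --arch linformer_roberta_base \
    --task masked_lm --criterion masked_lm \
    --tokens-per-sample 512 --max-tokens 4096 \
    --optimizer adam --lr 0.0005 \
    --max-update 125000
```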
/examples/linformer/linformer_src/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/linformer/linformer_src/.DS_Store
--------------------------------------------------------------------------------
/examples/linformer/linformer_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .models import linformer_roberta # noqa
7 |
--------------------------------------------------------------------------------
/examples/linformer/linformer_src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/linformer/linformer_src/models/__init__.py
--------------------------------------------------------------------------------
/examples/linformer/linformer_src/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/linformer/linformer_src/modules/__init__.py
--------------------------------------------------------------------------------
/examples/m2m_100/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/m2m_100/.DS_Store
--------------------------------------------------------------------------------
/examples/m2m_100/process_data/remove_too_much_punc.py:
--------------------------------------------------------------------------------
1 | import gzip
2 | import argparse
3 | from string import punctuation
4 |
5 | def len_no_punc(s, punc):
6 | return len([ch for ch in s if ch in punc])
7 |
8 | def filter_overpunc(len_npunc, len_sen):
9 | return len_npunc < 0.5*len_sen
10 |
11 | def main(args):
12 | punc = punctuation + "—|–"
13 | print('Processing file {}'.format(args.input))
14 | with gzip.open(args.input, 'rt', encoding=args.encoding) as tsv:
15 | with open(args.bitext + '.' + args.src_lang, 'wt', encoding=args.encoding) as fsrc:
16 | with open(args.bitext + '.' + args.tgt_lang, 'wt', encoding=args.encoding) as ftgt:
17 |                 for line in tsv:
18 |                     fields = line.split('\t')
19 | 
20 |                     src, tgt = fields[1], fields[2]
21 | 
22 |                     nchar_npunc_src = len_no_punc(src, punc)
23 |                     nchar_npunc_tgt = len_no_punc(tgt, punc)
24 | 
25 |                     if filter_overpunc(nchar_npunc_src, len(src)) and filter_overpunc(nchar_npunc_tgt, len(tgt)):
26 |                         fsrc.write(src.strip() + '\n')
27 |                         ftgt.write(tgt.strip() + '\n')
28 |
29 | if __name__ == '__main__':
30 | parser = argparse.ArgumentParser()
31 | parser.add_argument("--input", required=True, type=str)
32 | parser.add_argument('--encoding', default='utf-8', help='character encoding for input/output')
33 |     parser.add_argument('--bitext', type=str, required=True, help='output bitext file prefix')
34 | parser.add_argument('--src-lang', type=str, required=True, help='Source language')
35 | parser.add_argument('--tgt-lang', type=str, required=True, help='Target language')
36 | main(parser.parse_args())
37 |
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/README.md:
--------------------------------------------------------------------------------
1 | # M2M-100 Tokenization
2 |
3 | We apply different tokenization strategies for different languages, following the existing literature. Here we provide tok.sh, a tokenizer that can be used to reproduce our results.
4 |
5 | To reproduce the results, follow these steps:
6 |
7 | ```
8 | tgt_lang=...
9 | reference_translation=...
10 | cat generation_output | grep -P "^H" | sort -V | cut -f 3- | sh tok.sh $tgt_lang > hyp
11 | cat $reference_translation |sh tok.sh $tgt_lang > ref
12 | sacrebleu -tok 'none' ref < hyp
13 | ```
14 |
15 | ## Installation
16 |
17 | Tools needed for all the languages except Arabic can be installed by running install_dependencies.sh.
18 | If you want to evaluate Arabic models, please follow the instructions provided here: http://alt.qcri.org/tools/arabic-normalizer/ to install the required tools.
19 |
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/seg_ja.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | SCRIPT=`realpath $0`
7 | KYTEA=`dirname $SCRIPT`/thirdparty/kytea
8 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib
9 | export PATH=$PATH:"$KYTEA/bin"
10 |
11 | cat - | tr -d "[:blank:]" | kytea -notags
12 |
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/seg_ko.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | SCRIPT=`realpath $0`
7 | MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2
8 |
9 | export PATH=$PATH:"$MECAB/bin":"$MECAB/lib"
10 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib"
11 |
12 | cat - | mecab -O wakati
13 |
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/thirdparty/.gitignore:
--------------------------------------------------------------------------------
1 | seg_my.py
2 | indic_nlp_library/
3 | indic_nlp_resources/
4 | kytea/
5 | mecab-0.996-ko-0.9.2.tar.gz
6 | mecab-0.996-ko-0.9.2/
7 | mosesdecoder/
8 | wat2020.my-en.zip
9 | wat2020.my-en/
10 | wmt16-scripts/
11 | mecab-ko-dic-2.1.1-20180720/
12 | mecab-ko-dic-2.1.1-20180720.tar.gz
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/tokenize_indic.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | # Use: echo {text} | python tokenize_indic.py {language}
8 |
9 | import sys
10 |
11 | from indicnlp.normalize.indic_normalize import IndicNormalizerFactory
12 | from indicnlp.tokenize.indic_tokenize import trivial_tokenize
13 |
14 |
15 | factory = IndicNormalizerFactory()
16 | normalizer = factory.get_normalizer(
17 | sys.argv[1], remove_nuktas=False, nasals_mode="do_nothing"
18 | )
19 |
20 | for line in sys.stdin:
21 | normalized_line = normalizer.normalize(line.strip())
22 | tokenized_line = " ".join(trivial_tokenize(normalized_line, sys.argv[1]))
23 | print(tokenized_line)
24 |
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/tokenize_thai.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import sys
8 |
9 | from pythainlp import word_tokenize
10 |
11 |
12 | for line in sys.stdin:
13 | print(" ".join(word_tokenize(line.strip())))
14 |
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/tokenize_zh.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 |
8 | import fileinput
9 |
10 | import sacrebleu
11 |
12 |
13 | for line in fileinput.input():
14 | print(sacrebleu.tokenize_zh(line))
15 |
--------------------------------------------------------------------------------
/examples/m2m_100/tokenizers/tokenizer_ar.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | #
7 | # Please follow the instructions here http://alt.qcri.org/tools/arabic-normalizer/
8 | # to install tools needed for Arabic
9 |
10 | echo "Please install Arabic tools: http://alt.qcri.org/tools/arabic-normalizer/"
11 | echo "Then update environment variables in tokenizer_ar.sh"
12 | exit 1
13 |
14 | SVMTOOL=...
15 | GOMOSESGO=...
16 | QCRI_ARABIC_NORMALIZER=...
17 |
18 | export PERL5LIB="$SVMTOOL/lib":"$GOMOSESGO/bin/MADA-3.2":$PERL5LIB
19 |
20 |
21 | tempfile=$(mktemp)
22 | cat - > $tempfile
23 |
24 | cd $QCRI_ARABIC_NORMALIZER
25 |
26 | bash qcri_normalizer_mada3.2_aramorph1.2.1.sh $tempfile
27 | cat $tempfile.mada_norm-aramorph.europarl_tok
28 |
--------------------------------------------------------------------------------
/examples/megatron_11b/detok.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import argparse
8 | import fileinput
9 |
10 | import sacremoses
11 |
12 |
13 | def main():
14 | parser = argparse.ArgumentParser(description="")
15 | parser.add_argument("files", nargs="*", help="input files")
16 | args = parser.parse_args()
17 |
18 | detok = sacremoses.MosesDetokenizer()
19 |
20 | for line in fileinput.input(args.files, openhook=fileinput.hook_compressed):
21 | print(
22 | detok.detokenize(line.strip().split(" "))
23 | .replace(" @", "")
24 | .replace("@ ", "")
25 | .replace(" =", "=")
26 | .replace("= ", "=")
27 | .replace(" – ", "–")
28 | )
29 |
30 |
31 | if __name__ == "__main__":
32 | main()
33 |
--------------------------------------------------------------------------------
/examples/multilingual/ML50_langs.txt:
--------------------------------------------------------------------------------
1 | ar_AR
2 | cs_CZ
3 | de_DE
4 | en_XX
5 | es_XX
6 | et_EE
7 | fi_FI
8 | fr_XX
9 | gu_IN
10 | hi_IN
11 | it_IT
12 | ja_XX
13 | kk_KZ
14 | ko_KR
15 | lt_LT
16 | lv_LV
17 | my_MM
18 | ne_NP
19 | nl_XX
20 | ro_RO
21 | ru_RU
22 | si_LK
23 | tr_TR
24 | vi_VN
25 | zh_CN
26 | af_ZA
27 | az_AZ
28 | bn_IN
29 | fa_IR
30 | he_IL
31 | hr_HR
32 | id_ID
33 | ka_GE
34 | km_KH
35 | mk_MK
36 | ml_IN
37 | mn_MN
38 | mr_IN
39 | pl_PL
40 | ps_AF
41 | pt_XX
42 | sv_SE
43 | sw_KE
44 | ta_IN
45 | te_IN
46 | th_TH
47 | tl_XX
48 | uk_UA
49 | ur_PK
50 | xh_ZA
51 | gl_ES
52 | sl_SI
--------------------------------------------------------------------------------
/examples/multilingual/data_scripts/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Install dependencies
3 | ```bash
4 | pip install -r requirement.txt
5 | ```
6 |
7 | # Download the data set
8 | ```bash
9 | export WORKDIR_ROOT=<path to your working directory>
10 | bash ./download_ML50_v1.sh
11 | ```
12 | The downloaded data will be at $WORKDIR_ROOT/ML50
13 |
14 | # Preprocess the data
15 | Install SPM [here](https://github.com/google/sentencepiece)
16 | ```bash
17 | export WORKDIR_ROOT=<path to your working directory>
18 | export SPM_PATH=<path to the installed spm_encode.py>
19 | bash ./preprocess_ML50_v1.sh
20 | ```
21 | * $WORKDIR_ROOT/ML50/raw: extracted raw data
22 | * $WORKDIR_ROOT/ML50/dedup: dedup data
23 | * $WORKDIR_ROOT/ML50/clean: data with valid and test sentences removed from the dedup data
24 | 
25 | 
--------------------------------------------------------------------------------
/examples/multilingual/data_scripts/download_ML50_v1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | if [ -z $WORKDIR_ROOT ] ;
9 | then
10 |   echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
11 | exit
12 | fi
13 |
14 | # first run download_wmt20.sh; it will install a few useful tools for other scripts
15 | # TODO: need to print out instructions on downloading a few files which require manual authentication from the websites
16 | bash ./download_wmt20.sh
17 |
18 | python ./download_wmt19_and_before.py
19 | bash ./download_wat19_my.sh
20 | python ./download_ted_and_extract.py
21 | bash ./download_lotus.sh
22 | bash ./download_iitb.sh
23 | bash ./download_af_xh.sh
24 |
25 |
26 | # IWSLT download URLs have changed in the meantime; TODO: fix them:
27 | bash ./download_iwslt_and_extract.sh
28 |
29 | # TODO: globalvoices URLs changed; need to be fixed
30 | bash ./download_flores_data.sh
31 |
--------------------------------------------------------------------------------
/examples/multilingual/data_scripts/download_iitb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 |
9 | if [ -z $WORKDIR_ROOT ] ;
10 | then
11 |   echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
12 | exit
13 | fi
14 |
15 | IITB=$WORKDIR_ROOT/IITB
16 | mkdir -p $IITB
17 | pushd $IITB
18 |
19 | wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/parallel.tgz
20 | tar -xvzf parallel.tgz
21 |
22 | wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/dev_test.tgz
23 | tar -xvzf dev_test.tgz
24 |
25 | DESTDIR=${WORKDIR_ROOT}/ML50/raw/
26 |
27 | cp parallel/IITB.en-hi.en $DESTDIR/train.hi_IN-en_XX.en_XX
28 | cp parallel/IITB.en-hi.hi $DESTDIR/train.hi_IN-en_XX.hi_IN
29 |
30 | cp dev_test/dev.en $DESTDIR/valid.hi_IN-en_XX.en_XX
31 | cp dev_test/dev.hi $DESTDIR/valid.hi_IN-en_XX.hi_IN
32 |
33 | cp dev_test/test.en $DESTDIR/test.hi_IN-en_XX.en_XX
34 | cp dev_test/test.hi $DESTDIR/test.hi_IN-en_XX.hi_IN
35 | popd
--------------------------------------------------------------------------------
/examples/multilingual/data_scripts/download_wat19_my.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 |
9 | if [ -z $WORKDIR_ROOT ] ;
10 | then
11 |   echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
12 | exit
13 | fi
14 |
15 |
16 | SRCDIR=$WORKDIR_ROOT/indic_languages_corpus
17 | DESTDIR=$WORKDIR_ROOT/ML50/raw
18 | mkdir -p $SRCDIR
19 | mkdir -p $DESTDIR
20 |
21 | WAT_MY_EN=wat2020.my-en.zip
22 | cd $SRCDIR
23 | # please refer to http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/ for latest URL if the following url expired
24 | #- The data used for WAT2020 are identical to those used in WAT2019.
25 | wget http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/$WAT_MY_EN
26 | unzip $WAT_MY_EN
27 |
28 |
29 | SRC_EXTRACT_DIR=$SRCDIR/wat2020.my-en/alt
30 |
31 | cp $SRC_EXTRACT_DIR/train.alt.en $DESTDIR/train.my_MM-en_XX.en_XX
32 | cp $SRC_EXTRACT_DIR/train.alt.my $DESTDIR/train.my_MM-en_XX.my_MM
33 | cp $SRC_EXTRACT_DIR/dev.alt.en $DESTDIR/valid.my_MM-en_XX.en_XX
34 | cp $SRC_EXTRACT_DIR/dev.alt.my $DESTDIR/valid.my_MM-en_XX.my_MM
35 | cp $SRC_EXTRACT_DIR/test.alt.en $DESTDIR/test.my_MM-en_XX.en_XX
36 | cp $SRC_EXTRACT_DIR/test.alt.my $DESTDIR/test.my_MM-en_XX.my_MM
37 |
--------------------------------------------------------------------------------
/examples/multilingual/data_scripts/preprocess_ML50_v1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | if [ -z $WORKDIR_ROOT ] ;
9 | then
10 |     echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
11 | exit
12 | fi
13 |
14 | if [ -z $SPM_PATH ] ;
15 | then
16 |     echo "Please install sentencepiece from https://github.com/google/sentencepiece and set SPM_PATH pointing to the installed spm_encode.py. Exiting..."
17 | exit
18 | fi
19 |
20 | ML50=${WORKDIR_ROOT}/ML50
21 |
22 | mkdir -p $ML50/dedup
23 | mkdir -p $ML50/clean
24 |
25 | python ./dedup_all.py --from-folder $ML50/raw --to-folder $ML50/dedup
26 | python ./remove_valid_test_in_train.py --from-folder $ML50/dedup --to-folder $ML50/clean
27 | python ./binarize.py --raw-folder $ML50/clean
--------------------------------------------------------------------------------
/examples/multilingual/data_scripts/requirement.txt:
--------------------------------------------------------------------------------
1 | wget
2 | pandas
--------------------------------------------------------------------------------
/examples/multilingual/data_scripts/utils/dedup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | import argparse
8 |
9 | def deup(src_file, tgt_file, src_file_out, tgt_file_out):
10 | seen = set()
11 | dup_count = 0
12 | with open(src_file, encoding='utf-8') as fsrc, \
13 | open(tgt_file, encoding='utf-8') as ftgt, \
14 | open(src_file_out, 'w', encoding='utf-8') as fsrc_out, \
15 | open(tgt_file_out, 'w', encoding='utf-8') as ftgt_out:
16 | for s, t in zip(fsrc, ftgt):
17 | if (s, t) not in seen:
18 | fsrc_out.write(s)
19 | ftgt_out.write(t)
20 | seen.add((s, t))
21 | else:
22 | dup_count += 1
23 | print(f'number of duplicates removed: {dup_count}')
24 |
25 |
26 | def main():
27 | parser = argparse.ArgumentParser()
28 | parser.add_argument("--src-file", type=str, required=True,
29 | help="src file")
30 | parser.add_argument("--tgt-file", type=str, required=True,
31 | help="tgt file")
32 | parser.add_argument("--src-file-out", type=str, required=True,
33 | help="src ouptut file")
34 | parser.add_argument("--tgt-file-out", type=str, required=True,
35 | help="tgt ouput file")
36 | args = parser.parse_args()
37 | deup(args.src_file, args.tgt_file, args.src_file_out, args.tgt_file_out)
38 |
39 |
40 | if __name__ == "__main__":
41 | main()
42 |
--------------------------------------------------------------------------------
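A minimal usage sketch for utils/dedup.py above, calling the helper directly rather than through its CLI flags. The file names are placeholders; the only assumption is that the script's directory is importable.

# Direct call into the helper above (the function is named `deup` in the source).
# Assumes utils/dedup.py is importable, e.g. by running from the data_scripts/utils directory.
from dedup import deup

deup(
    src_file="train.de-en.de",      # source side of a line-aligned parallel corpus (hypothetical)
    tgt_file="train.de-en.en",      # target side
    src_file_out="train.dedup.de",  # deduplicated outputs
    tgt_file_out="train.dedup.en",
)
# Pairs whose exact (source, target) strings were already seen are dropped,
# and the number of removed duplicates is printed.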
/examples/multilingual/data_scripts/utils/strip_sgm.sh:
--------------------------------------------------------------------------------
1 | grep "seg id" | sed 's///g' | sed 's/<\/seg>//g'
2 |
--------------------------------------------------------------------------------
/examples/multilingual/finetune_multilingual_model.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | path_2_data=$1 # directory containing the binarized data for each direction
9 | lang_list=$2 # file listing all languages covered by the model (passed to --lang-dict)
10 | lang_pairs=$3 # comma-separated list of language pairs to train on, e.g. "en-fr,en-cs,fr-en,cs-en"
11 | # the pretrained model can also be an mBART checkpoint
12 | pretrained_model=$4 # path to the pretrained checkpoint to finetune from
13 |
14 |
15 | fairseq-train "$path_2_data" \
16 | --encoder-normalize-before --decoder-normalize-before \
17 | --arch transformer --layernorm-embedding \
18 | --task translation_multi_simple_epoch \
19 | --finetune-from-model "$pretrained_model" \
20 | --sampling-method "temperature" \
21 | --sampling-temperature "1.5" \
22 | --encoder-langtok "src" \
23 | --decoder-langtok \
24 | --lang-dict "$lang_list" \
25 | --lang-pairs "$lang_pairs" \
26 | --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \
27 | --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \
28 | --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \
29 | --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \
30 | --max-tokens 1024 --update-freq 2 \
31 | --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \
32 | --seed 222 --log-format simple --log-interval 2
33 |
--------------------------------------------------------------------------------
/examples/multilingual/multilingual_fairseq_gen.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | lang_pairs="en-fr,en-cs,fr-en,cs-en"
9 | path_2_data=$1 # directory containing the binarized data
10 | lang_list=$2 # file listing all languages covered by the model (passed to --lang-dict)
11 | model=$3 # path to the trained checkpoint to evaluate
12 | source_lang=cs
13 | target_lang=en
14 |
15 | fairseq-generate "$path_2_data" \
16 | --path "$model" \
17 | --task translation_multi_simple_epoch \
18 | --gen-subset test \
19 | --source-lang "$source_lang" \
20 | --target-lang "$target_lang" \
21 |   --sacrebleu --remove-bpe 'sentencepiece' \
22 | --batch-size 32 \
23 | --encoder-langtok "src" \
24 | --decoder-langtok \
25 | --lang-dict "$lang_list" \
26 | --lang-pairs "$lang_pairs"
27 |
--------------------------------------------------------------------------------
/examples/multilingual/train_multilingual_model.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | path_2_data=$1 # directory containing the binarized data for each direction
9 | lang_list=$2 # file listing all languages covered by the model (passed to --lang-dict)
10 | lang_pairs=$3 # comma-separated list of language pairs to train on, e.g. "en-fr,en-cs,fr-en,cs-en"
11 |
12 | fairseq-train "$path_2_data" \
13 | --encoder-normalize-before --decoder-normalize-before \
14 | --arch transformer --layernorm-embedding \
15 | --task translation_multi_simple_epoch \
16 | --sampling-method "temperature" \
17 | --sampling-temperature 1.5 \
18 | --encoder-langtok "src" \
19 | --decoder-langtok \
20 | --lang-dict "$lang_list" \
21 | --lang-pairs "$lang_pairs" \
22 | --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \
23 | --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \
24 | --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \
25 | --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \
26 | --max-tokens 1024 --update-freq 2 \
27 | --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \
28 | --seed 222 --log-format simple --log-interval 2
29 |
--------------------------------------------------------------------------------
/examples/noisychannel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .rerank_options import * # noqa
7 |
--------------------------------------------------------------------------------
/examples/pointer_generator/pointer_generator_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import transformer_pg # noqa
7 |
--------------------------------------------------------------------------------
/examples/quant_noise/transformer_quantization_config.yaml:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | # This file defines example configuration arguments for quantizing
7 | # a transformer model with product quantization
8 |
9 | # Number of Centroids for Product Quantization, by default 256 (byte-aligned)
10 | n_centroids:
11 | Linear:
12 | key: in_features
13 | value: {"*": 256}
14 | Embedding:
15 | key: embedding_dim
16 | value: {"*": 256}
17 |
18 | # Block Sizes for Product Quantization
19 | # We suggest: 8 for FFN, 4 for ATTN, 4 for embedding projections, 8 for embeddings
20 | block_sizes:
21 | Linear:
22 | key: fuzzy_name
23 | value: {fc: 8, attn: 4, emb: 4}
24 | Embedding:
25 | key: fuzzy_name
26 | value: {emb: 8}
27 |
28 | # Layers to Quantize Sequentially
29 | # We suggest: first FFN, then EMB, then ATTN
30 | layers_to_quantize:
31 | - decoder\\.layers\\.\d+\\.fc[12]
32 | - decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]
33 | - decoder\\.layers\\.\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)
34 |
--------------------------------------------------------------------------------
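To make the layers_to_quantize patterns above concrete, here is a small self-contained sketch (not fairseq's quantization code) that loads the YAML and shows which hypothetical decoder parameter names each pattern selects.

import re
import yaml

# Load the example config above (path is a placeholder).
with open("transformer_quantization_config.yaml") as f:
    cfg = yaml.safe_load(f)

# A few hypothetical module names, as they would appear in a transformer state dict.
names = [
    "decoder.layers.0.fc1",
    "decoder.layers.3.self_attn.k_proj",
    "decoder.embed_tokens.embeddings.0.1",
    "encoder.layers.0.fc1",  # not selected: the patterns only cover the decoder
]

for pattern in cfg["layers_to_quantize"]:
    # The YAML stores doubled backslashes, so "\\." becomes the regex "\." here.
    regex = re.compile(pattern.replace("\\\\", "\\"))
    matched = [n for n in names if regex.fullmatch(n)]
    print(pattern, "->", matched)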
/examples/roberta/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/roberta/.DS_Store
--------------------------------------------------------------------------------
/examples/roberta/commonsense_qa/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import commonsense_qa_task # noqa
7 |
--------------------------------------------------------------------------------
/examples/roberta/commonsense_qa/download_cqa_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | OUTDIR=data/CommonsenseQA
8 |
9 | mkdir -p $OUTDIR
10 |
11 | wget -O $OUTDIR/train.jsonl https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl
12 | wget -O $OUTDIR/valid.jsonl https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl
13 | wget -O $OUTDIR/test.jsonl https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl
14 | wget -O $OUTDIR/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt
15 |
--------------------------------------------------------------------------------
/examples/roberta/wsc/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import wsc_criterion # noqa
7 | from . import wsc_task # noqa
8 |
--------------------------------------------------------------------------------
/examples/rxf/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import rxf_src # noqa
7 |
--------------------------------------------------------------------------------
/examples/rxf/rxf_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa
7 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/simultaneous_translation/.DS_Store
--------------------------------------------------------------------------------
/examples/simultaneous_translation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import criterions, eval, models # noqa
7 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | for file in os.listdir(os.path.dirname(__file__)):
11 | if file.endswith(".py") and not file.startswith("_"):
12 | criterion_name = file[: file.find(".py")]
13 | importlib.import_module(
14 | "examples.simultaneous_translation.criterions." + criterion_name
15 | )
16 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/eval/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/simultaneous_translation/eval/.DS_Store
--------------------------------------------------------------------------------
/examples/simultaneous_translation/eval/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/eval/agents/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from fairseq import registry
10 |
11 |
12 | build_agent, register_agent, MONOTONIC_AGENT, _ = registry.setup_registry(
13 | "--agent-type"
14 | )
15 |
16 |
17 | DEFAULT_EOS = ""
18 | GET = 0
19 | SEND = 1
20 |
21 | for file in os.listdir(os.path.dirname(__file__)):
22 | if file.endswith(".py") and not file.startswith("_"):
23 | module = file[: file.find(".py")]
24 | importlib.import_module("agents." + module)
25 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/eval/scorers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from fairseq import registry
10 |
11 |
12 | (build_scorer, register_scorer, SCORER_REGISTRIES, _) = registry.setup_registry(
13 | "--scorer-type"
14 | )
15 |
16 | for file in os.listdir(os.path.dirname(__file__)):
17 | if file.endswith(".py") and not file.startswith("_"):
18 | module = file[: file.find(".py")]
19 | importlib.import_module("scorers." + module)
20 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/eval/scorers/text_scorer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import register_scorer
7 | from .scorer import SimulScorer
8 |
9 |
10 | @register_scorer("text")
11 | class SimulTextScorer(SimulScorer):
12 | def __init__(self, args):
13 | super().__init__(args)
14 | self.data = {
15 | "src": self._load_text_file(args.src_file, split=True),
16 | "tgt": self._load_text_file(args.tgt_file, split=False),
17 | }
18 |
19 | def send_src(self, sent_id, *args):
20 | if self.steps[sent_id] >= len(self.data["src"][sent_id]):
21 | dict_to_return = {
22 | "sent_id": sent_id,
23 | "segment_id": self.steps[sent_id],
24 | "segment": self.eos,
25 | }
26 | # Consider EOS
27 | self.steps[sent_id] = len(self.data["src"][sent_id]) + 1
28 | else:
29 | dict_to_return = {
30 | "sent_id": sent_id,
31 | "segment_id": self.steps[sent_id],
32 | "segment": self.data["src"][sent_id][self.steps[sent_id]],
33 | }
34 |
35 | self.steps[sent_id] += 1
36 |
37 | return dict_to_return
38 |
39 | def src_lengths(self):
40 | # +1 for eos
41 | return [len(sent) + 1 for sent in self.data["src"]]
42 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | for file in os.listdir(os.path.dirname(__file__)):
11 | if file.endswith(".py") and not file.startswith("_"):
12 | model_name = file[: file.find(".py")]
13 | importlib.import_module(
14 | "examples.simultaneous_translation.models." + model_name
15 | )
16 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from fairseq import registry
10 |
11 |
12 | (
13 | build_monotonic_attention,
14 | register_monotonic_attention,
15 | MONOTONIC_ATTENTION_REGISTRY,
16 | _,
17 | ) = registry.setup_registry("--simul-type")
18 |
19 | for file in os.listdir(os.path.dirname(__file__)):
20 | if file.endswith(".py") and not file.startswith("_"):
21 | model_name = file[: file.find(".py")]
22 | importlib.import_module(
23 | "examples.simultaneous_translation.modules." + model_name
24 | )
25 |
--------------------------------------------------------------------------------
/examples/simultaneous_translation/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the criterions/ directory
11 | for file in os.listdir(os.path.dirname(__file__)):
12 | if file.endswith(".py") and not file.startswith("_"):
13 | module = file[: file.find(".py")]
14 | importlib.import_module("examples.simultaneous_translation.utils." + module)
15 |
--------------------------------------------------------------------------------
/examples/speech_recognition/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/speech_recognition/.DS_Store
--------------------------------------------------------------------------------
/examples/speech_recognition/__init__.py:
--------------------------------------------------------------------------------
1 | from . import criterions, models, tasks # noqa
2 |
--------------------------------------------------------------------------------
/examples/speech_recognition/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
5 | # ASG loss requires flashlight bindings
6 | files_to_skip = set()
7 | try:
8 | import flashlight.lib.sequence.criterion
9 | except ImportError:
10 | files_to_skip.add("ASG_loss.py")
11 |
12 | for file in os.listdir(os.path.dirname(__file__)):
13 | if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
14 | criterion_name = file[: file.find(".py")]
15 | importlib.import_module(
16 | "examples.speech_recognition.criterions." + criterion_name
17 | )
18 |
--------------------------------------------------------------------------------
/examples/speech_recognition/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .asr_dataset import AsrDataset
7 |
8 |
9 | __all__ = [
10 | "AsrDataset",
11 | ]
12 |
--------------------------------------------------------------------------------
/examples/speech_recognition/models/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
5 | for file in os.listdir(os.path.dirname(__file__)):
6 | if file.endswith(".py") and not file.startswith("_"):
7 | model_name = file[: file.find(".py")]
8 | importlib.import_module("examples.speech_recognition.models." + model_name)
9 |
--------------------------------------------------------------------------------
/examples/speech_recognition/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
5 | for file in os.listdir(os.path.dirname(__file__)):
6 | if file.endswith(".py") and not file.startswith("_"):
7 | task_name = file[: file.find(".py")]
8 | importlib.import_module("examples.speech_recognition.tasks." + task_name)
9 |
--------------------------------------------------------------------------------
/examples/translation_moe/translation_moe_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import translation_moe # noqa
7 |
--------------------------------------------------------------------------------
/examples/translation_moe/translation_moe_src/logsumexp_moe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 |
9 | class LogSumExpMoE(torch.autograd.Function):
10 | """Standard LogSumExp forward pass, but use *posterior* for the backward.
11 |
12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
13 | (Shen et al., 2019) `_.
14 | """
15 |
16 | @staticmethod
17 | def forward(ctx, logp, posterior, dim=-1):
18 | ctx.save_for_backward(posterior)
19 | ctx.dim = dim
20 | return torch.logsumexp(logp, dim=dim)
21 |
22 | @staticmethod
23 | def backward(ctx, grad_output):
24 | (posterior,) = ctx.saved_tensors
25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior
26 | return grad_logp, None, None
27 |
--------------------------------------------------------------------------------
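A brief usage sketch for the custom autograd function above: the forward pass is an ordinary logsumexp over expert log-probabilities, while the backward pass routes the incoming gradient through a separately supplied posterior. The numbers are made up and the import assumes the module above is on the path.

import torch

from logsumexp_moe import LogSumExpMoE  # the module shown above, assumed importable

# Log-probabilities for 2 examples under 3 experts (hypothetical values).
logp = torch.tensor([[0.5, 0.3, 0.2],
                     [0.1, 0.1, 0.8]]).log().requires_grad_(True)

# Posterior over experts, used only in the backward pass (rows sum to 1).
posterior = torch.tensor([[1.0, 0.0, 0.0],
                          [0.0, 0.0, 1.0]])

out = LogSumExpMoE.apply(logp, posterior, -1)  # same values as torch.logsumexp(logp, -1)
out.sum().backward()
print(out)        # forward: standard logsumexp over the expert dimension
print(logp.grad)  # backward: gradient distributed according to `posterior`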
/examples/truncated_bptt/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import transformer_xl_model, truncated_bptt_lm_task # noqa
7 |
--------------------------------------------------------------------------------
/examples/unsupervised_quality_estimation/aggregate_scores.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import argparse
7 | import sys
8 |
9 | import numpy as np
10 |
11 |
12 | aggregate_funcs = {
13 | "std": np.std,
14 | "var": np.var,
15 | "median": np.median,
16 | "mean": np.mean,
17 | "min": np.min,
18 | "max": np.max,
19 | }
20 |
21 |
22 | def main():
23 | parser = argparse.ArgumentParser()
24 | parser.add_argument("-i", "--input_file", required=True, type=str)
25 | parser.add_argument("-n", "--repeat_times", required=True, type=int)
26 | parser.add_argument("-o", "--output_file", required=False)
27 | parser.add_argument("-f", "--func", required=False, default="mean")
28 | args = parser.parse_args()
29 |
30 | stream = open(args.output_file, "w") if args.output_file else sys.stdout
31 |
32 | segment_scores = []
33 | for line in open(args.input_file):
34 | segment_scores.append(float(line.strip()))
35 | if len(segment_scores) == args.repeat_times:
36 | stream.write("{}\n".format(aggregate_funcs[args.func](segment_scores)))
37 | segment_scores = []
38 |
39 |
40 | if __name__ == "__main__":
41 | main()
42 |
--------------------------------------------------------------------------------
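aggregate_scores.py above collapses every --repeat_times consecutive scores into a single value (its counterpart repeat_lines.py, shown next, produces the repeated inputs). A self-contained sketch of that reduction with hypothetical numbers:

import numpy as np

repeat_times = 3
# Hypothetical scores: 3 noisy scores for each of 2 source segments,
# in the order they would be read back from the scoring output.
scores = [0.91, 0.88, 0.93,   # segment 1
          0.40, 0.35, 0.45]   # segment 2

segment_scores = []
for s in scores:
    segment_scores.append(s)
    if len(segment_scores) == repeat_times:
        # Same reduction aggregate_scores.py applies with "-f mean".
        print(np.mean(segment_scores))
        segment_scores = []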
/examples/unsupervised_quality_estimation/repeat_lines.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import argparse
7 | import sys
8 |
9 |
10 | def _normalize_spaces(line):
11 | return " ".join(line.split())
12 |
13 |
14 | def main():
15 | parser = argparse.ArgumentParser()
16 | parser.add_argument("-i", "--input_file", required=True, type=str)
17 | parser.add_argument("-n", "--repeat_times", required=True, type=int)
18 | parser.add_argument("-o", "--output_file", required=False, type=str)
19 | args = parser.parse_args()
20 | stream = open(args.output_file, "w") if args.output_file else sys.stdout
21 |
22 | for line in open(args.input_file):
23 | for _ in range(args.repeat_times):
24 | stream.write(_normalize_spaces(line) + "\n")
25 |
26 |
27 | if __name__ == "__main__":
28 | main()
29 |
--------------------------------------------------------------------------------
/examples/wav2vec/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/wav2vec/.DS_Store
--------------------------------------------------------------------------------
/examples/wav2vec/config/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/examples/wav2vec/config/.DS_Store
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/base_100h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | no_epoch_checkpoints: true
10 | best_checkpoint_metric: wer
11 |
12 | task:
13 | _name: audio_pretraining
14 | data: ???
15 | normalize: false
16 | labels: ltr
17 |
18 | dataset:
19 | num_workers: 6
20 | max_tokens: 3200000
21 | skip_invalid_size_inputs_valid_test: true
22 | valid_subset: dev_other
23 |
24 | distributed_training:
25 | ddp_backend: no_c10d
26 | distributed_world_size: 2
27 |
28 | criterion:
29 | _name: ctc
30 | zero_infinity: true
31 |
32 | optimization:
33 | max_update: 80000
34 | lr: [0.00003]
35 | sentence_avg: true
36 | update_freq: [4]
37 |
38 | optimizer:
39 | _name: adam
40 | adam_betas: (0.9,0.98)
41 | adam_eps: 1e-08
42 |
43 | lr_scheduler:
44 | _name: tri_stage
45 | phase_ratio: [0.1, 0.4, 0.5]
46 | final_lr_scale: 0.05
47 |
48 | model:
49 | _name: wav2vec_ctc
50 | w2v_path: ???
51 | apply_mask: true
52 | mask_prob: 0.65
53 | mask_channel_prob: 0.5
54 | mask_channel_length: 64
55 | layerdrop: 0.1
56 | activation_dropout: 0.1
57 | feature_grad_mult: 0.0
58 | freeze_finetune_updates: 0
59 |
60 |
--------------------------------------------------------------------------------
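The finetuning YAMLs in this directory are Hydra configs; the "???" placeholders (task.data, model.w2v_path) are filled in as command-line overrides. A sketch of launching the base_100h config through the fairseq-hydra-train entry point (listed in fairseq.egg-info/entry_points.txt), wrapped in a small Python driver; all paths are placeholders.

import subprocess

# Hypothetical paths; substitute a real manifest directory and wav2vec 2.0 checkpoint.
subprocess.run(
    [
        "fairseq-hydra-train",
        "task.data=/path/to/librispeech/manifests",
        "model.w2v_path=/path/to/wav2vec_small.pt",
        "--config-dir", "examples/wav2vec/config/finetuning",
        "--config-name", "base_100h",
    ],
    check=True,
)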
/examples/wav2vec/config/finetuning/base_10h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval: 50
10 | save_interval_updates: 10000
11 | keep_interval_updates: 1
12 | no_epoch_checkpoints: true
13 | best_checkpoint_metric: wer
14 |
15 | task:
16 | _name: audio_pretraining
17 | data: ???
18 | normalize: false
19 | labels: ltr
20 |
21 | dataset:
22 | num_workers: 6
23 | max_tokens: 3200000
24 | skip_invalid_size_inputs_valid_test: true
25 | validate_after_updates: 10000
26 | validate_interval: 50
27 | valid_subset: dev_other
28 |
29 | distributed_training:
30 | ddp_backend: no_c10d
31 | distributed_world_size: 2
32 |
33 | criterion:
34 | _name: ctc
35 | zero_infinity: true
36 |
37 | optimization:
38 | max_update: 20000
39 | lr: [0.00005]
40 | sentence_avg: true
41 | update_freq: [4]
42 |
43 | optimizer:
44 | _name: adam
45 | adam_betas: (0.9,0.98)
46 | adam_eps: 1e-08
47 |
48 | lr_scheduler:
49 | _name: tri_stage
50 | phase_ratio: [0.1, 0.4, 0.5]
51 | final_lr_scale: 0.05
52 |
53 | model:
54 | _name: wav2vec_ctc
55 | w2v_path: ???
56 | apply_mask: true
57 | mask_prob: 0.65
58 | mask_channel_prob: 0.5
59 | mask_channel_length: 64
60 | layerdrop: 0.05
61 | activation_dropout: 0.1
62 | feature_grad_mult: 0.0
63 | freeze_finetune_updates: 10000
64 |
65 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/base_10m.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval: 1000
10 | save_interval_updates: 50
11 | keep_interval_updates: 1
12 | no_epoch_checkpoints: true
13 | best_checkpoint_metric: wer
14 |
15 | task:
16 | _name: audio_pretraining
17 | data: ???
18 | normalize: false
19 | labels: ltr
20 |
21 | dataset:
22 | num_workers: 6
23 | max_tokens: 3200000
24 | skip_invalid_size_inputs_valid_test: true
25 | validate_after_updates: 10000
26 | validate_interval: 1000
27 | valid_subset: dev_other
28 |
29 | distributed_training:
30 | ddp_backend: no_c10d
31 | distributed_world_size: 2
32 |
33 | criterion:
34 | _name: ctc
35 | zero_infinity: true
36 |
37 | optimization:
38 | max_update: 13000
39 | lr: [0.00005]
40 | sentence_avg: true
41 | update_freq: [4]
42 |
43 | optimizer:
44 | _name: adam
45 | adam_betas: (0.9,0.98)
46 | adam_eps: 1e-08
47 |
48 | lr_scheduler:
49 | _name: tri_stage
50 | phase_ratio: [0.1, 0.4, 0.5]
51 | final_lr_scale: 0.05
52 |
53 | model:
54 | _name: wav2vec_ctc
55 | w2v_path: ???
56 | apply_mask: true
57 | mask_prob: 0.65
58 | mask_channel_prob: 0.25
59 | mask_channel_length: 64
60 | layerdrop: 0.1
61 | activation_dropout: 0.1
62 | feature_grad_mult: 0.0
63 | freeze_finetune_updates: 10000
64 |
65 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/base_1h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval: 1000
10 | save_interval_updates: 50
11 | keep_interval_updates: 1
12 | no_epoch_checkpoints: true
13 | best_checkpoint_metric: wer
14 |
15 | task:
16 | _name: audio_pretraining
17 | data: ???
18 | normalize: false
19 | labels: ltr
20 |
21 | dataset:
22 | num_workers: 6
23 | max_tokens: 3200000
24 | skip_invalid_size_inputs_valid_test: true
25 | validate_after_updates: 10000
26 | validate_interval: 1000
27 | valid_subset: dev_other
28 |
29 | distributed_training:
30 | ddp_backend: no_c10d
31 | distributed_world_size: 2
32 |
33 | criterion:
34 | _name: ctc
35 | zero_infinity: true
36 |
37 | optimization:
38 | max_update: 13000
39 | lr: [0.00005]
40 | sentence_avg: true
41 | update_freq: [4]
42 |
43 | optimizer:
44 | _name: adam
45 | adam_betas: (0.9,0.98)
46 | adam_eps: 1e-08
47 |
48 | lr_scheduler:
49 | _name: tri_stage
50 | phase_ratio: [0.1, 0.4, 0.5]
51 | final_lr_scale: 0.05
52 |
53 | model:
54 | _name: wav2vec_ctc
55 | w2v_path: ???
56 | apply_mask: true
57 | mask_prob: 0.65
58 | mask_channel_prob: 0.25
59 | mask_channel_length: 64
60 | layerdrop: 0.1
61 | activation_dropout: 0.1
62 | feature_grad_mult: 0.0
63 | freeze_finetune_updates: 10000
64 |
65 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/base_960h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | no_epoch_checkpoints: true
10 | best_checkpoint_metric: wer
11 |
12 | task:
13 | _name: audio_pretraining
14 | data: ???
15 | normalize: false
16 | labels: ltr
17 |
18 | dataset:
19 | num_workers: 6
20 | max_tokens: 3200000
21 | skip_invalid_size_inputs_valid_test: true
22 | valid_subset: dev_other
23 |
24 | distributed_training:
25 | ddp_backend: no_c10d
26 | distributed_world_size: 8
27 |
28 | criterion:
29 | _name: ctc
30 | zero_infinity: true
31 |
32 | optimization:
33 | max_update: 320000
34 | lr: [0.00001]
35 | sentence_avg: true
36 |
37 | optimizer:
38 | _name: adam
39 | adam_betas: (0.9,0.98)
40 | adam_eps: 1e-08
41 |
42 | lr_scheduler:
43 | _name: tri_stage
44 | phase_ratio: [0.1, 0.4, 0.5]
45 | final_lr_scale: 0.05
46 |
47 | model:
48 | _name: wav2vec_ctc
49 | w2v_path: ???
50 | apply_mask: true
51 | mask_prob: 0.5
52 | mask_channel_prob: 0.1
53 | mask_channel_length: 64
54 | layerdrop: 0.1
55 | activation_dropout: 0.1
56 | feature_grad_mult: 0.0
57 | freeze_finetune_updates: 0
58 |
59 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/vox_100h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | no_epoch_checkpoints: true
10 | best_checkpoint_metric: wer
11 |
12 | task:
13 | _name: audio_pretraining
14 | data: ???
15 | normalize: true
16 | labels: ltr
17 |
18 | dataset:
19 | num_workers: 6
20 | max_tokens: 1280000
21 | skip_invalid_size_inputs_valid_test: true
22 | valid_subset: dev_other
23 |
24 | distributed_training:
25 | ddp_backend: no_c10d
26 | distributed_world_size: 4
27 |
28 | criterion:
29 | _name: ctc
30 | zero_infinity: true
31 |
32 | optimization:
33 | max_update: 80000
34 | lr: [0.00003]
35 | sentence_avg: true
36 | update_freq: [5]
37 |
38 | optimizer:
39 | _name: adam
40 | adam_betas: (0.9,0.98)
41 | adam_eps: 1e-08
42 |
43 | lr_scheduler:
44 | _name: tri_stage
45 | phase_ratio: [0.1, 0.4, 0.5]
46 | final_lr_scale: 0.05
47 |
48 | model:
49 | _name: wav2vec_ctc
50 | w2v_path: ???
51 | apply_mask: true
52 | mask_prob: 0.5
53 | mask_channel_prob: 0.5
54 | mask_channel_length: 64
55 | layerdrop: 0.1
56 | activation_dropout: 0.1
57 | feature_grad_mult: 0.0
58 | freeze_finetune_updates: 10000
59 |
60 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/vox_10h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval: 50
10 | save_interval_updates: 10000
11 | keep_interval_updates: 1
12 | no_epoch_checkpoints: true
13 | best_checkpoint_metric: wer
14 |
15 | task:
16 | _name: audio_pretraining
17 | data: ???
18 | normalize: true
19 | labels: ltr
20 |
21 | dataset:
22 | num_workers: 6
23 | max_tokens: 1280000
24 | skip_invalid_size_inputs_valid_test: true
25 | validate_after_updates: 10000
26 | validate_interval: 50
27 | valid_subset: dev_other
28 |
29 | distributed_training:
30 | ddp_backend: no_c10d
31 | distributed_world_size: 4
32 |
33 | criterion:
34 | _name: ctc
35 | zero_infinity: true
36 |
37 | optimization:
38 | max_update: 20000
39 | lr: [0.0001]
40 | sentence_avg: true
41 | update_freq: [5]
42 |
43 | optimizer:
44 | _name: adam
45 | adam_betas: (0.9,0.98)
46 | adam_eps: 1e-08
47 |
48 | lr_scheduler:
49 | _name: tri_stage
50 | phase_ratio: [0.1, 0.4, 0.5]
51 | final_lr_scale: 0.05
52 |
53 | model:
54 | _name: wav2vec_ctc
55 | w2v_path: ???
56 | apply_mask: true
57 | mask_prob: 0.75
58 | mask_channel_prob: 0.25
59 | mask_channel_length: 64
60 | layerdrop: 0.1
61 | activation_dropout: 0.1
62 | feature_grad_mult: 0.0
63 | freeze_finetune_updates: 10000
64 |
65 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/vox_10m.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval: 1000
10 | save_interval_updates: 50
11 | keep_interval_updates: 1
12 | no_epoch_checkpoints: true
13 | best_checkpoint_metric: wer
14 |
15 | task:
16 | _name: audio_pretraining
17 | data: ???
18 | normalize: true
19 | labels: ltr
20 |
21 | dataset:
22 | num_workers: 6
23 | max_tokens: 1280000
24 | skip_invalid_size_inputs_valid_test: true
25 | validate_after_updates: 10000
26 | validate_interval: 1000
27 | valid_subset: dev_other
28 |
29 | distributed_training:
30 | ddp_backend: no_c10d
31 | distributed_world_size: 4
32 |
33 | criterion:
34 | _name: ctc
35 | zero_infinity: true
36 |
37 | optimization:
38 | max_update: 13000
39 | lr: [0.0001]
40 | sentence_avg: true
41 | update_freq: [5]
42 |
43 | optimizer:
44 | _name: adam
45 | adam_betas: (0.9,0.98)
46 | adam_eps: 1e-08
47 |
48 | lr_scheduler:
49 | _name: tri_stage
50 | phase_ratio: [0.1, 0.4, 0.5]
51 | final_lr_scale: 0.05
52 |
53 | model:
54 | _name: wav2vec_ctc
55 | w2v_path: ???
56 | apply_mask: true
57 | mask_prob: 0.65
58 | mask_channel_prob: 0.25
59 | mask_channel_length: 64
60 | layerdrop: 0.1
61 | activation_dropout: 0.1
62 | feature_grad_mult: 0.0
63 | freeze_finetune_updates: 10000
64 |
65 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/vox_1h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval: 1000
10 | save_interval_updates: 50
11 | keep_interval_updates: 1
12 | no_epoch_checkpoints: true
13 | best_checkpoint_metric: wer
14 |
15 | task:
16 | _name: audio_pretraining
17 | data: ???
18 | normalize: true
19 | labels: ltr
20 |
21 | dataset:
22 | num_workers: 6
23 | max_tokens: 1280000
24 | skip_invalid_size_inputs_valid_test: true
25 | validate_after_updates: 10000
26 | validate_interval: 1000
27 | valid_subset: dev_other
28 |
29 | distributed_training:
30 | ddp_backend: no_c10d
31 | distributed_world_size: 4
32 |
33 | criterion:
34 | _name: ctc
35 | zero_infinity: true
36 |
37 | optimization:
38 | max_update: 13000
39 | lr: [0.0003]
40 | sentence_avg: true
41 | update_freq: [5]
42 |
43 | optimizer:
44 | _name: adam
45 | adam_betas: (0.9,0.98)
46 | adam_eps: 1e-08
47 |
48 | lr_scheduler:
49 | _name: tri_stage
50 | phase_ratio: [0.1, 0.4, 0.5]
51 | final_lr_scale: 0.05
52 |
53 | model:
54 | _name: wav2vec_ctc
55 | w2v_path: ???
56 | apply_mask: true
57 | mask_prob: 0.75
58 | mask_channel_prob: 0.25
59 | mask_channel_length: 64
60 | layerdrop: 0.1
61 | activation_dropout: 0.1
62 | feature_grad_mult: 0.0
63 | freeze_finetune_updates: 10000
64 |
65 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/finetuning/vox_960h.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | no_epoch_checkpoints: true
10 | best_checkpoint_metric: wer
11 |
12 | task:
13 | _name: audio_pretraining
14 | data: ???
15 | normalize: true
16 | labels: ltr
17 |
18 | dataset:
19 | num_workers: 6
20 | max_tokens: 1280000
21 | skip_invalid_size_inputs_valid_test: true
22 | valid_subset: dev_other
23 |
24 | distributed_training:
25 | ddp_backend: no_c10d
26 | distributed_world_size: 24
27 |
28 | criterion:
29 | _name: ctc
30 | zero_infinity: true
31 |
32 | optimization:
33 | max_update: 320000
34 | lr: [0.00003]
35 | sentence_avg: true
36 |
37 | optimizer:
38 | _name: adam
39 | adam_betas: (0.9,0.98)
40 | adam_eps: 1e-08
41 |
42 | lr_scheduler:
43 | _name: tri_stage
44 | phase_ratio: [0.1, 0.4, 0.5]
45 | final_lr_scale: 0.05
46 |
47 | model:
48 | _name: wav2vec_ctc
49 | w2v_path: ???
50 | apply_mask: true
51 | mask_prob: 0.5
52 | mask_channel_prob: 0.25
53 | mask_channel_length: 64
54 | layerdrop: 0.1
55 | activation_dropout: 0.1
56 | feature_grad_mult: 0.0
57 | freeze_finetune_updates: 10000
58 |
59 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval_updates: 25000
10 | keep_interval_updates: 1
11 | no_epoch_checkpoints: true
12 |
13 | task:
14 | _name: audio_pretraining
15 | data: ???
16 | max_sample_size: 250000
17 | min_sample_size: 32000
18 |
19 | dataset:
20 | num_workers: 6
21 | max_tokens: 1400000
22 | skip_invalid_size_inputs_valid_test: true
23 |
24 | distributed_training:
25 | distributed_world_size: 64
26 | ddp_backend: no_c10d
27 |
28 | criterion:
29 | _name: wav2vec
30 | infonce: true
31 | log_keys: ["prob_perplexity","code_perplexity","temp"]
32 | loss_weights: [0.1, 10]
33 |
34 | optimization:
35 | max_update: 400000
36 | lr: [0.0005]
37 |
38 | optimizer:
39 | _name: adam
40 | adam_betas: (0.9,0.98)
41 | adam_eps: 1e-06
42 | weight_decay: 0.01
43 |
44 | lr_scheduler:
45 | _name: polynomial_decay
46 | warmup_updates: 32000
47 |
48 | model:
49 | _name: wav2vec2
50 | quantize_targets: true
51 | final_dim: 256
52 | encoder_layerdrop: 0.05
53 | dropout_input: 0.1
54 | dropout_features: 0.1
55 | feature_grad_mult: 0.1
56 |
--------------------------------------------------------------------------------
/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | common:
4 | fp16: true
5 | log_format: json
6 | log_interval: 200
7 |
8 | checkpoint:
9 | save_interval_updates: 25000
10 | keep_interval_updates: 1
11 | no_epoch_checkpoints: true
12 |
13 | task:
14 | _name: audio_pretraining
15 | data: ???
16 | max_sample_size: 320000
17 | min_sample_size: 32000
18 | normalize: true
19 |
20 | dataset:
21 | num_workers: 6
22 | max_tokens: 1200000
23 | skip_invalid_size_inputs_valid_test: true
24 |
25 | distributed_training:
26 | distributed_world_size: 128
27 | ddp_backend: no_c10d
28 |
29 | criterion:
30 | _name: wav2vec
31 | infonce: true
32 | log_keys: ["prob_perplexity","code_perplexity","temp"]
33 | loss_weights: [0.1, 0]
34 |
35 | optimization:
36 | max_update: 1000000
37 | lr: [0.005]
38 |
39 | optimizer:
40 | _name: adam
41 | adam_betas: (0.9,0.98)
42 | adam_eps: 1e-06
43 | weight_decay: 0.01
44 |
45 | lr_scheduler:
46 | _name: polynomial_decay
47 | warmup_updates: 32000
48 |
49 | model:
50 | _name: wav2vec2
51 | quantize_targets: true
52 | extractor_mode: layer_norm
53 | layer_norm_first: true
54 | final_dim: 768
55 | latent_temp: [2.0,0.1,0.999995]
56 | encoder_layerdrop: 0.00
57 | dropout_input: 0.0
58 | dropout_features: 0.0
59 | dropout: 0.0
60 | attention_dropout: 0.0
61 | conv_bias: true
62 |
63 | encoder_layers: 24
64 | encoder_embed_dim: 1024
65 | encoder_ffn_embed_dim: 4096
66 | encoder_attention_heads: 16
67 |
68 | feature_grad_mult: 1.0
69 |
70 |
--------------------------------------------------------------------------------
/fairseq.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/fairseq.egg-info/entry_points.txt:
--------------------------------------------------------------------------------
1 | [console_scripts]
2 | fairseq-eval-lm = fairseq_cli.eval_lm:cli_main
3 | fairseq-generate = fairseq_cli.generate:cli_main
4 | fairseq-hydra-train = fairseq_cli.hydra_train:cli_main
5 | fairseq-interactive = fairseq_cli.interactive:cli_main
6 | fairseq-preprocess = fairseq_cli.preprocess:cli_main
7 | fairseq-score = fairseq_cli.score:cli_main
8 | fairseq-train = fairseq_cli.train:cli_main
9 | fairseq-validate = fairseq_cli.validate:cli_main
10 |
11 |
--------------------------------------------------------------------------------
/fairseq.egg-info/not-zip-safe:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/fairseq.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | cffi
2 | cython
3 | hydra-core<1.1
4 | omegaconf<2.1
5 | regex
6 | sacrebleu>=1.4.12
7 | torch
8 | tqdm
9 |
10 | [:python_version < "3.7"]
11 | dataclasses
12 | numpy<1.20.0
13 |
14 | [:python_version >= "3.7"]
15 | numpy
16 |
--------------------------------------------------------------------------------
/fairseq.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | fairseq
2 | fairseq_cli
3 | fs_plugins
4 |
--------------------------------------------------------------------------------
/fairseq/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/.DS_Store
--------------------------------------------------------------------------------
/fairseq/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | import os
8 | import sys
9 |
10 | try:
11 | from .version import __version__ # noqa
12 | except ImportError:
13 | version_txt = os.path.join(os.path.dirname(__file__), "version.txt")
14 | with open(version_txt) as f:
15 | __version__ = f.read().strip()
16 |
17 | __all__ = ["pdb"]
18 |
19 | # backwards compatibility to support `from fairseq.meters import AverageMeter`
20 | from fairseq.logging import meters, metrics, progress_bar # noqa
21 |
22 | sys.modules["fairseq.meters"] = meters
23 | sys.modules["fairseq.metrics"] = metrics
24 | sys.modules["fairseq.progress_bar"] = progress_bar
25 |
26 | # initialize hydra
27 | from fairseq.dataclass.initialize import hydra_init
28 | hydra_init()
29 |
30 | import fairseq.criterions # noqa
31 | import fairseq.models # noqa
32 | import fairseq.modules # noqa
33 | import fairseq.optim # noqa
34 | import fairseq.optim.lr_scheduler # noqa
35 | import fairseq.pdb # noqa
36 | import fairseq.scoring # noqa
37 | import fairseq.tasks # noqa
38 | import fairseq.token_generation_constraints # noqa
39 |
40 | import fairseq.benchmark # noqa
41 | import fairseq.model_parallel # noqa
42 |
--------------------------------------------------------------------------------
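A small check of the backwards-compatibility aliasing set up in fairseq/__init__.py above: after importing fairseq, the legacy fairseq.meters path and the new fairseq.logging.meters path resolve to the same objects.

import fairseq  # executes the sys.modules aliasing shown above

from fairseq.meters import AverageMeter                         # legacy path, kept working via the alias
from fairseq.logging.meters import AverageMeter as NewAverageMeter

assert AverageMeter is NewAverageMeter  # both names refer to the same class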
/fairseq/benchmark/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | # import models/tasks to register them
7 | from . import dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa
8 |
--------------------------------------------------------------------------------
/fairseq/clib/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/clib/.DS_Store
--------------------------------------------------------------------------------
/fairseq/clib/cuda/ngram_repeat_block_cuda.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright (c) Microsoft Corporation.
3 | Licensed under the MIT License.
4 | */
5 |
6 | #include <torch/extension.h>
7 | #include <vector>
8 |
9 | /*
10 | CPP Binding for CUDA OP
11 | */
12 |
13 | // CUDA forward declarations
14 | torch::Tensor ngram_repeat_block_cuda_forward(torch::Tensor tokens,
15 | torch::Tensor lprobs, int bsz,
16 | int step, int beam_size,
17 | int no_repeat_ngram_size);
18 |
19 | #define CHECK_CUDA(x) \
20 | TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
21 | #define CHECK_CONTIGUOUS(x) \
22 | TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
23 | #define CHECK_INPUT(x) \
24 | CHECK_CUDA(x); \
25 | CHECK_CONTIGUOUS(x)
26 |
27 | // Input check and call to CUDA OP
28 | // Backward method not required
29 | torch::Tensor ngram_repeat_block_forward(torch::Tensor tokens,
30 | torch::Tensor lprobs, int bsz,
31 | int step, int beam_size,
32 | int no_repeat_ngram_size) {
33 | CHECK_INPUT(tokens);
34 | CHECK_INPUT(lprobs);
35 | assert(bsz > 0);
36 | assert(step >= 0);
37 | assert(beam_size > 0);
38 | assert(no_repeat_ngram_size > 0);
39 |
40 | return ngram_repeat_block_cuda_forward(tokens, lprobs, bsz, step, beam_size,
41 | no_repeat_ngram_size);
42 | }
43 |
44 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
45 | m.def("forward", &ngram_repeat_block_forward,
46 | "No Repeat Ngram Block forward (CUDA)");
47 | }
48 |
--------------------------------------------------------------------------------
/fairseq/clib/libbleu/module.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2017-present, Facebook, Inc.
3 | * All rights reserved.
4 | *
5 | * This source code is licensed under the license found in the
6 | * LICENSE file in the root directory of this source tree.
7 | */
8 |
9 | #include <Python.h>
10 |
11 |
12 | static PyMethodDef method_def[] = {
13 | {NULL, NULL, 0, NULL}
14 | };
15 |
16 | static struct PyModuleDef module_def = {
17 | PyModuleDef_HEAD_INIT,
18 | "libbleu", /* name of module */
19 | NULL, /* module documentation, may be NULL */
20 | -1, /* size of per-interpreter state of the module,
21 | or -1 if the module keeps state in global variables. */
22 | method_def
23 | };
24 |
25 |
26 | #if PY_MAJOR_VERSION == 2
27 | PyMODINIT_FUNC init_libbleu()
28 | #else
29 | PyMODINIT_FUNC PyInit_libbleu()
30 | #endif
31 | {
32 | PyObject *m = PyModule_Create(&module_def);
33 | if (!m) {
34 | return NULL;
35 | }
36 | return m;
37 | }
38 |
--------------------------------------------------------------------------------
/fairseq/clib/libnat_cuda/edit_dist.h:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2017-present, Facebook, Inc.
3 | * All rights reserved.
4 | *
5 | * This source code is licensed under the license found in the
6 | * LICENSE file in the root directory of this source tree.
7 | */
8 |
9 | #pragma once
10 |
11 | #include <torch/extension.h>
12 |
13 | torch::Tensor LevenshteinDistanceCuda(
14 | torch::Tensor source,
15 | torch::Tensor target,
16 | torch::Tensor source_length,
17 | torch::Tensor target_length);
18 |
19 | torch::Tensor GenerateDeletionLabelCuda(
20 | torch::Tensor source,
21 | torch::Tensor operations);
22 |
23 | std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda(
24 | torch::Tensor source,
25 | torch::Tensor operations);
26 |
--------------------------------------------------------------------------------
/fairseq/config/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/config/.DS_Store
--------------------------------------------------------------------------------
/fairseq/config/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/config/config.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | hydra:
4 | run:
5 | dir: .
6 |
7 | defaults:
8 | - task: null
9 | - model: null
10 | - criterion: cross_entropy
11 | - optimizer: null
12 | - lr_scheduler: fixed
13 | - bpe: null
14 | - tokenizer: null
15 | - scoring: null
16 | - generation: null
17 | - common_eval: null
18 | - eval_lm: null
19 |
--------------------------------------------------------------------------------
/fairseq/config/model/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/config/model/.DS_Store
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_baevski_gbw.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "relu"
3 | dropout: 0.1
4 | attention_dropout: 0.1
5 | activation_dropout: 0.0
6 | relu_dropout: 0.0
7 | decoder_embed_dim: 512
8 | decoder_output_dim: 512
9 | decoder_input_dim: 512
10 | decoder_ffn_embed_dim: 4096
11 | decoder_layers: 12
12 | decoder_attention_heads: 16
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: true
15 | adaptive_softmax_cutoff: null
16 | adaptive_softmax_dropout: 0
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: false
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: null
27 | tie_adaptive_weights: false
28 | tie_adaptive_proj: false
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_baevski_wiki103.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "relu"
3 | dropout: 0.3
4 | attention_dropout: 0.1
5 | activation_dropout: 0.1
6 | relu_dropout: 0.1
7 | decoder_embed_dim: 1024
8 | decoder_output_dim: 1024
9 | decoder_input_dim: 1024
10 | decoder_ffn_embed_dim: 4096
11 | decoder_layers: 16
12 | decoder_attention_heads: 8
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: true
15 | adaptive_softmax_cutoff: "20000,60000"
16 | adaptive_softmax_dropout: 0.2
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: true
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: "20000,60000"
27 | tie_adaptive_weights: true
28 | tie_adaptive_proj: true
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_big.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "relu"
3 | dropout: 0.1
4 | attention_dropout: 0.0
5 | activation_dropout: 0.0
6 | relu_dropout: 0.0
7 | decoder_embed_dim: 1024
8 | decoder_output_dim: 1024
9 | decoder_input_dim: 1024
10 | decoder_ffn_embed_dim: 4096
11 | decoder_layers: 12
12 | decoder_attention_heads: 16
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: false
15 | adaptive_softmax_cutoff: null
16 | adaptive_softmax_dropout: 0
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: false
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: null
27 | tie_adaptive_weights: false
28 | tie_adaptive_proj: false
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_gbw.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "relu"
3 | dropout: 0.1
4 | attention_dropout: 0.1
5 | activation_dropout: 0.0
6 | relu_dropout: 0.0
7 | decoder_embed_dim: 512
8 | decoder_output_dim: 512
9 | decoder_input_dim: 512
10 | decoder_ffn_embed_dim: 4096
11 | decoder_layers: 12
12 | decoder_attention_heads: 16
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: true
15 | adaptive_softmax_cutoff: null
16 | adaptive_softmax_dropout: 0
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: false
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: null
27 | tie_adaptive_weights: false
28 | tie_adaptive_proj: false
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_gpt.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "gelu"
3 | dropout: 0.1
4 | attention_dropout: 0.1
5 | activation_dropout: 0.0
6 | relu_dropout: 0.0
7 | decoder_embed_dim: 768
8 | decoder_output_dim: 768
9 | decoder_input_dim: 768
10 | decoder_ffn_embed_dim: 3072
11 | decoder_layers: 12
12 | decoder_attention_heads: 12
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: false
15 | adaptive_softmax_cutoff: null
16 | adaptive_softmax_dropout: 0
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: false
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: null
27 | tie_adaptive_weights: false
28 | tie_adaptive_proj: false
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
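
Each of these model groups is a flat key/value YAML. A minimal sketch (not part of the repository) of inspecting one with OmegaConf, the config library Hydra builds on, assuming it is run from the repository root:

# Illustrative sketch: load a model config group and read a few fields.
from omegaconf import OmegaConf

cfg = OmegaConf.load("fairseq/config/model/transformer_lm/transformer_lm_gpt.yaml")
print(cfg.decoder_layers)      # 12
print(cfg.decoder_embed_dim)   # 768
print(cfg.activation_fn)       # gelu
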
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_gpt2_big.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "gelu"
3 | dropout: 0.1
4 | attention_dropout: 0.1
5 | activation_dropout: 0.0
6 | relu_dropout: 0.0
7 | decoder_embed_dim: 1600
8 | decoder_output_dim: 1600
9 | decoder_input_dim: 1600
10 | decoder_ffn_embed_dim: 6400
11 | decoder_layers: 48
12 | decoder_attention_heads: 25
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: false
15 | adaptive_softmax_cutoff: null
16 | adaptive_softmax_dropout: 0
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: false
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: null
27 | tie_adaptive_weights: false
28 | tie_adaptive_proj: false
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_gpt2_medium.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "gelu"
3 | dropout: 0.1
4 | attention_dropout: 0.1
5 | activation_dropout: 0.0
6 | relu_dropout: 0.0
7 | decoder_embed_dim: 1280
8 | decoder_output_dim: 1280
9 | decoder_input_dim: 1280
10 | decoder_ffn_embed_dim: 5120
11 | decoder_layers: 36
12 | decoder_attention_heads: 20
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: false
15 | adaptive_softmax_cutoff: null
16 | adaptive_softmax_dropout: 0
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: false
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: null
27 | tie_adaptive_weights: false
28 | tie_adaptive_proj: false
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_gpt2_small.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "gelu"
3 | dropout: 0.1
4 | attention_dropout: 0.1
5 | activation_dropout: 0.0
6 | relu_dropout: 0.0
7 | decoder_embed_dim: 1024
8 | decoder_output_dim: 1024
9 | decoder_input_dim: 1024
10 | decoder_ffn_embed_dim: 4096
11 | decoder_layers: 24
12 | decoder_attention_heads: 16
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: false
15 | adaptive_softmax_cutoff: null
16 | adaptive_softmax_dropout: 0
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: false
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: null
27 | tie_adaptive_weights: false
28 | tie_adaptive_proj: false
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/transformer_lm/transformer_lm_wiki103.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation_fn: "relu"
3 | dropout: 0.3
4 | attention_dropout: 0.1
5 | activation_dropout: 0.1
6 | relu_dropout: 0.1
7 | decoder_embed_dim: 1024
8 | decoder_output_dim: 1024
9 | decoder_input_dim: 1024
10 | decoder_ffn_embed_dim: 4096
11 | decoder_layers: 16
12 | decoder_attention_heads: 8
13 | decoder_normalize_before: true
14 | no_decoder_final_norm: true
15 | adaptive_softmax_cutoff: "20000,60000"
16 | adaptive_softmax_dropout: 0.2
17 | adaptive_softmax_factor: 4
18 | no_token_positional_embeddings: false
19 | share_decoder_input_output_embed: false
20 | character_embeddings: false
21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
22 | character_embedding_dim: 4
23 | char_embedder_highway_layers: 2
24 | adaptive_input: true
25 | adaptive_input_factor: 4
26 | adaptive_input_cutoff: "20000,60000"
27 | tie_adaptive_weights: true
28 | tie_adaptive_proj: true
29 | decoder_learned_pos: false
30 | decoder_layerdrop: 0
31 | decoder_layers_to_keep: null
32 | layernorm_embedding: false
33 | no_scale_embedding: false
34 | quant_noise_pq: 0
35 | quant_noise_pq_block_size: 8
36 | quant_noise_scalar: 0
37 |
--------------------------------------------------------------------------------
/fairseq/config/model/wav2vec/vq_wav2vec_gumbel.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation: gelu
3 | vq_type: gumbel
4 | vq_depth: 2
5 | combine_groups: true
6 |
--------------------------------------------------------------------------------
/fairseq/config/model/wav2vec2/wav2vec2_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | quantize_targets: true
4 | final_dim: 256
5 | encoder_layerdrop: 0.05
6 | dropout_input: 0.1
7 | dropout_features: 0.1
8 | feature_grad_mult: 0.1
9 |
--------------------------------------------------------------------------------
/fairseq/config/model/wav2vec2/wav2vec2_large.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | quantize_targets: true
4 | extractor_mode: layer_norm
5 | layer_norm_first: true
6 | final_dim: 768
7 | latent_temp: [2.0,0.1,0.999995]
8 | encoder_layerdrop: 0.0
9 | dropout_input: 0.0
10 | dropout_features: 0.0
11 | dropout: 0.0
12 | attention_dropout: 0.0
13 | conv_bias: true
14 |
15 | encoder_layers: 24
16 | encoder_embed_dim: 1024
17 | encoder_ffn_embed_dim: 4096
18 | encoder_attention_heads: 16
19 |
20 | feature_grad_mult: 1.0
21 |
--------------------------------------------------------------------------------
/fairseq/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | import importlib
8 | import os
9 |
10 | from fairseq import registry
11 | from fairseq.criterions.fairseq_criterion import ( # noqa
12 | FairseqCriterion,
13 | LegacyFairseqCriterion,
14 | )
15 | from omegaconf import DictConfig
16 |
17 |
18 | (
19 | build_criterion_,
20 | register_criterion,
21 | CRITERION_REGISTRY,
22 | CRITERION_DATACLASS_REGISTRY,
23 | ) = registry.setup_registry(
24 | "--criterion", base_class=FairseqCriterion, default="cross_entropy"
25 | )
26 |
27 |
28 | def build_criterion(cfg: DictConfig, task):
29 | return build_criterion_(cfg, task)
30 |
31 |
32 | # automatically import any Python files in the criterions/ directory
33 | for file in os.listdir(os.path.dirname(__file__)):
34 | if file.endswith(".py") and not file.startswith("_"):
35 | file_name = file[: file.find(".py")]
36 | importlib.import_module("fairseq.criterions." + file_name)
37 |
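
The registry set up above auto-imports every non-underscore .py file in this directory, so adding a criterion only requires a decorated class. A hedged sketch of that pattern follows; the module name, class name, and loss computation are illustrative, not part of the repository:

# Hypothetical fairseq/criterions/my_criterion.py -- shows the registration
# pattern only; the loss below is a plain NLL sketch, not the repo's method.
import torch.nn.functional as F

from fairseq.criterions import FairseqCriterion, register_criterion


@register_criterion("my_criterion")
class MyCriterion(FairseqCriterion):
    def forward(self, model, sample, reduce=True):
        net_output = model(**sample["net_input"])
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        target = model.get_targets(sample, net_output)
        loss = F.nll_loss(
            lprobs.view(-1, lprobs.size(-1)),
            target.view(-1),
            ignore_index=self.padding_idx,
            reduction="sum" if reduce else "none",
        )
        sample_size = sample["ntokens"]
        logging_output = {"loss": loss.data, "sample_size": sample_size}
        return loss, sample_size, logging_output
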
--------------------------------------------------------------------------------
/fairseq/data/append_token_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from . import BaseWrapperDataset
10 |
11 |
12 | class AppendTokenDataset(BaseWrapperDataset):
13 | def __init__(self, dataset, token=None):
14 | super().__init__(dataset)
15 | self.token = token
16 | if token is not None:
17 | self._sizes = np.array(dataset.sizes) + 1
18 | else:
19 | self._sizes = dataset.sizes
20 |
21 | def __getitem__(self, idx):
22 | item = self.dataset[idx]
23 | if self.token is not None:
24 | item = torch.cat([item, item.new([self.token])])
25 | return item
26 |
27 | @property
28 | def sizes(self):
29 | return self._sizes
30 |
31 | def num_tokens(self, index):
32 | n = self.dataset.num_tokens(index)
33 | if self.token is not None:
34 | n += 1
35 | return n
36 |
37 | def size(self, index):
38 | n = self.dataset.size(index)
39 | if self.token is not None:
40 | n += 1
41 | return n
42 |
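
A minimal sketch of the wrapper's effect (illustrative, not part of the repository; the toy dataset stands in for a real FairseqDataset with a `.sizes` array):

# Illustrative only: a tiny stand-in dataset to show what AppendTokenDataset does.
import numpy as np
import torch

from fairseq.data import AppendTokenDataset


class ToyDataset:
    def __init__(self, tensors):
        self.tensors = tensors
        self.sizes = np.array([len(t) for t in tensors])

    def __getitem__(self, idx):
        return self.tensors[idx]

    def __len__(self):
        return len(self.tensors)


base = ToyDataset([torch.tensor([5, 6, 7]), torch.tensor([8, 9])])
wrapped = AppendTokenDataset(base, token=2)  # e.g. 2 = eos index
print(wrapped[0])      # tensor([5, 6, 7, 2])
print(wrapped.sizes)   # [4 3]
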
--------------------------------------------------------------------------------
/fairseq/data/audio/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/data/audio/__init__.py
--------------------------------------------------------------------------------
/fairseq/data/audio/feature_transforms/global_cmvn.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from fairseq.data.audio.feature_transforms import (
3 | AudioFeatureTransform,
4 | register_audio_feature_transform,
5 | )
6 |
7 |
8 | @register_audio_feature_transform("global_cmvn")
9 | class GlobalCMVN(AudioFeatureTransform):
10 | """Global CMVN (cepstral mean and variance normalization). The global mean
11 | and variance need to be pre-computed and stored in NumPy format (.npz)."""
12 |
13 | @classmethod
14 | def from_config_dict(cls, config=None):
15 | _config = {} if config is None else config
16 | return GlobalCMVN(_config.get("stats_npz_path"))
17 |
18 | def __init__(self, stats_npz_path):
19 | self.stats_npz_path = stats_npz_path
20 | stats = np.load(stats_npz_path)
21 | self.mean, self.std = stats["mean"], stats["std"]
22 |
23 | def __repr__(self):
24 | return self.__class__.__name__ + f'(stats_npz_path="{self.stats_npz_path}")'
25 |
26 | def __call__(self, x):
27 | x = np.subtract(x, self.mean)
28 | x = np.divide(x, self.std)
29 | return x
30 |
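
A hedged sketch of preparing the expected stats file and applying the transform (file name and feature shape are illustrative):

# Illustrative sketch: write a .npz stats file, then apply GlobalCMVN to features.
import numpy as np

from fairseq.data.audio.feature_transforms.global_cmvn import GlobalCMVN

feats = np.random.rand(100, 80).astype(np.float32)  # (frames, mel bins), illustrative
np.savez("gcmvn_stats.npz", mean=feats.mean(axis=0), std=feats.std(axis=0))

transform = GlobalCMVN("gcmvn_stats.npz")
normalized = transform(feats)
print(normalized.mean(axis=0).round(3))  # roughly zero per dimension
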
--------------------------------------------------------------------------------
/fairseq/data/audio/feature_transforms/utterance_cmvn.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from fairseq.data.audio.feature_transforms import (
3 | AudioFeatureTransform,
4 | register_audio_feature_transform,
5 | )
6 |
7 |
8 | @register_audio_feature_transform("utterance_cmvn")
9 | class UtteranceCMVN(AudioFeatureTransform):
10 | """Utterance-level CMVN (cepstral mean and variance normalization)"""
11 |
12 | @classmethod
13 | def from_config_dict(cls, config=None):
14 | _config = {} if config is None else config
15 | return UtteranceCMVN(
16 | _config.get("norm_means", True),
17 | _config.get("norm_vars", True),
18 | )
19 |
20 | def __init__(self, norm_means=True, norm_vars=True):
21 | self.norm_means, self.norm_vars = norm_means, norm_vars
22 |
23 | def __repr__(self):
24 | return (
25 | self.__class__.__name__
26 | + f"(norm_means={self.norm_means}, norm_vars={self.norm_vars})"
27 | )
28 |
29 | def __call__(self, x):
30 | mean = x.mean(axis=0)
31 | square_sums = (x ** 2).sum(axis=0)
32 |
33 | if self.norm_means:
34 | x = np.subtract(x, mean)
35 | if self.norm_vars:
36 | var = square_sums / x.shape[0] - mean ** 2
37 | std = np.sqrt(np.maximum(var, 1e-10))
38 | x = np.divide(x, std)
39 |
40 | return x
41 |
--------------------------------------------------------------------------------
/fairseq/data/colorize_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class ColorizeDataset(BaseWrapperDataset):
12 | """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """
13 |
14 | def __init__(self, dataset, color_getter):
15 | super().__init__(dataset)
16 | self.color_getter = color_getter
17 |
18 | def collater(self, samples):
19 | base_collate = super().collater(samples)
20 | if len(base_collate) > 0:
21 | base_collate["net_input"]["colors"] = torch.tensor(
22 | list(self.color_getter(self.dataset, s["id"]) for s in samples),
23 | dtype=torch.long,
24 | )
25 | return base_collate
26 |
--------------------------------------------------------------------------------
/fairseq/data/concat_sentences_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class ConcatSentencesDataset(FairseqDataset):
12 | def __init__(self, *datasets):
13 | super().__init__()
14 | self.datasets = datasets
15 | assert all(
16 | len(ds) == len(datasets[0]) for ds in datasets
17 | ), "datasets must have the same length"
18 |
19 | def __getitem__(self, index):
20 | return torch.cat([ds[index] for ds in self.datasets])
21 |
22 | def __len__(self):
23 | return len(self.datasets[0])
24 |
25 | def collater(self, samples):
26 | return self.datasets[0].collater(samples)
27 |
28 | @property
29 | def sizes(self):
30 | return sum(ds.sizes for ds in self.datasets)
31 |
32 | def num_tokens(self, index):
33 | return sum(ds.num_tokens(index) for ds in self.datasets)
34 |
35 | def size(self, index):
36 | return sum(ds.size(index) for ds in self.datasets)
37 |
38 | def ordered_indices(self):
39 | return self.datasets[0].ordered_indices()
40 |
41 | @property
42 | def supports_prefetch(self):
43 | return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets)
44 |
45 | def prefetch(self, indices):
46 | for ds in self.datasets:
47 | if getattr(ds, "supports_prefetch", False):
48 | ds.prefetch(indices)
49 |
50 | def set_epoch(self, epoch):
51 | super().set_epoch(epoch)
52 | for ds in self.datasets:
53 | if hasattr(ds, "set_epoch"):
54 | ds.set_epoch(epoch)
55 |
--------------------------------------------------------------------------------
/fairseq/data/data_utils_fast.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/data/data_utils_fast.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/data/data_utils_fast.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/data/data_utils_fast.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/data/data_utils_fast.cpython-38-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/data/data_utils_fast.cpython-38-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/data/encoders/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | import importlib
8 | import os
9 |
10 | from fairseq import registry
11 |
12 |
13 | build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry(
14 | "--tokenizer",
15 | default=None,
16 | )
17 |
18 |
19 | build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry(
20 | "--bpe",
21 | default=None,
22 | )
23 |
24 |
25 | # automatically import any Python files in the encoders/ directory
26 | for file in os.listdir(os.path.dirname(__file__)):
27 | if file.endswith(".py") and not file.startswith("_"):
28 | module = file[: file.find(".py")]
29 | importlib.import_module("fairseq.data.encoders." + module)
30 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/byte_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | from dataclasses import dataclass, field
8 |
9 | from fairseq import file_utils
10 | from fairseq.data.encoders import register_bpe
11 | from fairseq.data.encoders.byte_utils import (
12 | SPACE,
13 | SPACE_ESCAPE,
14 | byte_encode,
15 | smart_byte_decode,
16 | )
17 | from fairseq.dataclass import FairseqDataclass
18 |
19 |
20 | @dataclass
21 | class ByteBpeConfig(FairseqDataclass):
22 | sentencepiece_model_path: str = field(
23 | default="???", metadata={"help": "path to sentencepiece model"}
24 | )
25 |
26 |
27 | @register_bpe("byte_bpe", dataclass=ByteBpeConfig)
28 | class ByteBPE(object):
29 | def __init__(self, cfg):
30 | vocab = file_utils.cached_path(cfg.sentencepiece_model_path)
31 | try:
32 | import sentencepiece as spm
33 |
34 | self.sp = spm.SentencePieceProcessor()
35 | self.sp.Load(vocab)
36 | except ImportError:
37 | raise ImportError(
38 | "Please install sentencepiece with: pip install sentencepiece"
39 | )
40 |
41 | def encode(self, x: str) -> str:
42 | byte_encoded = byte_encode(x)
43 | return SPACE.join(self.sp.EncodeAsPieces(byte_encoded))
44 |
45 | @staticmethod
46 | def decode(x: str) -> str:
47 | unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
48 | return smart_byte_decode(unescaped)
49 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/byte_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import re
7 |
8 |
9 | WHITESPACE_NORMALIZER = re.compile(r"\s+")
10 | SPACE = chr(32)
11 | SPACE_ESCAPE = chr(9601)
12 | # excluding non-breaking space (160) here
13 | PRINTABLE_LATIN = set(
14 | list(range(32, 126 + 1)) + list(range(161, 172 + 1)) + list(range(174, 255 + 1))
15 | )
16 | BYTE_TO_BCHAR = {
17 | b: chr(b) if b in PRINTABLE_LATIN else chr(256 + b) for b in range(256)
18 | }
19 | BCHAR_TO_BYTE = {bc: b for b, bc in BYTE_TO_BCHAR.items()}
20 |
21 |
22 | def byte_encode(x: str) -> str:
23 | normalized = WHITESPACE_NORMALIZER.sub(SPACE, x)
24 | return "".join([BYTE_TO_BCHAR[b] for b in normalized.encode("utf-8")])
25 |
26 |
27 | def byte_decode(x: str) -> str:
28 | try:
29 | return bytes([BCHAR_TO_BYTE[bc] for bc in x]).decode("utf-8")
30 | except ValueError:
31 | return ""
32 |
33 |
34 | def smart_byte_decode(x: str) -> str:
35 | output = byte_decode(x)
36 | if output == "":
37 | # DP the best recovery (max valid chars) if it's broken
38 | n_bytes = len(x)
39 | f = [0 for _ in range(n_bytes + 1)]
40 | pt = [0 for _ in range(n_bytes + 1)]
41 | for i in range(1, n_bytes + 1):
42 | f[i], pt[i] = f[i - 1], i - 1
43 | for j in range(1, min(4, i) + 1):
44 | if f[i - j] + 1 > f[i] and len(byte_decode(x[i - j : i])) > 0:
45 | f[i], pt[i] = f[i - j] + 1, i - j
46 | cur_pt = n_bytes
47 | while cur_pt > 0:
48 | if f[cur_pt] == f[pt[cur_pt]] + 1:
49 | output = byte_decode(x[pt[cur_pt] : cur_pt]) + output
50 | cur_pt = pt[cur_pt]
51 | return output
52 |
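
A quick round-trip sketch of the helpers above (illustrative, not part of the repository):

# Round-trip sketch for the byte-level helpers.
from fairseq.data.encoders.byte_utils import byte_encode, smart_byte_decode

s = "héllo  wörld"             # multi-byte characters and repeated whitespace
enc = byte_encode(s)           # one printable "byte char" per UTF-8 byte
print(enc)
print(smart_byte_decode(enc))  # "héllo wörld" (whitespace normalized to single spaces)
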
--------------------------------------------------------------------------------
/fairseq/data/encoders/bytes.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | from fairseq.data.encoders import register_bpe
8 | from fairseq.data.encoders.byte_utils import (
9 | SPACE,
10 | SPACE_ESCAPE,
11 | byte_encode,
12 | smart_byte_decode,
13 | )
14 |
15 |
16 | @register_bpe("bytes")
17 | class Bytes(object):
18 | def __init__(self, *unused):
19 | pass
20 |
21 | @staticmethod
22 | def add_args(parser):
23 | pass
24 |
25 | @staticmethod
26 | def encode(x: str) -> str:
27 | encoded = byte_encode(x)
28 | escaped = encoded.replace(SPACE, SPACE_ESCAPE)
29 | return SPACE.join(list(escaped))
30 |
31 | @staticmethod
32 | def decode(x: str) -> str:
33 | unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
34 | return smart_byte_decode(unescaped)
35 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/characters.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | from fairseq.data.encoders import register_bpe
8 |
9 |
10 | SPACE = chr(32)
11 | SPACE_ESCAPE = chr(9601)
12 |
13 |
14 | @register_bpe("characters")
15 | class Characters(object):
16 | def __init__(self, *unused):
17 | pass
18 |
19 | @staticmethod
20 | def add_args(parser):
21 | pass
22 |
23 | @staticmethod
24 | def encode(x: str) -> str:
25 | escaped = x.replace(SPACE, SPACE_ESCAPE)
26 | return SPACE.join(list(escaped))
27 |
28 | @staticmethod
29 | def decode(x: str) -> str:
30 | return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
31 |
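
A short sketch of the character-level "BPE" above (illustrative): spaces are escaped, then every character becomes its own space-separated token, and decode reverses both steps.

from fairseq.data.encoders.characters import Characters

enc = Characters.encode("ab cd")
print(enc)                     # "a b ▁ c d"
print(Characters.decode(enc))  # "ab cd"
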
--------------------------------------------------------------------------------
/fairseq/data/encoders/fastbpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from dataclasses import dataclass, field
7 |
8 | from fairseq import file_utils
9 | from fairseq.data.encoders import register_bpe
10 | from fairseq.dataclass import FairseqDataclass
11 |
12 |
13 | @dataclass
14 | class fastBPEConfig(FairseqDataclass):
15 | bpe_codes: str = field(default="???", metadata={"help": "path to fastBPE BPE"})
16 |
17 |
18 | @register_bpe("fastbpe", dataclass=fastBPEConfig)
19 | class fastBPE(object):
20 | def __init__(self, cfg):
21 | if cfg.bpe_codes is None:
22 | raise ValueError("--bpe-codes is required for --bpe=fastbpe")
23 | codes = file_utils.cached_path(cfg.bpe_codes)
24 | try:
25 | import fastBPE
26 |
27 | self.bpe = fastBPE.fastBPE(codes)
28 | self.bpe_symbol = "@@ "
29 | except ImportError:
30 | raise ImportError("Please install fastBPE with: pip install fastBPE")
31 |
32 | def encode(self, x: str) -> str:
33 | return self.bpe.apply([x])[0]
34 |
35 | def decode(self, x: str) -> str:
36 | return (x + " ").replace(self.bpe_symbol, "").rstrip()
37 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/gpt2_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from dataclasses import dataclass, field
7 |
8 | from fairseq import file_utils
9 | from fairseq.data.encoders import register_bpe
10 | from fairseq.dataclass import FairseqDataclass
11 |
12 | from .gpt2_bpe_utils import get_encoder
13 |
14 |
15 | DEFAULT_ENCODER_JSON = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json"
16 | DEFAULT_VOCAB_BPE = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe"
17 |
18 |
19 | @dataclass
20 | class GPT2BPEConfig(FairseqDataclass):
21 | gpt2_encoder_json: str = field(
22 | default=DEFAULT_ENCODER_JSON, metadata={"help": "path to encoder.json"}
23 | )
24 | gpt2_vocab_bpe: str = field(
25 | default=DEFAULT_VOCAB_BPE, metadata={"help": "path to vocab.bpe"}
26 | )
27 |
28 |
29 | @register_bpe("gpt2", dataclass=GPT2BPEConfig)
30 | class GPT2BPE(object):
31 | def __init__(self, cfg):
32 | encoder_json = file_utils.cached_path(cfg.gpt2_encoder_json)
33 | vocab_bpe = file_utils.cached_path(cfg.gpt2_vocab_bpe)
34 | self.bpe = get_encoder(encoder_json, vocab_bpe)
35 |
36 | def encode(self, x: str) -> str:
37 | return " ".join(map(str, self.bpe.encode(x)))
38 |
39 | def decode(self, x: str) -> str:
40 | return self.bpe.decode(
41 | [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()]
42 | )
43 |
44 | def is_beginning_of_word(self, x: str) -> bool:
45 | return self.decode(x).startswith(" ")
46 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/hf_bert_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from dataclasses import dataclass, field
7 | from typing import Optional
8 |
9 | from fairseq.data.encoders import register_bpe
10 | from fairseq.dataclass import FairseqDataclass
11 |
12 |
13 | @dataclass
14 | class BertBPEConfig(FairseqDataclass):
15 | bpe_cased: bool = field(default=False, metadata={"help": "set for cased BPE"})
16 | bpe_vocab_file: Optional[str] = field(
17 | default=None, metadata={"help": "bpe vocab file"}
18 | )
19 |
20 |
21 | @register_bpe("bert", dataclass=BertBPEConfig)
22 | class BertBPE(object):
23 | def __init__(self, cfg):
24 | try:
25 | from transformers import BertTokenizer
26 | except ImportError:
27 | raise ImportError(
28 | "Please install transformers with: pip install transformers"
29 | )
30 |
31 | if cfg.bpe_vocab_file:
32 | self.bert_tokenizer = BertTokenizer(
33 | cfg.bpe_vocab_file, do_lower_case=not cfg.bpe_cased
34 | )
35 | else:
36 | vocab_file_name = (
37 | "bert-base-cased" if cfg.bpe_cased else "bert-base-uncased"
38 | )
39 | self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name)
40 |
41 | def encode(self, x: str) -> str:
42 | return " ".join(self.bert_tokenizer.tokenize(x))
43 |
44 | def decode(self, x: str) -> str:
45 | return self.bert_tokenizer.clean_up_tokenization(
46 | self.bert_tokenizer.convert_tokens_to_string(x.split(" "))
47 | )
48 |
49 | def is_beginning_of_word(self, x: str) -> bool:
50 | return not x.startswith("##")
51 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/hf_byte_bpe.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from dataclasses import dataclass, field
7 |
8 | from fairseq.data.encoders import register_bpe
9 | from fairseq.dataclass import FairseqDataclass
10 | from fairseq import file_utils
11 |
12 |
13 | @dataclass
14 | class HuggingFaceByteLevelBPEConfig(FairseqDataclass):
15 | bpe_merges: str = field(default="???", metadata={"help": "path to merges.txt"})
16 | bpe_vocab: str = field(default="???", metadata={"help": "path to vocab.json"})
17 | bpe_add_prefix_space: bool = field(
18 | default=False, metadata={"help": "add prefix space before encoding"}
19 | )
20 |
21 |
22 | @register_bpe("hf_byte_bpe", dataclass=HuggingFaceByteLevelBPEConfig)
23 | class HuggingFaceByteLevelBPE(object):
24 | def __init__(self, cfg):
25 | try:
26 | from tokenizers import ByteLevelBPETokenizer
27 | except ImportError:
28 | raise ImportError(
29 | "Please install huggingface/tokenizers with: " "pip install tokenizers"
30 | )
31 |
32 | bpe_vocab = file_utils.cached_path(cfg.bpe_vocab)
33 | bpe_merges = file_utils.cached_path(cfg.bpe_merges)
34 |
35 | self.bpe = ByteLevelBPETokenizer(
36 | bpe_vocab,
37 | bpe_merges,
38 | add_prefix_space=cfg.bpe_add_prefix_space,
39 | )
40 |
41 | def encode(self, x: str) -> str:
42 | return " ".join(map(str, self.bpe.encode(x).ids))
43 |
44 | def decode(self, x: str) -> str:
45 | return self.bpe.decode(
46 | [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()]
47 | )
48 |
49 | def is_beginning_of_word(self, x: str) -> bool:
50 | return self.decode(x).startswith(" ")
51 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/moses_tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from dataclasses import dataclass, field
7 |
8 | from fairseq.data.encoders import register_tokenizer
9 | from fairseq.dataclass import FairseqDataclass
10 |
11 |
12 | @dataclass
13 | class MosesTokenizerConfig(FairseqDataclass):
14 | source_lang: str = field(default="en", metadata={"help": "source language"})
15 | target_lang: str = field(default="en", metadata={"help": "target language"})
16 | moses_no_dash_splits: bool = field(
17 | default=False, metadata={"help": "don't apply dash split rules"}
18 | )
19 | moses_no_escape: bool = field(
20 | default=False,
21 | metadata={"help": "don't perform HTML escaping on apostrophe, quotes, etc."},
22 | )
23 |
24 |
25 | @register_tokenizer("moses", dataclass=MosesTokenizerConfig)
26 | class MosesTokenizer(object):
27 | def __init__(self, cfg: MosesTokenizerConfig):
28 | self.cfg = cfg
29 |
30 | try:
31 | from sacremoses import MosesTokenizer, MosesDetokenizer
32 |
33 | self.tok = MosesTokenizer(cfg.source_lang)
34 | self.detok = MosesDetokenizer(cfg.target_lang)
35 | except ImportError:
36 | raise ImportError(
37 | "Please install Moses tokenizer with: pip install sacremoses"
38 | )
39 |
40 | def encode(self, x: str) -> str:
41 | return self.tok.tokenize(
42 | x,
43 | aggressive_dash_splits=(not self.cfg.moses_no_dash_splits),
44 | return_str=True,
45 | escape=(not self.cfg.moses_no_escape),
46 | )
47 |
48 | def decode(self, x: str) -> str:
49 | return self.detok.detokenize(x.split())
50 |
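
A usage sketch (illustrative, not part of the repository; requires sacremoses to be installed):

from fairseq.data.encoders.moses_tokenizer import MosesTokenizer, MosesTokenizerConfig

tok = MosesTokenizer(MosesTokenizerConfig(source_lang="en", target_lang="en"))
encoded = tok.encode("Hello, world! It's a test.")
print(encoded)              # space-separated tokens with punctuation split off
print(tok.decode(encoded))  # detokenized back into a sentence
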
--------------------------------------------------------------------------------
/fairseq/data/encoders/nltk_tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data.encoders import register_tokenizer
7 | from fairseq.dataclass import FairseqDataclass
8 |
9 |
10 | @register_tokenizer("nltk", dataclass=FairseqDataclass)
11 | class NLTKTokenizer(object):
12 | def __init__(self, *unused):
13 | try:
14 | from nltk.tokenize import word_tokenize
15 |
16 | self.word_tokenize = word_tokenize
17 | except ImportError:
18 | raise ImportError("Please install nltk with: pip install nltk")
19 |
20 | def encode(self, x: str) -> str:
21 | return " ".join(self.word_tokenize(x))
22 |
23 | def decode(self, x: str) -> str:
24 | return x
25 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/space_tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import re
7 |
8 | from fairseq.data.encoders import register_tokenizer
9 | from fairseq.dataclass import FairseqDataclass
10 |
11 |
12 | @register_tokenizer("space", dataclass=FairseqDataclass)
13 | class SpaceTokenizer(object):
14 | def __init__(self, *unused):
15 | self.space_tok = re.compile(r"\s+")
16 |
17 | def encode(self, x: str) -> str:
18 | return self.space_tok.sub(" ", x)
19 |
20 | def decode(self, x: str) -> str:
21 | return x
22 |
--------------------------------------------------------------------------------
/fairseq/data/encoders/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | from fairseq.data import encoders
8 |
9 |
10 | def get_whole_word_mask(args, dictionary):
11 | bpe = encoders.build_bpe(args)
12 | if bpe is not None:
13 |
14 | def is_beginning_of_word(i):
15 | if i < dictionary.nspecial:
16 | # special elements are always considered beginnings
17 | return True
18 | tok = dictionary[i]
19 | if tok.startswith("madeupword"):
20 | return True
21 | try:
22 | return bpe.is_beginning_of_word(tok)
23 | except ValueError:
24 | return True
25 |
26 | mask_whole_words = torch.ByteTensor(
27 | list(map(is_beginning_of_word, range(len(dictionary))))
28 | )
29 | return mask_whole_words
30 | return None
31 |
--------------------------------------------------------------------------------
/fairseq/data/id_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class IdDataset(FairseqDataset):
12 | def __getitem__(self, index):
13 | return index
14 |
15 | def __len__(self):
16 | return 0
17 |
18 | def collater(self, samples):
19 | return torch.tensor(samples)
20 |
--------------------------------------------------------------------------------
/fairseq/data/legacy/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .block_pair_dataset import BlockPairDataset
7 | from .masked_lm_dataset import MaskedLMDataset
8 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary
9 |
10 |
11 | __all__ = [
12 | "BertDictionary",
13 | "BlockPairDataset",
14 | "MaskedLMDataset",
15 | "MaskedLMDictionary",
16 | ]
17 |
--------------------------------------------------------------------------------
/fairseq/data/legacy/masked_lm_dictionary.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data import Dictionary
7 |
8 |
9 | class MaskedLMDictionary(Dictionary):
10 | """
11 | Dictionary for Masked Language Modelling tasks. This extends Dictionary by
12 | adding the mask symbol.
13 | """
14 |
15 | def __init__(
16 | self,
17 | pad="",
18 | eos="",
19 | unk="",
20 | mask="",
21 | ):
22 | super().__init__(pad=pad, eos=eos, unk=unk)
23 | self.mask_word = mask
24 | self.mask_index = self.add_symbol(mask)
25 | self.nspecial = len(self.symbols)
26 |
27 | def mask(self):
28 | """Helper to get index of mask symbol"""
29 | return self.mask_index
30 |
31 |
32 | class BertDictionary(MaskedLMDictionary):
33 | """
34 | Dictionary for BERT task. This extends MaskedLMDictionary by adding support
35 | for cls and sep symbols.
36 | """
37 |
38 | def __init__(
39 | self,
40 | pad="",
41 | eos="",
42 | unk="",
43 | mask="",
44 | cls="",
45 | sep="",
46 | ):
47 | super().__init__(pad=pad, eos=eos, unk=unk, mask=mask)
48 | self.cls_word = cls
49 | self.sep_word = sep
50 | self.cls_index = self.add_symbol(cls)
51 | self.sep_index = self.add_symbol(sep)
52 | self.nspecial = len(self.symbols)
53 |
54 | def cls(self):
55 | """Helper to get index of cls symbol"""
56 | return self.cls_index
57 |
58 | def sep(self):
59 | """Helper to get index of sep symbol"""
60 | return self.sep_index
61 |
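
A minimal sketch of how the extra symbols behave (illustrative, not part of the repository): they are appended after the base specials, and the helper methods return their indices.

from fairseq.data.legacy import BertDictionary, MaskedLMDictionary

d = MaskedLMDictionary()
print(d.mask())        # index of "<mask>"
print(d[d.mask()])     # "<mask>"

bd = BertDictionary()
print(bd[bd.cls()], bd[bd.sep()])  # "<cls>" "<sep>"
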
--------------------------------------------------------------------------------
/fairseq/data/list_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class ListDataset(BaseWrapperDataset):
10 | def __init__(self, dataset, sizes=None):
11 | super().__init__(dataset)
12 | self._sizes = sizes
13 |
14 | def __iter__(self):
15 | for x in self.dataset:
16 | yield x
17 |
18 | def collater(self, samples):
19 | return samples
20 |
21 | @property
22 | def sizes(self):
23 | return self._sizes
24 |
25 | def num_tokens(self, index):
26 | return self.sizes[index]
27 |
28 | def size(self, index):
29 | return self.sizes[index]
30 |
31 | def set_epoch(self, epoch):
32 | pass
33 |
--------------------------------------------------------------------------------
/fairseq/data/lru_cache_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from functools import lru_cache
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class LRUCacheDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, token=None):
13 | super().__init__(dataset)
14 |
15 | @lru_cache(maxsize=8)
16 | def __getitem__(self, index):
17 | return self.dataset[index]
18 |
19 | @lru_cache(maxsize=8)
20 | def collater(self, samples):
21 | return self.dataset.collater(samples)
22 |
--------------------------------------------------------------------------------
/fairseq/data/multilingual/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/data/multilingual/multilingual_utils.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import Dict, List, Optional, Sequence
3 |
4 | import torch
5 | from fairseq.data import Dictionary
6 |
7 |
8 | class EncoderLangtok(Enum):
9 | """
10 | Prepend to the beginning of source sentence either the
11 | source or target language token. (src/tgt).
12 | """
13 |
14 | src = "src"
15 | tgt = "tgt"
16 |
17 |
18 | class LangTokSpec(Enum):
19 | main = "main"
20 | mono_dae = "mono_dae"
21 |
22 |
23 | class LangTokStyle(Enum):
24 | multilingual = "multilingual"
25 | mbart = "mbart"
26 |
27 |
28 | @torch.jit.export
29 | def get_lang_tok(
30 | lang: str, lang_tok_style: str, spec: str = LangTokSpec.main.value
31 | ) -> str:
32 | # TOKEN_STYLES can't be defined outside this fn since it needs to be
33 | # TorchScriptable.
34 | TOKEN_STYLES: Dict[str, str] = {
35 | LangTokStyle.mbart.value: "[{}]",
36 | LangTokStyle.multilingual.value: "__{}__",
37 | }
38 |
39 | if spec.endswith("dae"):
40 | lang = f"{lang}_dae"
41 | elif spec.endswith("mined"):
42 | lang = f"{lang}_mined"
43 | style = TOKEN_STYLES[lang_tok_style]
44 | return style.format(lang)
45 |
46 |
47 | def augment_dictionary(
48 | dictionary: Dictionary,
49 | language_list: List[str],
50 | lang_tok_style: str,
51 | langtoks_specs: Sequence[str] = (LangTokSpec.main.value,),
52 | extra_data: Optional[Dict[str, str]] = None,
53 | ) -> None:
54 | for spec in langtoks_specs:
55 | for language in language_list:
56 | dictionary.add_symbol(
57 | get_lang_tok(lang=language, lang_tok_style=lang_tok_style, spec=spec)
58 | )
59 |
60 | if lang_tok_style == LangTokStyle.mbart.value or (
61 | extra_data is not None and LangTokSpec.mono_dae.value in extra_data
62 | ):
63 | dictionary.add_symbol("")
64 |
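
A sketch of the language-token helpers above (illustrative, not part of the repository):

from fairseq.data import Dictionary
from fairseq.data.multilingual.multilingual_utils import (
    LangTokStyle,
    augment_dictionary,
    get_lang_tok,
)

print(get_lang_tok("de", LangTokStyle.multilingual.value))  # "__de__"
print(get_lang_tok("de", LangTokStyle.mbart.value))         # "[de]"

d = Dictionary()
augment_dictionary(d, ["en", "de"], LangTokStyle.multilingual.value)
print(d.index("__de__") != d.unk())  # True: the language tokens were added
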
--------------------------------------------------------------------------------
/fairseq/data/num_samples_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import FairseqDataset
7 |
8 |
9 | class NumSamplesDataset(FairseqDataset):
10 | def __getitem__(self, index):
11 | return 1
12 |
13 | def __len__(self):
14 | return 0
15 |
16 | def collater(self, samples):
17 | return sum(samples)
18 |
--------------------------------------------------------------------------------
/fairseq/data/numel_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from . import BaseWrapperDataset
10 |
11 |
12 | class NumelDataset(BaseWrapperDataset):
13 | def __init__(self, dataset, reduce=False):
14 | super().__init__(dataset)
15 | self.reduce = reduce
16 |
17 | def __getitem__(self, index):
18 | item = self.dataset[index]
19 | if torch.is_tensor(item):
20 | return torch.numel(item)
21 | else:
22 | return np.size(item)
23 |
24 | def __len__(self):
25 | return len(self.dataset)
26 |
27 | def collater(self, samples):
28 | if self.reduce:
29 | return sum(samples)
30 | else:
31 | return torch.tensor(samples)
32 |
--------------------------------------------------------------------------------
/fairseq/data/offset_tokens_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class OffsetTokensDataset(BaseWrapperDataset):
10 | def __init__(self, dataset, offset):
11 | super().__init__(dataset)
12 | self.offset = offset
13 |
14 | def __getitem__(self, idx):
15 | return self.dataset[idx] + self.offset
16 |
--------------------------------------------------------------------------------
/fairseq/data/pad_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data import data_utils
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class PadDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, pad_idx, left_pad):
13 | super().__init__(dataset)
14 | self.pad_idx = pad_idx
15 | self.left_pad = left_pad
16 |
17 | def collater(self, samples):
18 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad)
19 |
20 |
21 | class LeftPadDataset(PadDataset):
22 | def __init__(self, dataset, pad_idx):
23 | super().__init__(dataset, pad_idx, left_pad=True)
24 |
25 |
26 | class RightPadDataset(PadDataset):
27 | def __init__(self, dataset, pad_idx):
28 | super().__init__(dataset, pad_idx, left_pad=False)
29 |
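
A sketch of what the collater does (illustrative): it defers to data_utils.collate_tokens, which pads variable-length 1-D tensors into a single (batch, max_len) tensor.

import torch

from fairseq.data import data_utils

batch = [torch.tensor([4, 5, 6]), torch.tensor([7, 8])]
padded = data_utils.collate_tokens(batch, pad_idx=1, left_pad=False)
print(padded)
# tensor([[4, 5, 6],
#         [7, 8, 1]])
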
--------------------------------------------------------------------------------
/fairseq/data/prepend_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from . import BaseWrapperDataset
10 |
11 |
12 | class PrependDataset(BaseWrapperDataset):
13 | def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
14 | super().__init__(dataset)
15 | self.prepend_getter = prepend_getter
16 | self.ensure_first_token = ensure_first_token_is
17 |
18 | def __getitem__(self, idx):
19 | item = self.dataset[idx]
20 | is_tuple = isinstance(item, tuple)
21 | src = item[0] if is_tuple else item
22 |
23 | assert self.ensure_first_token is None or src[0] == self.ensure_first_token
24 | prepend_idx = self.prepend_getter(self.dataset, idx)
25 | assert isinstance(prepend_idx, int)
26 | src[0] = prepend_idx
27 | item = tuple((src,) + item[1:]) if is_tuple else src
28 | return item
29 |
--------------------------------------------------------------------------------
/fairseq/data/prepend_token_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import torch
8 |
9 | from . import BaseWrapperDataset
10 |
11 |
12 | class PrependTokenDataset(BaseWrapperDataset):
13 | def __init__(self, dataset, token=None):
14 | super().__init__(dataset)
15 | self.token = token
16 | if token is not None:
17 | self._sizes = np.array(dataset.sizes) + 1
18 | else:
19 | self._sizes = dataset.sizes
20 |
21 | def __getitem__(self, idx):
22 | item = self.dataset[idx]
23 | if self.token is not None:
24 | item = torch.cat([item.new([self.token]), item])
25 | return item
26 |
27 | @property
28 | def sizes(self):
29 | return self._sizes
30 |
31 | def num_tokens(self, index):
32 | n = self.dataset.num_tokens(index)
33 | if self.token is not None:
34 | n += 1
35 | return n
36 |
37 | def size(self, index):
38 | n = self.dataset.size(index)
39 | if self.token is not None:
40 | n += 1
41 | return n
42 |
--------------------------------------------------------------------------------
/fairseq/data/raw_label_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class RawLabelDataset(FairseqDataset):
12 | def __init__(self, labels):
13 | super().__init__()
14 | self.labels = labels
15 |
16 | def __getitem__(self, index):
17 | return self.labels[index]
18 |
19 | def __len__(self):
20 | return len(self.labels)
21 |
22 | def collater(self, samples):
23 | return torch.tensor(samples)
24 |
--------------------------------------------------------------------------------
/fairseq/data/replace_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class ReplaceDataset(BaseWrapperDataset):
10 | """Replaces tokens found in the dataset by a specified replacement token
11 |
12 | Args:
13 | dataset (~torch.utils.data.Dataset): dataset to replace tokens in
14 | replace_map (Dict[int, int]): map of token to replace -> replacement token
15 | offsets (List[int]): do not replace tokens before this offset (from the left if positive, from the right if negative).
16 | Should contain one entry per object returned by the underlying dataset's __getitem__ method.
17 | """
18 |
19 | def __init__(self, dataset, replace_map, offsets):
20 | super().__init__(dataset)
21 | assert len(replace_map) > 0
22 | self.replace_map = replace_map
23 | self.offsets = offsets
24 |
25 | def __getitem__(self, index):
26 | item = self.dataset[index]
27 | is_tuple = isinstance(item, tuple)
28 | srcs = item if is_tuple else [item]
29 |
30 | for offset, src in zip(self.offsets, srcs):
31 | for k, v in self.replace_map.items():
32 | src_off = src[offset:] if offset >= 0 else src[:offset]
33 | src_off.masked_fill_(src_off == k, v)
34 |
35 | item = srcs if is_tuple else srcs[0]
36 | return item
37 |
--------------------------------------------------------------------------------
/fairseq/data/roll_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class RollDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, shifts):
13 | super().__init__(dataset)
14 | self.shifts = shifts
15 |
16 | def __getitem__(self, index):
17 | item = self.dataset[index]
18 | return torch.roll(item, self.shifts)
19 |
--------------------------------------------------------------------------------
/fairseq/data/sort_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class SortDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, sort_order):
13 | super().__init__(dataset)
14 | if not isinstance(sort_order, (list, tuple)):
15 | sort_order = [sort_order]
16 | self.sort_order = sort_order
17 |
18 | assert all(len(so) == len(dataset) for so in sort_order)
19 |
20 | def ordered_indices(self):
21 | return np.lexsort(self.sort_order)
22 |
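
np.lexsort sorts by the last key first, so the final entry in sort_order is the primary key. A quick standalone illustration (not part of the repository):

import numpy as np

lengths = np.array([3, 1, 2, 1])
tie_break = np.array([0, 1, 0, 0])
order = np.lexsort([tie_break, lengths])  # sort by lengths, break ties by tie_break
print(order)  # [3 1 2 0]
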
--------------------------------------------------------------------------------
/fairseq/data/strip_token_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class StripTokenDataset(BaseWrapperDataset):
10 | def __init__(self, dataset, id_to_strip):
11 | super().__init__(dataset)
12 | self.id_to_strip = id_to_strip
13 |
14 | def __getitem__(self, index):
15 | item = self.dataset[index]
16 | while len(item) > 0 and item[-1] == self.id_to_strip:
17 | item = item[:-1]
18 | while len(item) > 0 and item[0] == self.id_to_strip:
19 | item = item[1:]
20 | return item
21 |
--------------------------------------------------------------------------------
/fairseq/data/token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/data/token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/data/token_block_utils_fast.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/data/token_block_utils_fast.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/data/token_block_utils_fast.cpython-38-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/data/token_block_utils_fast.cpython-38-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/dataclass/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .configs import FairseqDataclass
7 | from .constants import ChoiceEnum
8 |
9 |
10 | __all__ = [
11 | "FairseqDataclass",
12 | "ChoiceEnum",
13 | ]
14 |
--------------------------------------------------------------------------------
/fairseq/dataclass/constants.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from enum import Enum, EnumMeta
7 | from typing import List
8 |
9 |
10 | class StrEnumMeta(EnumMeta):
11 | # this is a workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see
12 | # https://github.com/facebookresearch/hydra/issues/1156
13 | @classmethod
14 | def __instancecheck__(cls, other):
15 | return "enum" in str(type(other))
16 |
17 |
18 | class StrEnum(Enum, metaclass=StrEnumMeta):
19 | def __str__(self):
20 | return self.value
21 |
22 | def __eq__(self, other: str):
23 | return self.value == other
24 |
25 | def __repr__(self):
26 | return self.value
27 |
28 | def __hash__(self):
29 | return hash(str(self))
30 |
31 |
32 | def ChoiceEnum(choices: List[str]):
33 | """return the Enum class used to enforce list of choices"""
34 | return StrEnum("Choices", {k: k for k in choices})
35 |
36 |
37 | LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
38 | DDP_BACKEND_CHOICES = ChoiceEnum(["c10d", "no_c10d"])
39 | DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta"])
40 | DISTRIBUTED_WRAPPER_CHOICES = ChoiceEnum(["DDP", "SlowMo"])
41 | GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"])
42 | GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum(
43 | ["unigram", "ensemble", "vote", "dp", "bs"]
44 | )
45 | ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
46 | PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"])
47 | PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
48 |
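A short usage sketch (the choice list below is hypothetical, not one of the constants above): members of a ChoiceEnum compare equal to, and stringify as, their raw string values, which is how the *_CHOICES constants are consumed by argument parsing.

from fairseq.dataclass.constants import ChoiceEnum

COLOR_CHOICES = ChoiceEnum(["red", "green"])  # hypothetical choices
assert COLOR_CHOICES.red == "red"             # __eq__ compares against the raw string
assert str(COLOR_CHOICES.green) == "green"    # __str__ returns the underlying value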
--------------------------------------------------------------------------------
/fairseq/dataclass/initialize.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | import logging
8 | from typing import Dict, Any
9 | from hydra.core.config_store import ConfigStore
10 | from fairseq.dataclass.configs import FairseqConfig
11 |
12 |
13 | logger = logging.getLogger(__name__)
14 |
15 |
16 | def hydra_init(cfg_name="config") -> None:
17 |
18 | cs = ConfigStore.instance()
19 | cs.store(name=cfg_name, node=FairseqConfig)
20 |
21 | for k in FairseqConfig.__dataclass_fields__:
22 | v = FairseqConfig.__dataclass_fields__[k].default
23 | try:
24 | cs.store(name=k, node=v)
25 | except BaseException:
26 | logger.error(f"{k} - {v}")
27 | raise
28 |
--------------------------------------------------------------------------------
/fairseq/libbleu.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/libbleu.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/libbleu.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/libbleu.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/libbleu.cpython-38-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/libbleu.cpython-38-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/libnat.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/libnat.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/libnat.cpython-38-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/libnat.cpython-38-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/fairseq/logging/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/logging/__init__.py
--------------------------------------------------------------------------------
/fairseq/model_parallel/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/model_parallel/.DS_Store
--------------------------------------------------------------------------------
/fairseq/model_parallel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import criterions, models, modules # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/model_parallel/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the criterions/ directory
11 | for file in os.listdir(os.path.dirname(__file__)):
12 | if file.endswith(".py") and not file.startswith("_"):
13 | module = file[: file.find(".py")]
14 | importlib.import_module("fairseq.model_parallel.criterions." + module)
15 |
--------------------------------------------------------------------------------
/fairseq/model_parallel/models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/model_parallel/models/.DS_Store
--------------------------------------------------------------------------------
/fairseq/model_parallel/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the models/ directory
11 | models_dir = os.path.dirname(__file__)
12 | for file in os.listdir(models_dir):
13 | path = os.path.join(models_dir, file)
14 | if (
15 | not file.startswith("_")
16 | and not file.startswith(".")
17 | and (file.endswith(".py") or os.path.isdir(path))
18 | ):
19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file
20 | module = importlib.import_module("fairseq.model_parallel.models." + model_name)
21 |
--------------------------------------------------------------------------------
/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .model import * # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/model_parallel/models/roberta/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .model import * # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/model_parallel/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | from .multihead_attention import ModelParallelMultiheadAttention
8 | from .transformer_layer import (
9 | ModelParallelTransformerEncoderLayer,
10 | ModelParallelTransformerDecoderLayer,
11 | )
12 | from .transformer_sentence_encoder_layer import (
13 | ModelParallelTransformerSentenceEncoderLayer,
14 | )
15 | from .transformer_sentence_encoder import ModelParallelTransformerSentenceEncoder
16 |
17 | __all__ = [
18 | "ModelParallelMultiheadAttention",
19 | "ModelParallelTransformerEncoderLayer",
20 | "ModelParallelTransformerDecoderLayer",
21 | "ModelParallelTransformerSentenceEncoder",
22 | "ModelParallelTransformerSentenceEncoderLayer",
23 | ]
24 |
--------------------------------------------------------------------------------
/fairseq/models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/models/.DS_Store
--------------------------------------------------------------------------------
/fairseq/models/archived/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/models/archived/__init__.py
--------------------------------------------------------------------------------
/fairseq/models/bart/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .hub_interface import * # noqa
7 | from .model import * # noqa
8 |
--------------------------------------------------------------------------------
/fairseq/models/future_work/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/models/future_work/__init__.py
--------------------------------------------------------------------------------
/fairseq/models/huggingface/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the models/huggingface/ directory
11 | models_dir = os.path.dirname(__file__)
12 | for file in os.listdir(models_dir):
13 | path = os.path.join(models_dir, file)
14 | if (
15 | not file.startswith("_")
16 | and not file.startswith(".")
17 | and (file.endswith(".py") or os.path.isdir(path))
18 | ):
19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file
20 | module = importlib.import_module("fairseq.models.huggingface." + model_name)
21 |
--------------------------------------------------------------------------------
/fairseq/models/roberta/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .hub_interface import * # noqa
7 | from .model import * # noqa
8 | from .model_camembert import * # noqa
9 | from .model_gottbert import * # noqa
10 | from .model_xlmr import * # noqa
11 |
--------------------------------------------------------------------------------
/fairseq/models/roberta/model_gottbert.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | GottBERT: a pure German Language Model
7 | """
8 |
9 | from fairseq.models import register_model
10 |
11 | from .hub_interface import RobertaHubInterface
12 | from .model import RobertaModel
13 |
14 |
15 | @register_model('gottbert')
16 | class GottbertModel(RobertaModel):
17 |
18 | @classmethod
19 | def hub_models(cls):
20 | return {
21 | 'gottbert-base': 'https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz',
22 | }
23 |
24 | @classmethod
25 | def from_pretrained(cls,
26 | model_name_or_path,
27 | checkpoint_file='model.pt',
28 | data_name_or_path='.',
29 | bpe='hf_byte_bpe',
30 | bpe_vocab='vocab.json',
31 | bpe_merges='merges.txt',
32 | bpe_add_prefix_space=False,
33 | **kwargs
34 | ):
35 | from fairseq import hub_utils
36 |
37 | x = hub_utils.from_pretrained(
38 | model_name_or_path,
39 | checkpoint_file,
40 | data_name_or_path,
41 | archive_map=cls.hub_models(),
42 | bpe=bpe,
43 | load_checkpoint_heads=True,
44 | bpe_vocab=bpe_vocab,
45 | bpe_merges=bpe_merges,
46 | bpe_add_prefix_space=bpe_add_prefix_space,
47 | **kwargs,
48 | )
49 | return RobertaHubInterface(x['args'], x['task'], x['models'][0])
50 |
--------------------------------------------------------------------------------
/fairseq/models/roberta/model_xlmr.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | Unsupervised Cross-lingual Representation Learning at Scale
7 | """
8 |
9 | from fairseq.models import register_model
10 |
11 | from .hub_interface import RobertaHubInterface
12 | from .model import RobertaModel
13 |
14 |
15 | @register_model("xlmr")
16 | class XLMRModel(RobertaModel):
17 | @classmethod
18 | def hub_models(cls):
19 | return {
20 | "xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz",
21 | "xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz",
22 | }
23 |
24 | @classmethod
25 | def from_pretrained(
26 | cls,
27 | model_name_or_path,
28 | checkpoint_file="model.pt",
29 | data_name_or_path=".",
30 | bpe="sentencepiece",
31 | **kwargs
32 | ):
33 | from fairseq import hub_utils
34 |
35 | x = hub_utils.from_pretrained(
36 | model_name_or_path,
37 | checkpoint_file,
38 | data_name_or_path,
39 | archive_map=cls.hub_models(),
40 | bpe=bpe,
41 | load_checkpoint_heads=True,
42 | **kwargs,
43 | )
44 | return RobertaHubInterface(x["args"], x["task"], x["models"][0])
45 |
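A hedged usage sketch, assuming one of the archives listed in hub_models() has already been downloaded and extracted; the local path below is a placeholder:

from fairseq.models.roberta import XLMRModel

xlmr = XLMRModel.from_pretrained('/path/to/xlmr.base', checkpoint_file='model.pt')
xlmr.eval()                              # from_pretrained returns a RobertaHubInterface
tokens = xlmr.encode('Hello world!')     # sentencepiece BPE + dictionary encoding
print(xlmr.decode(tokens))               # round-trips back to 'Hello world!'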
--------------------------------------------------------------------------------
/fairseq/models/speech_to_text/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .berard import * # noqa
7 | from .s2t_transformer import * # noqa
8 |
--------------------------------------------------------------------------------
/fairseq/models/wav2vec/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .wav2vec import * # noqa
7 | from .wav2vec2 import * # noqa
8 | from .wav2vec2_asr import * # noqa
9 |
--------------------------------------------------------------------------------
/fairseq/modules/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/modules/.DS_Store
--------------------------------------------------------------------------------
/fairseq/modules/conv_tbc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | from torch.nn.modules.utils import _single
8 | from torch import Tensor
9 |
10 |
11 | class ConvTBC(torch.nn.Module):
12 | """1D convolution over an input of shape (time x batch x channel)
13 |
14 | The implementation uses gemm to perform the convolution. This implementation
15 | is faster than cuDNN for small kernel sizes.
16 | """
17 |
18 | def __init__(self, in_channels, out_channels, kernel_size, padding=0):
19 | super(ConvTBC, self).__init__()
20 | self.in_channels = in_channels
21 | self.out_channels = out_channels
22 | self.kernel_size = _single(kernel_size)
23 | self.padding = _single(padding)
24 |
25 | self.weight = torch.nn.Parameter(
26 | torch.Tensor(self.kernel_size[0], in_channels, out_channels)
27 | )
28 | self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
29 |
30 | def conv_tbc(self, input: Tensor):
31 | return torch.conv_tbc(
32 | input.contiguous(), self.weight, self.bias, self.padding[0]
33 | )
34 |
35 | def forward(self, input: Tensor):
36 | return self.conv_tbc(input)
37 |
38 | def __repr__(self):
39 | s = (
40 | "{name}({in_channels}, {out_channels}, kernel_size={kernel_size}"
41 | ", padding={padding}"
42 | )
43 | if self.bias is None:
44 | s += ", bias=False"
45 | s += ")"
46 | return s.format(name=self.__class__.__name__, **self.__dict__)
47 |
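A shape-only sketch (untrained, uninitialized weights, so only the output shape is meaningful): note the (time, batch, channels) layout, unlike the (batch, channels, time) layout expected by nn.Conv1d.

import torch
from fairseq.modules.conv_tbc import ConvTBC

conv = ConvTBC(in_channels=4, out_channels=8, kernel_size=3, padding=1)
x = torch.randn(10, 2, 4)   # T x B x C
print(conv(x).shape)        # torch.Size([10, 2, 8])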
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .dynamicconv_layer import DynamicconvLayer # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | #include <torch/extension.h>
9 | #include <vector>
10 |
11 | std::vector<at::Tensor> dynamicconv_cuda_forward(
12 | at::Tensor input,
13 | at::Tensor filters,
14 | int padding_l);
15 |
16 | std::vector<at::Tensor> dynamicconv_cuda_backward(
17 | at::Tensor gradOutput,
18 | int padding_l,
19 | at::Tensor input,
20 | at::Tensor filters);
21 |
22 |
23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
26 |
27 | std::vector<at::Tensor> dynamicconv_forward(
28 | at::Tensor input,
29 | at::Tensor filters,
30 | int padding_l) {
31 |
32 | CHECK_INPUT(input);
33 | CHECK_INPUT(filters);
34 |
35 | return dynamicconv_cuda_forward(input, filters,
36 | padding_l);
37 | }
38 |
39 | std::vector<at::Tensor> dynamicconv_backward(
40 | at::Tensor gradOutput,
41 | int padding_l,
42 | at::Tensor input,
43 | at::Tensor filters) {
44 |
45 | CHECK_INPUT(gradOutput);
46 | CHECK_INPUT(input);
47 | CHECK_INPUT(filters);
48 |
49 | return dynamicconv_cuda_backward(gradOutput, padding_l,
50 | input, filters);
51 | }
52 |
53 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
54 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)");
55 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)");
56 | }
57 |
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | #include
9 | #include
10 |
11 | #include
12 | #include
13 | #include
14 |
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 |
22 | #include
23 | #include
24 | #include
25 |
26 | #define SHFL_MASK 0xffffffff
27 |
28 | template <typename scalar_t>
29 | __global__
30 | void dynamicconv_forward_kernel(const scalar_t* input,
31 | const scalar_t* weight,
32 | int minibatch,
33 | int sequenceLength,
34 | int numFeatures,
35 | int numFiltersInBlock,
36 | int numHeads,
37 | scalar_t* output);
38 |
39 | template <typename scalar_t>
40 | __global__
41 | void dynamicconv_backward_kernel(
42 | const scalar_t* gradOutput, // B * C * T
43 | const scalar_t* input, // B * C * T
44 | const scalar_t* weight,
45 | int minibatch,
46 | int sequenceLength,
47 | int numFeatures,
48 | int numFiltersInBlock,
49 | int numHeads,
50 | scalar_t* gradWeight,
51 | scalar_t* gradInput); // B * H * k * T
52 |
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/extension.h>
2 | #include <vector>
3 |
4 | std::vector<float*> dynamicconv_cpu_forward(
5 | float* input,
6 | float* filters,
7 | int padding_l);
8 |
9 | std::vector<float*> dynamicconv_cpu_backward(
10 | float* gradOutput,
11 | int padding_l,
12 | float* input,
13 | float* filters);
14 |
15 | std::vector<float*> dynamicconv_forward(
16 | float* input,
17 | float* filters,
18 | int padding_l) {
19 |
20 | return dynamicconv_cpu_forward(input, filters, padding_l);
21 | }
22 |
23 | std::vector<float*> dynamicconv_backward(
24 | float* gradOutput,
25 | int padding_l,
26 | float* input,
27 | float* filters) {
28 |
29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters);
30 | }
31 |
32 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)");
34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)");
35 | }
36 |
--------------------------------------------------------------------------------
/fairseq/modules/dynamicconv_layer/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | from setuptools import setup
8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
9 |
10 |
11 | setup(
12 | name="dynamicconv_layer",
13 | ext_modules=[
14 | CUDAExtension(
15 | name="dynamicconv_cuda",
16 | sources=[
17 | "dynamicconv_cuda.cpp",
18 | "dynamicconv_cuda_kernel.cu",
19 | ],
20 | ),
21 | ],
22 | cmdclass={"build_ext": BuildExtension},
23 | )
24 |
--------------------------------------------------------------------------------
/fairseq/modules/fp32_group_norm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | Group norm done in fp32 (for fp16 training)
7 | """
8 |
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 |
12 |
13 | class Fp32GroupNorm(nn.GroupNorm):
14 | def __init__(self, *args, **kwargs):
15 | super().__init__(*args, **kwargs)
16 |
17 | def forward(self, input):
18 | output = F.group_norm(
19 | input.float(),
20 | self.num_groups,
21 | self.weight.float() if self.weight is not None else None,
22 | self.bias.float() if self.bias is not None else None,
23 | self.eps,
24 | )
25 | return output.type_as(input)
26 |
--------------------------------------------------------------------------------
/fairseq/modules/gelu.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs
8 | """
9 |
10 | import math
11 |
12 | import torch
13 | import torch.nn as nn
14 |
15 |
16 | def gelu_accurate(x):
17 | if not hasattr(gelu_accurate, "_a"):
18 | gelu_accurate._a = math.sqrt(2 / math.pi)
19 | return (
20 | 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
21 | )
22 |
23 |
24 | def gelu(x: torch.Tensor) -> torch.Tensor:
25 | return torch.nn.functional.gelu(x.float()).type_as(x)
26 |
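gelu() defers to PyTorch's exact erf-based GELU computed in fp32, while gelu_accurate() is the tanh approximation from the referenced paper; the two agree closely. A quick sanity check (the magnitude quoted is an observation, not a documented bound):

import torch
from fairseq.modules.gelu import gelu, gelu_accurate

x = torch.randn(1000)
print((gelu(x) - gelu_accurate(x)).abs().max())  # typically on the order of 1e-3 or smaller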
--------------------------------------------------------------------------------
/fairseq/modules/grad_multiply.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 |
9 | class GradMultiply(torch.autograd.Function):
10 | @staticmethod
11 | def forward(ctx, x, scale):
12 | ctx.scale = scale
13 | res = x.new(x)
14 | return res
15 |
16 | @staticmethod
17 | def backward(ctx, grad):
18 | return grad * ctx.scale, None
19 |
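GradMultiply returns the input unchanged in the forward pass and scales only the incoming gradient in the backward pass; it is commonly used to down-weight the gradient flowing into a shared sub-network. A minimal sketch:

import torch
from fairseq.modules.grad_multiply import GradMultiply

x = torch.ones(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5).sum()   # forward output equals x.sum() == 3
y.backward()
print(x.grad)                          # tensor([0.5000, 0.5000, 0.5000])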
--------------------------------------------------------------------------------
/fairseq/modules/layer_drop.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | LayerDrop as described in https://arxiv.org/abs/1909.11556.
7 | """
8 |
9 | import torch
10 | import torch.nn as nn
11 |
12 |
13 | class LayerDropModuleList(nn.ModuleList):
14 | """
15 | A LayerDrop implementation based on :class:`torch.nn.ModuleList`.
16 |
17 | We refresh the choice of which layers to drop every time we iterate
18 | over the LayerDropModuleList instance. During evaluation we always
19 | iterate over all layers.
20 |
21 | Usage::
22 |
23 | layers = LayerDropModuleList(p=0.5, modules=[layer1, layer2, layer3])
24 | for layer in layers: # this might iterate over layers 1 and 3
25 | x = layer(x)
26 | for layer in layers: # this might iterate over all layers
27 | x = layer(x)
28 | for layer in layers: # this might not iterate over any layers
29 | x = layer(x)
30 |
31 | Args:
32 | p (float): probability of dropping out each layer
33 | modules (iterable, optional): an iterable of modules to add
34 | """
35 |
36 | def __init__(self, p, modules=None):
37 | super().__init__(modules)
38 | self.p = p
39 |
40 | def __iter__(self):
41 | dropout_probs = torch.empty(len(self)).uniform_()
42 | for i, m in enumerate(super().__iter__()):
43 | if not self.training or (dropout_probs[i] > self.p):
44 | yield m
45 |
--------------------------------------------------------------------------------
/fairseq/modules/layer_norm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
11 | try:
12 | from apex.normalization import FusedLayerNorm as _FusedLayerNorm
13 |
14 | has_fused_layernorm = True
15 |
16 | class FusedLayerNorm(_FusedLayerNorm):
17 | @torch.jit.unused
18 | def forward(self, x):
19 | if not x.is_cuda:
20 | return super().forward(x)
21 | else:
22 | with torch.cuda.device(x.device):
23 | return super().forward(x)
24 |
25 |
26 | except ImportError:
27 | has_fused_layernorm = False
28 |
29 |
30 | def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
31 | if torch.jit.is_scripting():
32 | export = True
33 | if not export and torch.cuda.is_available() and has_fused_layernorm:
34 | return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
35 | return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
36 |
37 |
38 | class Fp32LayerNorm(nn.LayerNorm):
39 | def __init__(self, *args, **kwargs):
40 | super().__init__(*args, **kwargs)
41 |
42 | def forward(self, input):
43 | output = F.layer_norm(
44 | input.float(),
45 | self.normalized_shape,
46 | self.weight.float() if self.weight is not None else None,
47 | self.bias.float() if self.bias is not None else None,
48 | self.eps,
49 | )
50 | return output.type_as(input)
51 |
--------------------------------------------------------------------------------
/fairseq/modules/lightconv_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .lightconv_layer import LightconvLayer # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/modules/lightconv_layer/lightconv_cuda.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | #include <torch/extension.h>
9 | #include <vector>
10 |
11 | std::vector<at::Tensor> lightconv_cuda_forward(
12 | at::Tensor input,
13 | at::Tensor filters,
14 | int padding_l);
15 |
16 | std::vector<at::Tensor> lightconv_cuda_backward(
17 | at::Tensor gradOutput,
18 | int padding_l,
19 | at::Tensor input,
20 | at::Tensor filters);
21 |
22 |
23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
26 |
27 | std::vector<at::Tensor> lightconv_forward(
28 | at::Tensor input,
29 | at::Tensor filters,
30 | int padding_l) {
31 |
32 | CHECK_INPUT(input);
33 | CHECK_INPUT(filters);
34 |
35 | return lightconv_cuda_forward(input, filters, padding_l);
36 | }
37 |
38 | std::vector<at::Tensor> lightconv_backward(
39 | at::Tensor gradOutput,
40 | int padding_l,
41 | at::Tensor input,
42 | at::Tensor filters) {
43 |
44 | CHECK_INPUT(gradOutput);
45 | CHECK_INPUT(input);
46 | CHECK_INPUT(filters);
47 |
48 | return lightconv_cuda_backward(gradOutput, padding_l, input, filters);
49 | }
50 |
51 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
52 | m.def("forward", &lightconv_forward, "lighconv forward (CUDA)");
53 | m.def("backward", &lightconv_backward, "lighconv backward (CUDA)");
54 | }
55 |
--------------------------------------------------------------------------------
/fairseq/modules/lightconv_layer/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | from setuptools import setup
8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
9 |
10 |
11 | setup(
12 | name="lightconv_layer",
13 | ext_modules=[
14 | CUDAExtension(
15 | "lightconv_cuda",
16 | [
17 | "lightconv_cuda.cpp",
18 | "lightconv_cuda_kernel.cu",
19 | ],
20 | ),
21 | ],
22 | cmdclass={"build_ext": BuildExtension},
23 | )
24 |
--------------------------------------------------------------------------------
/fairseq/modules/positional_embedding.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn as nn
7 |
8 | from .learned_positional_embedding import LearnedPositionalEmbedding
9 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
10 |
11 |
12 | def PositionalEmbedding(
13 | num_embeddings: int,
14 | embedding_dim: int,
15 | padding_idx: int,
16 | learned: bool = False,
17 | ):
18 | if learned:
19 | # if padding_idx is specified then offset the embedding ids by
20 | # this index and adjust num_embeddings appropriately
21 | # TODO: The right place for this offset would be inside
22 | # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
23 | if padding_idx is not None:
24 | num_embeddings = num_embeddings + padding_idx + 1
25 | m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
26 | nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
27 | if padding_idx is not None:
28 | nn.init.constant_(m.weight[padding_idx], 0)
29 | else:
30 | m = SinusoidalPositionalEmbedding(
31 | embedding_dim,
32 | padding_idx,
33 | init_size=num_embeddings + padding_idx + 1,
34 | )
35 | return m
36 |
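A small sketch of the offset logic in the learned branch (the sizes are arbitrary): when a padding_idx is given, the embedding table is grown by padding_idx + 1 rows so that real positions can sit above the padding index.

from fairseq.modules.positional_embedding import PositionalEmbedding

emb = PositionalEmbedding(num_embeddings=512, embedding_dim=64, padding_idx=1, learned=True)
print(emb.weight.shape)  # torch.Size([514, 64]) -> 512 + padding_idx + 1 rows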
--------------------------------------------------------------------------------
/fairseq/modules/quantization/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/modules/quantization/.DS_Store
--------------------------------------------------------------------------------
/fairseq/modules/quantization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq/modules/quantization/__init__.py
--------------------------------------------------------------------------------
/fairseq/modules/quantization/pq/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .utils import SizeTracker, quantize_model_ # NOQA
7 |
--------------------------------------------------------------------------------
/fairseq/modules/quantization/pq/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .qconv import PQConv2d # NOQA
7 | from .qemb import PQEmbedding # NOQA
8 | from .qlinear import PQLinear # NOQA
9 |
--------------------------------------------------------------------------------
/fairseq/modules/quantization/quantization_options.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | def parse_config_yaml(yaml_data):
8 | # Initialize to default options.
9 | quantization_options = {
10 | "n_centroids": {
11 | "Linear": ["in_features", {"*": 256}],
12 | "Embedding": ["embedding_dim", {"*": 256}],
13 | },
14 | "block_sizes": {
15 | "Linear": ["fuzzy_name", {"fc": 8, "attn": 4, "emb": 4}],
16 | "Embedding": ["fuzzy_name", {"emb": 8}],
17 | },
18 | "layers_to_quantize": [
19 | "decoder\\.layers\\.\\d+\\.fc[12]",
20 | "decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]",
21 | "decoder\\.layers\\.\\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)",
22 | ],
23 | }
24 |
25 | if "n_centroids" in yaml_data:
26 | quantization_options["n_centroids"] = {
27 | layer: convert_yaml_to_tuple(layer_data)
28 | for layer, layer_data in yaml_data["n_centroids"].items()
29 | }
30 | if "block_sizes" in yaml_data:
31 | quantization_options["block_sizes"] = {
32 | layer: convert_yaml_to_tuple(layer_data)
33 | for layer, layer_data in yaml_data["block_sizes"].items()
34 | }
35 | if "layers_to_quantize" in yaml_data:
36 | quantization_options["layers_to_quantize"] = yaml_data["layers_to_quantize"]
37 |
38 | return quantization_options
39 |
40 |
41 | def convert_yaml_to_tuple(yaml_dictionary):
42 | """Converts a yaml dictionary with two keys: `key` and `value` into a two
43 | argument tuple of those values."""
44 | return (yaml_dictionary["key"], yaml_dictionary["value"])
45 |
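A sketch of the expected structure, written as an already-parsed Python dict rather than YAML (the values are hypothetical): each leaf is a `key`/`value` pair that convert_yaml_to_tuple collapses into the (attribute_name, mapping) tuples seen in the defaults above; sections that are not overridden keep their defaults.

from fairseq.modules.quantization.quantization_options import parse_config_yaml

yaml_data = {
    "n_centroids": {
        "Linear": {"key": "in_features", "value": {"*": 512}},
    },
}
opts = parse_config_yaml(yaml_data)
print(opts["n_centroids"]["Linear"])   # ('in_features', {'*': 512})
print(opts["block_sizes"]["Linear"])   # untouched sections fall back to the defaults above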
--------------------------------------------------------------------------------
/fairseq/modules/quantization/scalar/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .utils import quantize_model_ # NOQA
7 |
--------------------------------------------------------------------------------
/fairseq/modules/quantization/scalar/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .qact import ActivationQuantizer # NOQA
7 | from .qconv import IntConv2d # NOQA
8 | from .qemb import IntEmbedding # NOQA
9 | from .qlinear import IntLinear # NOQA
10 |
--------------------------------------------------------------------------------
/fairseq/modules/quantization/scalar/ops.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 |
9 | def emulate_int(w, bits, method, scale=None, zero_point=None):
10 | q = globals()[f"emulate_int{bits}_{method}"]
11 | return q(w, scale=scale, zero_point=zero_point)
12 |
13 |
14 | def quantize(w, scale, zero_point):
15 | return (
16 | torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point
17 | ) * scale
18 |
19 |
20 | def emulate_int8_histogram(w, scale=None, zero_point=None):
21 | if scale is None:
22 | obs = torch.quantization.observer.HistogramObserver()
23 | _ = obs(w.float())
24 | scale, zero_point = obs.calculate_qparams()
25 | scale = scale.cuda().type_as(w)
26 | zero_point = zero_point.cuda().type_as(w)
27 | return quantize(w, scale, zero_point), scale, zero_point
28 |
29 |
30 | def emulate_int8_channel(w, scale=None, zero_point=None):
31 | if scale is None:
32 | obs = torch.quantization.observer.PerChannelMinMaxObserver(
33 | ch_axis=-1, qscheme=torch.per_channel_symmetric
34 | )
35 | _ = obs(w)
36 | scale, zero_point, ch_axis = obs.get_qparams()
37 | scale = scale.cuda().type_as(w)
38 | zero_point = zero_point.cuda().type_as(w)
39 | return quantize(w, scale, zero_point), scale, zero_point
40 |
41 |
42 | def emulate_int8_tensor(w, scale=None, zero_point=None):
43 | if scale is None:
44 | obs = torch.quantization.observer.MinMaxObserver()
45 | _ = obs(w)
46 | scale, zero_point = obs.calculate_qparams()
47 | scale = scale.cuda().type_as(w)
48 | zero_point = zero_point.cuda().type_as(w)
49 | return quantize(w, scale, zero_point), scale, zero_point
50 |
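quantize() emulates int8 on the original float scale: values are mapped onto a 0-255 grid, rounded and clamped, then mapped back, so the result is still a float tensor but takes at most 256 distinct values. A CPU-only sketch with hand-picked qparams (the emulate_int8_* helpers above additionally move scale and zero_point to CUDA):

import torch
from fairseq.modules.quantization.scalar.ops import quantize

w = torch.tensor([-1.0, 0.0, 0.5, 1.0])
scale = torch.tensor(2.0 / 255)      # hypothetical qparams covering roughly [-1, 1]
zero_point = torch.tensor(127.0)
print(quantize(w, scale, zero_point))  # each entry snapped to the nearest representable level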
--------------------------------------------------------------------------------
/fairseq/modules/same_pad.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | from torch import nn
8 |
9 |
10 | class SamePad(nn.Module):
11 | def __init__(self, kernel_size, causal=False):
12 | super().__init__()
13 | if causal:
14 | self.remove = kernel_size - 1
15 | else:
16 | self.remove = 1 if kernel_size % 2 == 0 else 0
17 |
18 | def forward(self, x):
19 | if self.remove > 0:
20 | x = x[:, :, : -self.remove]
21 | return x
22 |
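SamePad is meant to follow a convolution that was padded for "same" length: it trims the surplus trailing frames (kernel_size - 1 of them in the causal case, one for an even kernel). A shape-only sketch:

import torch
from fairseq.modules.same_pad import SamePad

x = torch.randn(2, 4, 10)                             # B x C x T
print(SamePad(kernel_size=3, causal=True)(x).shape)   # torch.Size([2, 4, 8])  -- trims k-1 = 2
print(SamePad(kernel_size=4)(x).shape)                # torch.Size([2, 4, 9])  -- even kernel trims 1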
--------------------------------------------------------------------------------
/fairseq/modules/scalar_bias.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | #
6 |
7 | import torch
8 |
9 |
10 | class ScalarBias(torch.autograd.Function):
11 | """
12 | Adds a vector of scalars, used in the self-attention mechanism to allow
13 | the model to optionally attend to this vector instead of the past
14 | """
15 |
16 | @staticmethod
17 | def forward(ctx, input, dim, bias_init):
18 | size = list(input.size())
19 | size[dim] += 1
20 | output = input.new(*size).fill_(bias_init)
21 | output.narrow(dim, 1, size[dim] - 1).copy_(input)
22 | ctx.dim = dim
23 | return output
24 |
25 | @staticmethod
26 | def backward(ctx, grad):
27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None
28 |
29 |
30 | def scalar_bias(input, dim, bias_init=0):
31 | return ScalarBias.apply(input, dim, bias_init)
32 |
--------------------------------------------------------------------------------
/fairseq/modules/sparse_transformer_sentence_encoder_layer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.modules import TransformerSentenceEncoderLayer
7 | from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
8 |
9 |
10 | class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
11 | """
12 | Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
13 | """
14 |
15 | def __init__(
16 | self,
17 | embedding_dim: int = 768,
18 | ffn_embedding_dim: int = 3072,
19 | num_attention_heads: int = 8,
20 | dropout: float = 0.1,
21 | attention_dropout: float = 0.1,
22 | activation_dropout: float = 0.1,
23 | activation_fn: str = "relu",
24 | export: bool = False,
25 | is_bidirectional: bool = True,
26 | stride: int = 32,
27 | expressivity: int = 8,
28 | ) -> None:
29 |
30 | super().__init__(
31 | embedding_dim,
32 | ffn_embedding_dim,
33 | num_attention_heads,
34 | dropout,
35 | attention_dropout,
36 | activation_dropout,
37 | activation_fn,
38 | export,
39 | )
40 |
41 | self.self_attn = SparseMultiheadAttention(
42 | self.embedding_dim,
43 | num_attention_heads,
44 | dropout=attention_dropout,
45 | add_bias_kv=False,
46 | add_zero_attn=False,
47 | self_attention=True,
48 | is_bidirectional=is_bidirectional,
49 | stride=stride,
50 | expressivity=expressivity,
51 | )
52 |
--------------------------------------------------------------------------------
/fairseq/modules/transpose_last.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | transpose last 2 dimensions of the input
7 | """
8 |
9 | import torch.nn as nn
10 |
11 |
12 | class TransposeLast(nn.Module):
13 | def __init__(self, deconstruct_idx=None):
14 | super().__init__()
15 | self.deconstruct_idx = deconstruct_idx
16 |
17 | def forward(self, x):
18 | if self.deconstruct_idx is not None:
19 | x = x[self.deconstruct_idx]
20 | return x.transpose(-2, -1)
21 |
--------------------------------------------------------------------------------
/fairseq/modules/unfold.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn.functional as F
7 |
8 |
9 | def unfold1d(x, kernel_size, padding_l, pad_value=0):
10 | """unfold T x B x C to T x B x C x K"""
11 | if kernel_size > 1:
12 | T, B, C = x.size()
13 | x = F.pad(
14 | x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value
15 | )
16 | x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C))
17 | else:
18 | x = x.unsqueeze(3)
19 | return x
20 |
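unfold1d gathers, for every time step, a window of kernel_size frames (padding_l of them to the left) via as_strided on a padded copy, yielding a T x B x C x K tensor. A small shape sketch:

import torch
from fairseq.modules.unfold import unfold1d

x = torch.arange(12.0).view(3, 2, 2)           # T x B x C = 3 x 2 x 2
out = unfold1d(x, kernel_size=3, padding_l=1)
print(out.shape)                               # torch.Size([3, 2, 2, 3])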
--------------------------------------------------------------------------------
/fairseq/optim/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | import importlib
8 | import os
9 |
10 | from fairseq import registry
11 | from fairseq.optim.bmuf import FairseqBMUF # noqa
12 | from fairseq.optim.fairseq_optimizer import ( # noqa
13 | FairseqOptimizer,
14 | LegacyFairseqOptimizer,
15 | )
16 | from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
17 | from fairseq.optim.shard import shard_
18 | from omegaconf import DictConfig
19 |
20 | __all__ = [
21 | "FairseqOptimizer",
22 | "FP16Optimizer",
23 | "MemoryEfficientFP16Optimizer",
24 | "shard_",
25 | ]
26 |
27 | (
28 | _build_optimizer,
29 | register_optimizer,
30 | OPTIMIZER_REGISTRY,
31 | OPTIMIZER_DATACLASS_REGISTRY,
32 | ) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True)
33 |
34 |
35 | def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs):
36 | if all(isinstance(p, dict) for p in params):
37 | params = [t for p in params for t in p.values()]
38 | params = list(filter(lambda p: p.requires_grad, params))
39 | return _build_optimizer(cfg, params, *extra_args, **extra_kwargs)
40 |
41 |
42 | # automatically import any Python files in the optim/ directory
43 | for file in os.listdir(os.path.dirname(__file__)):
44 | if file.endswith(".py") and not file.startswith("_"):
45 | file_name = file[: file.find(".py")]
46 | importlib.import_module("fairseq.optim." + file_name)
47 |
--------------------------------------------------------------------------------
/fairseq/optim/adagrad.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.optim
7 |
8 | from . import LegacyFairseqOptimizer, register_optimizer
9 |
10 |
11 | @register_optimizer("adagrad")
12 | class Adagrad(LegacyFairseqOptimizer):
13 | def __init__(self, args, params):
14 | super().__init__(args)
15 | self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
16 |
17 | @staticmethod
18 | def add_args(parser):
19 | """Add optimizer-specific arguments to the parser."""
20 | # fmt: off
21 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
22 | help='weight decay')
23 | # fmt: on
24 |
25 | @property
26 | def optimizer_config(self):
27 | """
28 | Return a kwarg dictionary that will be used to override optimizer
29 | args stored in checkpoints. This allows us to load a checkpoint and
30 | resume training using a different set of optimizer args, e.g., with a
31 | different learning rate.
32 | """
33 | return {
34 | "lr": self.args.lr[0],
35 | "weight_decay": self.args.weight_decay,
36 | }
37 |
38 | @property
39 | def supports_flat_params(self):
40 | return False
41 |
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | import importlib
8 | import os
9 |
10 | from fairseq import registry
11 | from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( # noqa
12 | FairseqLRScheduler,
13 | LegacyFairseqLRScheduler,
14 | )
15 | from omegaconf import DictConfig
16 |
17 |
18 | (
19 | build_lr_scheduler_,
20 | register_lr_scheduler,
21 | LR_SCHEDULER_REGISTRY,
22 | LR_SCHEDULER_DATACLASS_REGISTRY,
23 | ) = registry.setup_registry(
24 | "--lr-scheduler", base_class=FairseqLRScheduler, default="fixed"
25 | )
26 |
27 |
28 | def build_lr_scheduler(cfg: DictConfig, optimizer):
29 | return build_lr_scheduler_(cfg, optimizer)
30 |
31 |
32 | # automatically import any Python files in the optim/lr_scheduler/ directory
33 | for file in os.listdir(os.path.dirname(__file__)):
34 | if file.endswith(".py") and not file.startswith("_"):
35 | file_name = file[: file.find(".py")]
36 | importlib.import_module("fairseq.optim.lr_scheduler." + file_name)
37 |
--------------------------------------------------------------------------------
/fairseq/optim/lr_scheduler/pass_through.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from dataclasses import dataclass
7 |
8 | from fairseq.dataclass import FairseqDataclass
9 | from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
10 |
11 |
12 | @dataclass
13 | class PassThroughScheduleConfig(FairseqDataclass):
14 | pass
15 |
16 |
17 | @register_lr_scheduler("pass_through", dataclass=PassThroughScheduleConfig)
18 | class PassThroughScheduleSchedule(FairseqLRScheduler):
19 | """Delegate lr scheduling to the optimizer."""
20 |
21 | def __init__(self, cfg: PassThroughScheduleConfig, optimizer):
22 | super().__init__(cfg, optimizer)
23 | assert (
24 | hasattr(optimizer, "lr_scheduler") and optimizer.lr_scheduler is not None
25 | ), "Pass-through schedule can only be used with optimizers with their own schedulers"
26 |
27 | def state_dict(self):
28 | return self.optimizer.lr_scheduler.state_dict()
29 |
30 | def load_state_dict(self, state_dict):
31 | self.optimizer.lr_scheduler.load_state_dict(state_dict)
32 |
33 | def step_begin_epoch(self, epoch):
34 | """Update the learning rate at the beginning of the given epoch."""
35 | return self.optimizer.lr_scheduler.step_begin_epoch(epoch)
36 |
37 | def step_update(self, num_updates):
38 | """Update the learning rate after each update."""
39 | return self.optimizer.lr_scheduler.step_update(num_updates)
40 |
--------------------------------------------------------------------------------
/fairseq/optim/sgd.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.optim
7 |
8 | from . import LegacyFairseqOptimizer, register_optimizer
9 |
10 |
11 | @register_optimizer("sgd")
12 | class SGD(LegacyFairseqOptimizer):
13 | def __init__(self, args, params):
14 | super().__init__(args)
15 | self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
16 |
17 | @staticmethod
18 | def add_args(parser):
19 | """Add optimizer-specific arguments to the parser."""
20 | # fmt: off
21 | parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
22 | help='momentum factor')
23 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
24 | help='weight decay')
25 | # fmt: on
26 |
27 | @property
28 | def optimizer_config(self):
29 | """
30 | Return a kwarg dictionary that will be used to override optimizer
31 | args stored in checkpoints. This allows us to load a checkpoint and
32 | resume training using a different set of optimizer args, e.g., with a
33 | different learning rate.
34 | """
35 | return {
36 | "lr": self.args.lr[0],
37 | "momentum": self.args.momentum,
38 | "weight_decay": self.args.weight_decay,
39 | }
40 |
41 | @property
42 | def supports_flat_params(self):
43 | return True
44 |
--------------------------------------------------------------------------------
/fairseq/pdb.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import multiprocessing
7 | import os
8 | import pdb
9 | import sys
10 |
11 |
12 | __all__ = ["set_trace"]
13 |
14 |
15 | _stdin = [None]
16 | _stdin_lock = multiprocessing.Lock()
17 | try:
18 | _stdin_fd = sys.stdin.fileno()
19 | except Exception:
20 | _stdin_fd = None
21 |
22 |
23 | class MultiprocessingPdb(pdb.Pdb):
24 | """A Pdb wrapper that works in a multiprocessing environment.
25 |
26 | Usage: `from fairseq import pdb; pdb.set_trace()`
27 | """
28 |
29 | def __init__(self):
30 | pdb.Pdb.__init__(self, nosigint=True)
31 |
32 | def _cmdloop(self):
33 | stdin_bak = sys.stdin
34 | with _stdin_lock:
35 | try:
36 | if _stdin_fd is not None:
37 | if not _stdin[0]:
38 | _stdin[0] = os.fdopen(_stdin_fd)
39 | sys.stdin = _stdin[0]
40 | self.cmdloop()
41 | finally:
42 | sys.stdin = stdin_bak
43 |
44 |
45 | def set_trace():
46 | pdb = MultiprocessingPdb()
47 | pdb.set_trace(sys._getframe().f_back)
48 |
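Usage follows the docstring: import the module and call set_trace() wherever a breakpoint is needed, for example inside a hypothetical training step running in a worker process.

    from fairseq import pdb

    def train_step(batch):   # hypothetical training step
        pdb.set_trace()      # opens a prompt bound to the original stdin, even in subprocesses
        ...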
--------------------------------------------------------------------------------
/fairseq/scoring/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | import importlib
8 | import os
9 | from abc import ABC, abstractmethod
10 |
11 | from fairseq import registry
12 | from omegaconf import DictConfig
13 |
14 |
15 | class BaseScorer(ABC):
16 | def __init__(self, cfg):
17 | self.cfg = cfg
18 | self.ref = []
19 | self.pred = []
20 |
21 | def add_string(self, ref, pred):
22 | self.ref.append(ref)
23 | self.pred.append(pred)
24 |
25 | @abstractmethod
26 | def score(self) -> float:
27 | pass
28 |
29 | @abstractmethod
30 | def result_string(self) -> str:
31 | pass
32 |
33 |
34 | _build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
35 | "--scoring", default="bleu"
36 | )
37 |
38 |
39 | def build_scorer(choice, tgt_dict):
40 | _choice = choice._name if isinstance(choice, DictConfig) else choice
41 |
42 | if _choice == "bleu":
43 | from fairseq.scoring import bleu
44 |
45 | return bleu.Scorer(
46 | bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
47 | )
48 | return _build_scorer(choice)
49 |
50 |
51 | # automatically import any Python files in the current directory
52 | for file in os.listdir(os.path.dirname(__file__)):
53 | if file.endswith(".py") and not file.startswith("_"):
54 | module = file[: file.find(".py")]
55 | importlib.import_module("fairseq.scoring." + module)
56 |
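A sketch of the registry contract (the scorer name and scoring logic below are made up): subclass BaseScorer, implement score() and result_string(), and register it so --scoring can select it by name.

    from fairseq.scoring import BaseScorer, register_scorer

    @register_scorer("char_acc")                 # hypothetical scorer name
    class CharAccScorer(BaseScorer):
        def score(self) -> float:
            ref, pred = "".join(self.ref), "".join(self.pred)
            return sum(a == b for a, b in zip(ref, pred)) / max(len(ref), 1)

        def result_string(self) -> str:
            return f"char_acc = {self.score():.4f}"

    scorer = CharAccScorer(cfg=None)
    scorer.add_string("abc", "abd")
    print(scorer.result_string())                # char_acc = 0.6667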
--------------------------------------------------------------------------------
/fairseq/scoring/chrf.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.scoring import BaseScorer, register_scorer
7 |
8 |
9 | @register_scorer("chrf")
10 | class ChrFScorer(BaseScorer):
11 | def __init__(self, args):
12 | super(ChrFScorer, self).__init__(args)
13 | import sacrebleu
14 |
15 | self.sacrebleu = sacrebleu
16 |
17 | def add_string(self, ref, pred):
18 | self.ref.append(ref)
19 | self.pred.append(pred)
20 |
21 |     def score(self, order=4):
22 |         return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).score  # bare float; .format() would return a string
23 |
24 | def result_string(self, order=4):
25 | if order != 4:
26 | raise NotImplementedError
27 | return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format()
28 |
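Usage sketch (sacrebleu must be installed, since the constructor imports it; the sentences are made up):

    from fairseq.scoring.chrf import ChrFScorer

    scorer = ChrFScorer(None)        # args are only passed through to BaseScorer
    scorer.add_string("the cat sat on the mat", "the cat sat on a mat")
    print(scorer.result_string())    # formatted chrF line from sacrebleu
    print(scorer.score())            # bare chrF value as a float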
--------------------------------------------------------------------------------
/fairseq/tasks/translation_from_pretrained_xlm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
7 | from fairseq.tasks.translation import TranslationTask
8 |
9 | from . import register_task
10 |
11 |
12 | @register_task("translation_from_pretrained_xlm")
13 | class TranslationFromPretrainedXLMTask(TranslationTask):
14 | """
15 | Same as TranslationTask except use the MaskedLMDictionary class so that
16 | we can load data that was binarized with the MaskedLMDictionary class.
17 |
18 | This task should be used for the entire training pipeline when we want to
19 | train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
20 | training NMT with the pretrained XLM checkpoint, and subsequent evaluation
21 | of that trained model.
22 | """
23 |
24 | @classmethod
25 | def load_dictionary(cls, filename):
26 | """Load the masked LM dictionary from the filename
27 |
28 | Args:
29 | filename (str): the filename
30 | """
31 | return MaskedLMDictionary.load(filename)
32 |
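The only behavioral change from TranslationTask is the dictionary class, so a quick check (with a hypothetical dictionary file path) looks like:

    from fairseq.tasks.translation_from_pretrained_xlm import (
        TranslationFromPretrainedXLMTask,
    )

    d = TranslationFromPretrainedXLMTask.load_dictionary("data-bin/dict.en.txt")  # hypothetical path
    print(type(d).__name__)  # MaskedLMDictionary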
--------------------------------------------------------------------------------
/fairseq/tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import re
7 |
8 |
9 | SPACE_NORMALIZER = re.compile(r"\s+")
10 |
11 |
12 | def tokenize_line(line):
13 | line = SPACE_NORMALIZER.sub(" ", line)
14 | line = line.strip()
15 | return line.split()
16 |
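Behavior sketch: runs of whitespace (spaces, tabs, newlines) collapse to single separators before splitting.

    from fairseq.tokenizer import tokenize_line

    print(tokenize_line("  hello \t world\n"))  # ['hello', 'world']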
--------------------------------------------------------------------------------
/fairseq/torch_imputer/__init__.py:
--------------------------------------------------------------------------------
1 | from .imputer import imputer_loss, ImputerLoss, best_alignment, ctc_decode
2 |
--------------------------------------------------------------------------------
/fairseq/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "1.0.0a0+d2873b9"
2 |
--------------------------------------------------------------------------------
/fairseq/version.txt:
--------------------------------------------------------------------------------
1 | 1.0.0a0
2 |
--------------------------------------------------------------------------------
/fairseq_cli/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fairseq_cli/__init__.py
--------------------------------------------------------------------------------
/fs_plugins/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fs_plugins/.DS_Store
--------------------------------------------------------------------------------
/fs_plugins/__init__.py:
--------------------------------------------------------------------------------
1 | # from .criterions import *
2 | from .models import *
3 | from .tasks import *
4 | # from .optimizer import *
5 |
6 | print("fairseq plugins loaded...")
--------------------------------------------------------------------------------
/fs_plugins/models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fs_plugins/models/.DS_Store
--------------------------------------------------------------------------------
/fs_plugins/models/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import importlib
3 |
4 | # automatically import any Python files in the models/ directory
5 | for file in os.listdir(os.path.dirname(__file__)):
6 | if file.endswith(".py") and not file.startswith("_"):
7 | file_name = file[: file.find(".py")]
8 | importlib.import_module("fs_plugins.models." + file_name)
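The auto-import exists so that any model file dropped into this directory registers itself with fairseq when the package loads. A hedged sketch of such a file (hypothetical filename and model name) that the loop above would pick up:

    # e.g. a hypothetical fs_plugins/models/my_model.py
    from fairseq.models import register_model
    from fairseq.models.transformer import TransformerModel

    @register_model("my_plugin_transformer")   # hypothetical model name
    class MyPluginTransformer(TransformerModel):
        pass                                   # importing this file is what registers the name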
--------------------------------------------------------------------------------
/fs_plugins/tasks/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yafuly/SyntacticGen/2d42a1d0b81ddf4fb72f3e812de4c913492fee04/fs_plugins/tasks/.DS_Store
--------------------------------------------------------------------------------
/fs_plugins/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import importlib
3 |
4 | # automatically import any Python files in the tasks/ directory
5 | for file in os.listdir(os.path.dirname(__file__)):
6 | if file.endswith(".py") and not file.startswith("_"):
7 | file_name = file[: file.find(".py")]
8 | importlib.import_module("fs_plugins.tasks." + file_name)
--------------------------------------------------------------------------------
/generate.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | """
7 | Legacy entry point. Use fairseq_cli/generate.py or fairseq-generate instead.
8 | """
9 |
10 | from fairseq_cli.generate import cli_main
11 |
12 |
13 | if __name__ == "__main__":
14 | cli_main()
15 |
16 |
--------------------------------------------------------------------------------
/setup_env.sh:
--------------------------------------------------------------------------------
1 | pip install -e . # run from the SyntacticGen root directory
2 | pip install tensorboard sacremoses nltk Ninja omegaconf
3 | pip install 'fuzzywuzzy[speedup]'
4 | pip install hydra-core==1.0.6
5 | pip install sacrebleu==1.5.1
6 | pip install subword-nmt
--------------------------------------------------------------------------------
/shell/apply-bpe.sh:
--------------------------------------------------------------------------------
1 | code=$1
2 | cat - | subword-nmt apply-bpe --codes $code --glossaries "" "<-ADV>" "" "<-BNF>" "" "" "<-CLF>" "<-CLR>" "" "<-DIR>" "" "<-DTV>" "" "<-EXT>" "" "" "<-HLN>" "" "" "" "" "" "<-LGS>" "<-LOC>" "" "" "" "<-MNR>" "" "" "" "" "" "<-NOM>" "" "" "" "" "" "<-PRD>" "" "" "<-PRP>" "" "" "" "<-PUT>" "" "" "" "" "