├── asr_bleu
├── __init__.py
└── requirements.txt
├── fairseq
├── scripts
│   ├── __init__.py
│   ├── spm_train.py
│   ├── compound_split_bleu.sh
│   ├── sacrebleu.sh
│   └── convert_dictionary.lua
├── tests
│   ├── __init__.py
│   ├── gpu
│   │   ├── __init__.py
│   │   └── transformer_quantization_config.yaml
│   ├── distributed
│   │   └── __init__.py
│   ├── speech_recognition
│   │   └── __init__.py
│   ├── speech
│   │   ├── test_s2t_conformer.py
│   │   └── test_s2t_transformer.py
│   └── test_hf_hub.py
├── fairseq_cli
│   └── __init__.py
├── examples
│   ├── data2vec
│   │   ├── __init__.py
│   │   ├── models
│   │   │   ├── __init__.py
│   │   │   └── modalities
│   │   │   │   └── __init__.py
│   │   ├── config
│   │   │   ├── v2
│   │   │   │   ├── run_config
│   │   │   │   │   └── local.yaml
│   │   │   │   └── text_finetuning
│   │   │   │   │   └── run_config
│   │   │   │   │   └── local.yaml
│   │   │   ├── audio
│   │   │   │   └── pretraining
│   │   │   │   │   └── run_config
│   │   │   │   │   └── local.yaml
│   │   │   ├── text
│   │   │   │   └── pretraining
│   │   │   │   │   └── run_config
│   │   │   │   │   └── local.yaml
│   │   │   └── vision
│   │   │   │   ├── finetuning
│   │   │   │   └── run_config
│   │   │   │   │   └── local.yaml
│   │   │   │   └── pretraining
│   │   │   │   └── run_config
│   │   │   │   └── local.yaml
│   │   ├── data
│   │   │   ├── modality.py
│   │   │   └── __init__.py
│   │   ├── scripts
│   │   │   ├── text
│   │   │   │   ├── finetune_all_fair_nodep_aws_local_lr.sh
│   │   │   │   ├── finetune_all_large_fair_nodep_aws_local_lr.sh
│   │   │   │   ├── finetune_all_char_fair_aws_local_lr.sh
│   │   │   │   ├── finetune_all_fair_aws_local_lr.sh
│   │   │   │   ├── finetune_all_large_fair_aws_local_lr.sh
│   │   │   │   ├── finetune_sst2_qnli_sweep_fair_nodep.sh
│   │   │   │   ├── finetune_all_fair_nodep.sh
│   │   │   │   └── finetune_all_fair_nodep_aws.sh
│   │   │   └── multi
│   │   │   │   ├── finetune_all_fair_aws_local_lr_nodep.sh
│   │   │   │   └── finetune_all_fair_aws_local_lr.sh
│   │   └── tasks
│   │   │   └── __init__.py
│   ├── wav2vec
│   │   ├── __init__.py
│   │   ├── unsupervised
│   │   │   ├── __init__.py
│   │   │   ├── kaldi_self_train
│   │   │   │   └── st
│   │   │   │   │   ├── steps
│   │   │   │   │   ├── utils
│   │   │   │   │   ├── local
│   │   │   │   │   ├── copy_aligned_text.py
│   │   │   │   │   ├── decode.sh
│   │   │   │   │   └── prepare_lm.sh
│   │   │   │   │   ├── path.sh
│   │   │   │   │   └── cmd.sh
│   │   │   ├── models
│   │   │   │   └── __init__.py
│   │   │   ├── tasks
│   │   │   │   └── __init__.py
│   │   │   ├── scripts
│   │   │   │   ├── copy_labels.py
│   │   │   │   ├── ltr_to_wrd.py
│   │   │   │   ├── wrd_to_ltr.py
│   │   │   │   └── normalize_text.py
│   │   │   ├── config
│   │   │   │   └── generate
│   │   │   │   │   └── viterbi.yaml
│   │   │   │   └── data
│   │   │   │   └── __init__.py
│   │   └── config
│   │   │   └── finetuning
│   │   │   └── run_config
│   │   │   ├── slurm_1.yaml
│   │   │   ├── slurm_2g.yaml
│   │   │   ├── slurm_4g.yaml
│   │   │   ├── slurm_8.yaml
│   │   │   ├── slurm_1_old.yaml
│   │   │   ├── slurm_16.yaml
│   │   │   ├── slurm_2.yaml
│   │   │   └── slurm_3.yaml
│   ├── .gitignore
│   ├── speech_recognition
│   │   ├── kaldi
│   │   │   ├── __init__.py
│   │   │   └── config
│   │   │   │   └── kaldi_initializer.yaml
│   │   ├── new
│   │   │   ├── __init__.py
│   │   │   ├── decoders
│   │   │   │   ├── __init__.py
│   │   │   │   └── viterbi_decoder.py
│   │   │   └── conf
│   │   │   │   ├── infer.yaml
│   │   │   │   ├── hydra
│   │   │   │   └── sweeper
│   │   │   │   │   ├── ax.yaml
│   │   │   │   │   └── ax_sil.yaml
│   │   │   │   └── run_config
│   │   │   │   ├── fb_slurm_2g.yaml
│   │   │   │   └── fb_slurm_1.yaml
│   │   ├── __init__.py
│   │   ├── data
│   │   │   └── __init__.py
│   │   ├── tasks
│   │   │   └── __init__.py
│   │   ├── models
│   │   │   └── __init__.py
│   │   └── criterions
│   │   │   └── __init__.py
│   ├── attention_head_selection
│   │   └── src
│   │   │   ├── __init__.py
│   │   │   ├── data
│   │   │   └── __init__.py
│   │   │   ├── loss
│   │   │   └── __init__.py
│   │   │   ├── models
│   │   │   └── __init__.py
│   │   │   └── modules
│   │   │   └── __init__.py
│   ├── emotion_conversion
│   │   ├── preprocess
│   │   │   └── __init__.py
│   │   ├── emotion_models
│   │   │   └── __init__.py
│   │   └── requirements.txt
│   ├── hubert
│   │   ├── tests
│   │   │   ├── sample.base.L9.len
│   │   │   ├── sample.large.L20.len
│   │   │   ├── sample.xlarge.L30.len
│   │   │   ├── sample.base.L9.npy
│   │   │   ├── 6313-76958-0021.flac
│   │   │   ├── sample.large.L20.npy
│   │   │   ├── sample.xlarge.L30.npy
│   │   │   ├── sample.large.hypo.word
│   │   │   └── sample.xlarge.hypo.word
│   │   ├── config
│   │   │   ├── pretrain
│   │   │   │   ├── data
│   │   │   │   │   ├── iter2.yaml
│   │   │   │   │   └── iter1.yaml
│   │   │   │   └── run
│   │   │   │   │   └── submitit_reg.yaml
│   │   │   ├── finetune
│   │   │   │   ├── ckpt
│   │   │   │   │   └── it1.yaml
│   │   │   │   ├── lm
│   │   │   │   │   └── ls_4gram.yaml
│   │   │   │   └── run
│   │   │   │   │   └── submitit_reg.yaml
│   │   │   └── decode
│   │   │   │   ├── run
│   │   │   │   ├── submitit_slurm.yaml
│   │   │   │   └── submitit_slurm_8gpu.yaml
│   │   │   │   ├── infer_viterbi.yaml
│   │   │   │   ├── infer_kenlm.yaml
│   │   │   │   └── infer_fsqlm.yaml
│   │   └── update_ckpt.py
│   ├── speech_to_speech
│   │   ├── asr_bleu
│   │   │   ├── __init__.py
│   │   │   └── requirements.txt
│   │   ├── preprocessing
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── benchmarking
│   │   │   └── configs
│   │   │   │   ├── S2T.yaml
│   │   │   │   ├── 2StageS2ST.yaml
│   │   │   │   ├── DirectS2U.yaml
│   │   │   │   └── 3StageS2ST.yaml
│   │   ├── unity
│   │   │   └── __init__.py
│   │   └── README.md
│   ├── textless_nlp
│   │   ├── gslm
│   │   │   ├── speech2unit
│   │   │   │   ├── __init__.py
│   │   │   │   └── clustering
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── utils.py
│   │   │   ├── unit2speech
│   │   │   │   ├── tacotron2
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── symbols.py
│   │   │   │   └── multiproc.py
│   │   │   └── metrics
│   │   │   │   ├── asr_metrics
│   │   │   │   └── misc
│   │   │   │   │   └── dict.ltr.txt
│   │   │   │   └── README.md
│   │   ├── speech-resynth
│   │   │   └── img
│   │   │   │   └── fig.png
│   │   └── pgslm
│   │   │   ├── eval
│   │   │   └── __init__.py
│   │   │   ├── sample
│   │   │   └── __init__.py
│   │   │   └── scripts
│   │   │   └── prepare_f0_quantization.sh
│   ├── latent_depth
│   │   └── latent_depth_src
│   │   │   ├── loss
│   │   │   └── __init__.py
│   │   │   ├── models
│   │   │   └── __init__.py
│   │   │   ├── modules
│   │   │   └── __init__.py
│   │   │   └── __init__.py
│   ├── linformer
│   │   ├── linformer_src
│   │   │   ├── models
│   │   │   │   └── __init__.py
│   │   │   ├── modules
│   │   │   │   └── __init__.py
│   │   │   └── __init__.py
│   │   └── README.md
│   ├── multilingual
│   │   ├── data_scripts
│   │   │   ├── requirement.txt
│   │   │   ├── utils
│   │   │   │   └── strip_sgm.sh
│   │   │   ├── README.md
│   │   │   └── preprocess_ML50_v1.sh
│   │   ├── ML50_langs.txt
│   │   └── multilingual_fairseq_gen.sh
│   ├── adaptive_span
│   │   ├── truncated_bptt_lm_task.py
│   │   └── __init__.py
│   ├── MMPT
│   │   ├── vlm.png
│   │   ├── videoclip.png
│   │   ├── projects
│   │   │   ├── mtm
│   │   │   │   ├── vlm.yaml
│   │   │   │   ├── mmfusionmtm.yaml
│   │   │   │   └── vlm
│   │   │   │   │   ├── test_vttqa.yaml
│   │   │   │   │   ├── test_vtt.yaml
│   │   │   │   │   ├── test_youcook.yaml
│   │   │   │   │   ├── test_youcookcap.yaml
│   │   │   │   │   └── test_coin.yaml
│   │   │   ├── task
│   │   │   │   ├── test_vtt_videoclip.yaml
│   │   │   │   ├── test_vttqa_videoclip.yaml
│   │   │   │   ├── test_youcook_videoclip.yaml
│   │   │   │   ├── coin_videoclip.yaml
│   │   │   │   ├── test_crosstask_videoclip.yaml
│   │   │   │   ├── test_coin_videoclip.yaml
│   │   │   │   ├── test_crosstask_zs_videoclip.yaml
│   │   │   │   ├── vttqa_videoclip.yaml
│   │   │   │   ├── youcook_videoclip.yaml
│   │   │   │   ├── vtt_videoclip.yaml
│   │   │   │   ├── test.yaml
│   │   │   │   ├── test_coin_zs.yaml
│   │   │   │   ├── crosstask_videoclip.yaml
│   │   │   │   ├── test_vtt_zs.yaml
│   │   │   │   ├── test_vttqa_zs.yaml
│   │   │   │   ├── test_youcook_zs.yaml
│   │   │   │   ├── ft.yaml
│   │   │   │   ├── test_vtt.yaml
│   │   │   │   ├── test_vttqa.yaml
│   │   │   │   ├── vttqa.yaml
│   │   │   │   ├── default.yaml
│   │   │   │   ├── youcookcap.yaml
│   │   │   │   ├── test_didemo_zs.yaml
│   │   │   │   ├── how2.yaml
│   │   │   │   ├── coin.yaml
│   │   │   │   ├── vtt.yaml
│   │   │   │   ├── test_youcookcap.yaml
│   │   │   │   ├── test_coin.yaml
│   │   │   │   ├── youcook.yaml
│   │   │   │   └── test_youcook.yaml
│   │   │   └── retri
│   │   │   │   ├── videoclip.yaml
│   │   │   │   └── videoclip
│   │   │   │   ├── test_vttqa_zs.yaml
│   │   │   │   ├── test_didemo_zs.yaml
│   │   │   │   ├── test_vtt_zs.yaml
│   │   │   │   ├── test_vttqa_videoclip.yaml
│   │   │   │   └── test_vtt_videoclip.yaml
│   │   ├── scripts
│   │   │   ├── text_token_extractor
│   │   │   │   └── configs
│   │   │   │   │   └── bert-base-uncased.yaml
│   │   │   └── video_feature_extractor
│   │   │   │   └── how2
│   │   │   │   └── s3d.sh
│   │   ├── mmpt
│   │   │   ├── modules
│   │   │   │   └── __init__.py
│   │   │   ├── datasets
│   │   │   │   └── __init__.py
│   │   │   ├── evaluators
│   │   │   │   └── __init__.py
│   │   │   ├── losses
│   │   │   │   └── __init__.py
│   │   │   ├── __init__.py
│   │   │   ├── models
│   │   │   │   └── __init__.py
│   │   │   ├── tasks
│   │   │   │   ├── __init__.py
│   │   │   │   └── vlmtask.py
│   │   │   └── processors
│   │   │   │   └── __init__.py
│   │   └── setup.py
│   ├── discriminative_reranking_nmt
│   │   ├── __init__.py
│   │   ├── models
│   │   │   └── __init__.py
│   │   ├── tasks
│   │   │   └── __init__.py
│   │   └── criterions
│   │   │   └── __init__.py
│   ├── flores101
│   │   └── flores_logo.png
│   ├── speech_synthesis
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   └── __init__.py
│   │   └── preprocessing
│   │   │   ├── __init__.py
│   │   │   └── denoiser
│   │   │   └── __init__.py
│   ├── rxf
│   │   ├── __init__.py
│   │   └── rxf_src
│   │   │   └── __init__.py
│   ├── noisychannel
│   │   └── __init__.py
│   ├── simultaneous_translation
│   │   ├── __init__.py
│   │   ├── README.md
│   │   ├── models
│   │   │   └── __init__.py
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── modules
│   │   │   └── __init__.py
│   ├── roberta
│   │   ├── commonsense_qa
│   │   │   ├── __init__.py
│   │   │   └── download_cqa_data.sh
│   │   ├── wsc
│   │   │   └── __init__.py
│   │   ├── config
│   │   │   ├── finetuning
│   │   │   │   └── run_config
│   │   │   │   │   ├── local.yaml
│   │   │   │   │   ├── slurm_1g_aws.yaml
│   │   │   │   │   └── slurm_1g.yaml
│   │   │   └── pretraining
│   │   │   │   ├── run_config
│   │   │   │   └── local.yaml
│   │   │   │   └── base.yaml
│   │   └── fb_multilingual
│   │   │   └── README.multilingual.pretraining.md
│   ├── speech_text_joint_to_text
│   │   ├── tasks
│   │   │   └── __init__.py
│   │   ├── models
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   └── criterions
│   │   │   └── __init__.py
│   ├── pointer_generator
│   │   └── pointer_generator_src
│   │   │   └── __init__.py
│   ├── translation_moe
│   │   └── translation_moe_src
│   │   │   ├── __init__.py
│   │   │   └── logsumexp_moe.py
│   ├── truncated_bptt
│   │   └── __init__.py
│   ├── __init__.py
│   ├── m2m_100
│   │   └── tokenizers
│   │   │   ├── thirdparty
│   │   │   └── .gitignore
│   │   │   ├── tokenize_zh.py
│   │   │   ├── tokenize_thai.py
│   │   │   ├── seg_ja.sh
│   │   │   ├── seg_ko.sh
│   │   │   ├── README.md
│   │   │   ├── tokenize_indic.py
│   │   │   └── tokenizer_ar.sh
│   ├── laser
│   │   └── laser_src
│   │   │   └── __init__.py
│   ├── fast_noisy_channel
│   │   └── __init__.py
│   ├── operators
│   │   ├── alignment_train_cuda.h
│   │   ├── utils.h
│   │   └── alignment_train_cuda.cpp
│   ├── constrained_decoding
│   │   └── normalize.py
│   ├── wmt21
│   │   └── README.md
│   ├── megatron_11b
│   │   └── detok.py
│   └── unsupervised_quality_estimation
│   │   └── repeat_lines.py
├── fairseq
│   ├── logging
│   │   └── __init__.py
│   ├── version.txt
│   ├── modules
│   │   ├── quantization
│   │   │   ├── __init__.py
│   │   │   ├── scalar
│   │   │   │   ├── __init__.py
│   │   │   │   └── modules
│   │   │   │   │   └── __init__.py
│   │   │   └── pq
│   │   │   │   ├── __init__.py
│   │   │   │   └── modules
│   │   │   │   └── __init__.py
│   │   ├── lightconv_layer
│   │   │   ├── __init__.py
│   │   │   └── setup.py
│   │   ├── dynamicconv_layer
│   │   │   ├── __init__.py
│   │   │   ├── setup.py
│   │   │   └── dynamiconv_cpu.cpp
│   │   ├── grad_multiply.py
│   │   ├── unfold.py
│   │   ├── transpose_last.py
│   │   ├── gelu.py
│   │   └── fp32_group_norm.py
│   ├── models
│   │   ├── speech_dlm
│   │   │   ├── modules
│   │   │   │   └── __init__.py
│   │   │   ├── __init__.py
│   │   │   └── sequence_generator
│   │   │   │   └── __init__.py
│   │   ├── speech_to_text
│   │   │   ├── modules
│   │   │   │   └── __init__.py
│   │   │   └── __init__.py
│   │   ├── speech_to_speech
│   │   │   ├── modules
│   │   │   │   ├── __init__.py
│   │   │   │   └── ctc_decoder.py
│   │   │   └── __init__.py
│   │   ├── bart
│   │   │   └── __init__.py
│   │   ├── hubert
│   │   │   └── __init__.py
│   │   ├── xmod
│   │   │   └── __init__.py
│   │   ├── wav2vec
│   │   │   ├── __init__.py
│   │   │   └── utils.py
│   │   ├── text_to_speech
│   │   │   └── __init__.py
│   │   ├── roberta
│   │   │   └── __init__.py
│   │   ├── nat
│   │   │   └── __init__.py
│   │   ├── ema
│   │   │   └── __init__.py
│   │   └── huggingface
│   │   │   └── __init__.py
│   ├── config
│   │   ├── model
│   │   │   ├── wav2vec
│   │   │   │   └── vq_wav2vec_gumbel.yaml
│   │   │   └── wav2vec2
│   │   │   │   ├── wav2vec2_base.yaml
│   │   │   │   └── wav2vec2_large.yaml
│   │   ├── __init__.py
│   │   ├── config.yaml
│   │   └── fb_run_config
│   │   │   └── slurm.yaml
│   ├── data
│   │   ├── multilingual
│   │   │   └── __init__.py
│   │   ├── num_samples_dataset.py
│   │   ├── id_dataset.py
│   │   ├── offset_tokens_dataset.py
│   │   ├── legacy
│   │   │   └── __init__.py
│   │   ├── roll_dataset.py
│   │   ├── raw_label_dataset.py
│   │   ├── lru_cache_dataset.py
│   │   ├── huffman
│   │   │   └── __init__.py
│   │   ├── encoders
│   │   │   ├── space_tokenizer.py
│   │   │   ├── characters.py
│   │   │   ├── nltk_tokenizer.py
│   │   │   └── __init__.py
│   │   ├── sort_dataset.py
│   │   ├── strip_token_dataset.py
│   │   ├── list_dataset.py
│   │   ├── numel_dataset.py
│   │   └── colorize_dataset.py
│   ├── model_parallel
│   │   ├── models
│   │   │   ├── roberta
│   │   │   │   └── __init__.py
│   │   │   ├── pipeline_parallel_transformer
│   │   │   │   └── __init__.py
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── criterions
│   │   │   └── __init__.py
│   │   └── modules
│   │   │   └── __init__.py
│   ├── benchmark
│   │   ├── __init__.py
│   │   └── dummy_dataset.py
│   ├── dataclass
│   │   └── __init__.py
│   ├── tokenizer.py
│   ├── clib
│   │   ├── libnat_cuda
│   │   │   └── edit_dist.h
│   │   └── libbleu
│   │   │   └── module.cpp
│   └── distributed
│   │   └── __init__.py
├── MANIFEST.in
├── docs
│   ├── docutils.conf
│   ├── fairseq.gif
│   ├── fairseq_logo.png
│   ├── modules.rst
│   ├── Makefile
│   ├── criterions.rst
│   └── make.bat
├── setup.cfg
├── .gitmodules
├── hydra_plugins
│   └── dependency_submitit_launcher
│   │   └── hydra_plugins
│   │   └── dependency_submitit_launcher
│   │   ├── __init__.py
│   │   └── config.py
├── .github
│   ├── ISSUE_TEMPLATE.md
│   ├── ISSUE_TEMPLATE
│   │   ├── documentation.md
│   │   ├── feature_request.md
│   │   └── how-to-question.md
│   └── PULL_REQUEST_TEMPLATE.md
├── train.py
└── RELEASE.md
├── assets
└── daspeech.png
├── hifi-gan
├── requirements.txt
├── env.py
├── config_v3.json
├── config_v1.json
└── config_v2.json
├── translatotron
└── preprocess
│   ├── s2ut
│   ├── mhubert.km1000.layer11.pt
│   ├── run_mhubert.sh
│   └── create_manifest.py
│   └── extract_ref_txt.py
├── DASpeech
├── custom_ops
│   ├── __init__.py
│   └── utilities.h
├── __init__.py
├── tasks
│   └── __init__.py
├── datasets
│   └── __init__.py
├── models
│   └── __init__.py
├── criterions
│   └── __init__.py
└── generator
│   └── __init__.py
└── test_scripts
└── convert_id.py

/asr_bleu/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/scripts/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/tests/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/fairseq_cli/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/tests/gpu/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/data2vec/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/fairseq/logging/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/fairseq/version.txt:
--------------------------------------------------------------------------------
0.12.2
--------------------------------------------------------------------------------
/fairseq/tests/distributed/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/data2vec/models/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/tests/speech_recognition/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/MANIFEST.in:
--------------------------------------------------------------------------------
include fairseq/version.txt
--------------------------------------------------------------------------------
/fairseq/examples/.gitignore:
--------------------------------------------------------------------------------
!*/*.sh
!*/*.md
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/kaldi/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/new/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/fairseq/models/speech_dlm/modules/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/attention_head_selection/src/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/data2vec/models/modalities/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/emotion_conversion/preprocess/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.base.L9.len:
--------------------------------------------------------------------------------
596
--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.large.L20.len:
--------------------------------------------------------------------------------
596
--------------------------------------------------------------------------------
/fairseq/examples/speech_to_speech/asr_bleu/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/speech2unit/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/fairseq/models/speech_to_text/modules/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/docs/docutils.conf:
--------------------------------------------------------------------------------
[writers]
option-limit=0
--------------------------------------------------------------------------------
/fairseq/examples/attention_head_selection/src/data/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/attention_head_selection/src/loss/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/emotion_conversion/emotion_models/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.xlarge.L30.len:
--------------------------------------------------------------------------------
596
--------------------------------------------------------------------------------
/fairseq/examples/latent_depth/latent_depth_src/loss/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/linformer/linformer_src/models/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/linformer/linformer_src/modules/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/new/decoders/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/fairseq/models/speech_to_speech/modules/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/attention_head_selection/src/models/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/attention_head_selection/src/modules/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/latent_depth/latent_depth_src/modules/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/multilingual/data_scripts/requirement.txt:
--------------------------------------------------------------------------------
wget
pandas
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/steps:
--------------------------------------------------------------------------------
../../wsj/s5/steps
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/utils:
--------------------------------------------------------------------------------
../../wsj/s5/utils
--------------------------------------------------------------------------------
/assets/daspeech.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/assets/daspeech.png
--------------------------------------------------------------------------------
/fairseq/docs/fairseq.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/docs/fairseq.gif
--------------------------------------------------------------------------------
/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py:
--------------------------------------------------------------------------------
../truncated_bptt/truncated_bptt_lm_task.py
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/__init__.py:
--------------------------------------------------------------------------------
from . import criterions, models, tasks  # noqa
--------------------------------------------------------------------------------
/fairseq/docs/fairseq_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/docs/fairseq_logo.png
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/vlm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/MMPT/vlm.png
--------------------------------------------------------------------------------
/fairseq/examples/discriminative_reranking_nmt/__init__.py:
--------------------------------------------------------------------------------
from . import criterions, models, tasks  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/videoclip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/MMPT/videoclip.png
--------------------------------------------------------------------------------
/fairseq/examples/multilingual/data_scripts/utils/strip_sgm.sh:
--------------------------------------------------------------------------------
grep "seg id" | sed 's/<seg id="[0-9]*">//g' | sed 's/<\/seg>//g'
--------------------------------------------------------------------------------
/fairseq/examples/flores101/flores_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/flores101/flores_logo.png
--------------------------------------------------------------------------------
/fairseq/setup.cfg:
--------------------------------------------------------------------------------
[flake8]
max-line-length = 127
extend-ignore = E203, W503
extend-exclude = fairseq/model_parallel/megatron
--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.base.L9.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/hubert/tests/sample.base.L9.npy
--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/6313-76958-0021.flac:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/hubert/tests/6313-76958-0021.flac
--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.large.L20.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/hubert/tests/sample.large.L20.npy
--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.xlarge.L30.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/hubert/tests/sample.xlarge.L30.npy
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/speech-resynth/img/fig.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/fairseq/examples/textless_nlp/speech-resynth/img/fig.png
--------------------------------------------------------------------------------
/hifi-gan/requirements.txt:
--------------------------------------------------------------------------------
torch==1.4.0
numpy==1.17.4
librosa==0.7.2
scipy==1.4.1
tensorboard==2.0
soundfile==0.10.3.post1
matplotlib==3.1.3
--------------------------------------------------------------------------------
/translatotron/preprocess/s2ut/mhubert.km1000.layer11.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ictnlp/DASpeech/HEAD/translatotron/preprocess/s2ut/mhubert.km1000.layer11.pt
--------------------------------------------------------------------------------
/asr_bleu/requirements.txt:
--------------------------------------------------------------------------------
fairseq==0.12.2
pandas==1.4.3
sacrebleu==2.2.0
torch==1.12.1
torchaudio==0.12.1
tqdm==4.64.0
transformers==4.21.1
--------------------------------------------------------------------------------
/fairseq/fairseq/config/model/wav2vec/vq_wav2vec_gumbel.yaml:
--------------------------------------------------------------------------------
# @package _group_
activation: gelu
vq_type: gumbel
vq_depth: 2
combine_groups: true
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/pretrain/data/iter2.yaml:
--------------------------------------------------------------------------------
# @package _global_

task:
  label_dir: ???
  labels: ["km"]

model:
  label_rate: 50
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/pretrain/data/iter1.yaml:
--------------------------------------------------------------------------------
# @package _global_

task:
  label_dir: ???
  labels: ["km"]

model:
  label_rate: 100
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py:
--------------------------------------------------------------------------------
import sys

for idx, line in enumerate(sys.stdin):
    print(f"utt{idx:010d} {line}", end='')
--------------------------------------------------------------------------------
/fairseq/.gitmodules:
--------------------------------------------------------------------------------
[submodule "fairseq/model_parallel/megatron"]
    path = fairseq/model_parallel/megatron
    url = https://github.com/ngoyal2707/Megatron-LM
    branch = fairseq
--------------------------------------------------------------------------------
/fairseq/examples/emotion_conversion/requirements.txt:
--------------------------------------------------------------------------------
scipy
einops
amfm_decompy
joblib
numba
decorator
requests
appdirs
packaging
six
sklearn
--------------------------------------------------------------------------------
/fairseq/examples/speech_to_speech/asr_bleu/requirements.txt:
--------------------------------------------------------------------------------
fairseq==0.12.2
pandas==1.4.3
sacrebleu==2.2.0
torch==1.12.1
torchaudio==0.12.1
tqdm==4.64.0
transformers==4.21.1
--------------------------------------------------------------------------------
/fairseq/examples/discriminative_reranking_nmt/models/__init__.py:
--------------------------------------------------------------------------------
from .discriminative_reranking_model import DiscriminativeNMTReranker


__all__ = [
    "DiscriminativeNMTReranker",
]
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/kaldi/config/kaldi_initializer.yaml:
--------------------------------------------------------------------------------
# @package _group_

data_dir: ???
fst_dir: ???
in_labels: ???
kaldi_root: ???
lm_arpa: ???
blank_symbol:
--------------------------------------------------------------------------------
/fairseq/hydra_plugins/dependency_submitit_launcher/hydra_plugins/dependency_submitit_launcher/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

__version__ = "0.1"
--------------------------------------------------------------------------------
/fairseq/examples/discriminative_reranking_nmt/tasks/__init__.py:
--------------------------------------------------------------------------------
from .discriminative_reranking_task import DiscriminativeRerankingNMTTask


__all__ = [
    "DiscriminativeRerankingNMTTask",
]
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/mtm/vlm.yaml:
--------------------------------------------------------------------------------
includes: projects/mtm/mmfusionmtm.yaml
project_dir: mtm/vlm
task_group:
  pretrain:
    dataset:
      sampled_min_len: 8
    loss:
      loss_cls: MTM
--------------------------------------------------------------------------------
/fairseq/examples/discriminative_reranking_nmt/criterions/__init__.py:
--------------------------------------------------------------------------------
from .discriminative_reranking_criterion import KLDivergenceRerankingCriterion


__all__ = [
    "KLDivergenceRerankingCriterion",
]
--------------------------------------------------------------------------------
/fairseq/fairseq/config/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/DASpeech/custom_ops/__init__.py:
--------------------------------------------------------------------------------
from .dag_loss import dag_loss, dag_loss_with_alpha_beta, dag_best_alignment, dag_logsoftmax_gather_inplace, torch_dag_loss, torch_dag_best_alignment, torch_dag_logsoftmax_gather_inplace, logsumexp_keepdim
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/fairseq/config/model/wav2vec2/wav2vec2_base.yaml:
--------------------------------------------------------------------------------
# @package _group_

quantize_targets: true
final_dim: 256
encoder_layerdrop: 0.05
dropout_input: 0.1
dropout_features: 0.1
feature_grad_mult: 0.1
--------------------------------------------------------------------------------
/fairseq/fairseq/data/multilingual/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/pgslm/eval/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.

# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/pgslm/sample/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.

# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/evaluation/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/preprocessing/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/examples/speech_to_speech/preprocessing/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/scripts/text_token_extractor/configs/bert-base-uncased.yaml:
--------------------------------------------------------------------------------
dataset:
  bert_name: bert-base-uncased
  caption_pkl_path: data/how2/raw_caption_dedup.pkl
  use_fast: true
  target_dir: data/feat/feat_how2_s3d_shard_small
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/preprocessing/denoiser/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
--------------------------------------------------------------------------------
/fairseq/examples/rxf/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import rxf_src  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/speech_to_speech/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import unity  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/task/test_vtt_videoclip.yaml:
--------------------------------------------------------------------------------
includes: projects/task/test_vtt.yaml
model:
  model_cls: MMFusionSeparate
  mm_encoder_cls:
  video_encoder_cls: MMBertForEncoder
  text_encoder_cls: BertModel
  num_hidden_video_layers: 6

--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.large.hypo.word:
--------------------------------------------------------------------------------
KEEP A GOING AN IF YOU'RE LUCKY YOU'LL RUN PLUMB INTO THEM WAS THE JEERING ANSWER AS THE SLEEPY COWMEN SPURRED THEIR PONIES ON TOWARD CAMP MUTTERING THEIR DISAPPROVAL OF TAKING ALONG A BUNCH OF BOYS ON A CATTLE DRIVE (None-0)
--------------------------------------------------------------------------------
/fairseq/examples/hubert/tests/sample.xlarge.hypo.word:
--------------------------------------------------------------------------------
KEEP A GOIN AND IF YOU'RE LUCKY YOU'LL RUN PLUMB INTO THEM WAS THE JEERING ANSWER AS THE SLEEPY COWMEN SPURRED THEIR PONIES ON TOWARD CAMP MUTTERING THEIR DISAPPROVAL OF TAKING ALONG A BUNCH OF BOYS ON A CATTLE DRIVE (None-0)
--------------------------------------------------------------------------------
/fairseq/examples/noisychannel/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .rerank_options import *  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/task/test_vttqa_videoclip.yaml:
--------------------------------------------------------------------------------
includes: projects/task/test_vttqa.yaml
model:
  model_cls: MMFusionSeparate
  mm_encoder_cls:
  video_encoder_cls: MMBertForEncoder
  text_encoder_cls: BertModel
  num_hidden_video_layers: 6

--------------------------------------------------------------------------------
/fairseq/examples/simultaneous_translation/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import models  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/models/roberta/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .model import *  # noqa
--------------------------------------------------------------------------------
/fairseq/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
## 👉 [Please follow one of these issue templates](https://github.com/pytorch/fairseq/issues/new/choose) 👈

Note: to keep the backlog clean and actionable, issues may be immediately closed if they do not follow one of the above issue templates.
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/task/test_youcook_videoclip.yaml:
--------------------------------------------------------------------------------
includes: projects/task/test_youcook.yaml
model:
  model_cls: MMFusionSeparate
  mm_encoder_cls:
  video_encoder_cls: MMBertForEncoder
  text_encoder_cls: BertModel
  num_hidden_video_layers: 6

--------------------------------------------------------------------------------
/fairseq/examples/roberta/commonsense_qa/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import commonsense_qa_task  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/speech_text_joint_to_text/tasks/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import criterions, models, modules  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/linformer/linformer_src/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .models import linformer_roberta  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/speech_text_joint_to_text/models/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/scalar/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .utils import quantize_model_  # NOQA
--------------------------------------------------------------------------------
/DASpeech/__init__.py:
--------------------------------------------------------------------------------
from .criterions import *
from .models import *
from .tasks import *
from .datasets import *
from .generator import *

import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')

print("DASpeech plugins loaded...")
--------------------------------------------------------------------------------
/fairseq/examples/pointer_generator/pointer_generator_src/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import transformer_pg  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/speech_text_joint_to_text/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import tasks, criterions, models  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/translation_moe/translation_moe_src/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import translation_moe  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/lightconv_layer/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .lightconv_layer import LightconvLayer  # noqa
--------------------------------------------------------------------------------
/fairseq/docs/modules.rst:
--------------------------------------------------------------------------------
Modules
=======

Fairseq provides several stand-alone :class:`torch.nn.Module` classes that may
be helpful when implementing a new :class:`~fairseq.models.BaseFairseqModel`.

.. automodule:: fairseq.modules
    :members:
    :undoc-members:
--------------------------------------------------------------------------------
/fairseq/examples/roberta/wsc/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import wsc_criterion  # noqa
from . import wsc_task  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/truncated_bptt/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import transformer_xl_model, truncated_bptt_lm_task  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .model import *  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/models/bart/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hub_interface import *  # noqa
from .model import *  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/models/hubert/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hubert import *  # noqa
from .hubert_asr import *  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/dynamicconv_layer/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .dynamicconv_layer import DynamicconvLayer  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/scripts/video_feature_extractor/how2/s3d.sh:
--------------------------------------------------------------------------------
#!/bin/bash


python scripts/video_feature_extractor/extract.py \
    --vdir <path_to_video_folder> \
    --fdir data/feat/feat_how2_s3d \
    --type=s3d --num_decoding_thread=4 \
    --batch_size 32 --half_precision 1
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/finetune/ckpt/it1.yaml:
--------------------------------------------------------------------------------
# @package _global_

task:
  normalize: false

model:
  w2v_path: /checkpoint/wnhsu/w2v/hubert_final/iter1/hubert.km.randcrop.pmw1_0.puw0_0.grpnorm.ml10.mp0_8.untie.mxsz250000.ufreq1.maxtok1400000.MU400k.s1337.ngpu32/checkpoint_last.pt
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/task/coin_videoclip.yaml:
--------------------------------------------------------------------------------
includes: projects/task/coin.yaml
model:
  model_cls: MMFusionSeparateActionSegmentation
  mm_encoder_cls:
  video_encoder_cls: MMBertForTokenClassification
  text_encoder_cls: BertModel # dummy, not used.
  num_hidden_video_layers: 6
--------------------------------------------------------------------------------
/fairseq/examples/rxf/rxf_src/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/models/speech_dlm/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .speech_dlm import *  # noqa
from .hub_interface import *  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/models/speech_dlm/sequence_generator/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .multichannel_sequence_generator import *  # noqa
--------------------------------------------------------------------------------
/fairseq/fairseq/models/xmod/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .model import *  # noqa
from .transformer_layer_xmod import *  # noqa
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/task/test_crosstask_videoclip.yaml:
--------------------------------------------------------------------------------
includes: projects/task/test_crosstask.yaml
model:
  model_cls: MMFusionSeparateActionLocalization
  mm_encoder_cls:
  video_encoder_cls: MMBertForEncoder
  text_encoder_cls: BertModel # dummy, not used.
  num_hidden_video_layers: 6
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/task/test_coin_videoclip.yaml:
--------------------------------------------------------------------------------
includes: projects/task/test_coin.yaml
model:
  model_cls: MMFusionSeparateActionSegmentation
  mm_encoder_cls:
  video_encoder_cls: MMBertForTokenClassification
  text_encoder_cls: BertModel # dummy, not used.
  num_hidden_video_layers: 6
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/pq/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .utils import SizeTracker, get_param, attrsetter, quantize_model_  # NOQA
--------------------------------------------------------------------------------
/fairseq/examples/MMPT/projects/task/test_crosstask_zs_videoclip.yaml:
--------------------------------------------------------------------------------
includes: projects/task/test_crosstask_zs.yaml
model:
  model_cls: MMFusionSeparateActionLocalization
  mm_encoder_cls:
  video_encoder_cls: MMBertForEncoder
  text_encoder_cls: BertModel # dummy, not used.
  num_hidden_video_layers: 6
--------------------------------------------------------------------------------
/fairseq/examples/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
5 | 6 | try: 7 | from fairseq.version import __version__ # noqa 8 | except ImportError: 9 | pass 10 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/finetune/lm/ls_4gram.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | criterion: 4 | wer_kenlm_model: /checkpoint/abdo/old_checkpoint02/datasets/librispeech/4-gram.bin 5 | wer_lexicon: /checkpoint/abdo/old_checkpoint02/datasets/librispeech/10h/raw/lexicon_ltr.lst 6 | wer_lm_weight: 2.0 7 | wer_word_score: -1.0 8 | -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/thirdparty/.gitignore: -------------------------------------------------------------------------------- 1 | seg_my.py 2 | indic_nlp_library/ 3 | indic_nlp_resources/ 4 | kytea/ 5 | mecab-0.996-ko-0.9.2.tar.gz 6 | mecab-0.996-ko-0.9.2/ 7 | mosesdecoder/ 8 | wat2020.my-en.zip 9 | wat2020.my-en/ 10 | wmt16-scripts/ 11 | mecab-ko-dic-2.1.1-20180720/ 12 | mecab-ko-dic-2.1.1-20180720.tar.gz -------------------------------------------------------------------------------- /fairseq/examples/speech_to_speech/benchmarking/configs/S2T.yaml: -------------------------------------------------------------------------------- 1 | general: 2 | dataset_path: $npy_dataset 3 | cpu: True 4 | model_type: S2T 5 | dataset_size: 1 6 | 7 | stage1: 8 | data: $data_bin 9 | task: speech_to_text 10 | path: $checkpoint 11 | config_yaml: config.yaml 12 | max_len_a: 2 13 | max_len_b: 500 14 | -------------------------------------------------------------------------------- /DASpeech/custom_ops/utilities.h: -------------------------------------------------------------------------------- 1 | #define GCC_VERSION (__GNUC__ * 10000 \ 2 | + __GNUC_MINOR__ * 100 \ 3 | + __GNUC_PATCHLEVEL__) 4 | 5 | #if GCC_VERSION >= 70000 6 | #define if_constexpr(expression) if constexpr (expression) 7 | #else 8 | #define if_constexpr(expression) if(expression) 9 | #endif 10 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | from .mm import * 6 | 7 | try: 8 | from .expmm import * 9 | except ImportError: 10 | pass 11 | -------------------------------------------------------------------------------- /fairseq/examples/speech_to_speech/unity/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import sequence_generator # noqa 7 | from . import sequence_generator_multi_decoder # noqa 8 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .asr_dataset import AsrDataset 7 | 8 | 9 | __all__ = [ 10 | "AsrDataset", 11 | ] 12 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | for file in sorted(os.listdir(os.path.dirname(__file__))): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | task_name = file[: file.find(".py")] 8 | importlib.import_module("examples.speech_recognition.tasks." + task_name) 9 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .wav2vec_u import Wav2vec_U 7 | 8 | 9 | __all__ = [ 10 | "Wav2vec_U", 11 | ] 12 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/vttqa_videoclip.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/vttqa.yaml 2 | model: 3 | model_cls: MMFusionSeparate 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel 7 | num_hidden_video_layers: 6 8 | 9 | # model_cls: MMFusionShare 10 | # mm_encoder_cls: MMBertForEncoder 11 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/youcook_videoclip.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/youcook.yaml 2 | model: 3 | model_cls: MMFusionSeparate 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel 7 | num_hidden_video_layers: 6 8 | # model_cls: MMFusionShare 9 | # mm_encoder_cls: MMBertForEncoder 10 | -------------------------------------------------------------------------------- /fairseq/examples/laser/laser_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .laser_task import * # noqa 7 | from .laser_lstm import * # noqa 8 | from .laser_transformer import * # noqa 9 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/models/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | for file in sorted(os.listdir(os.path.dirname(__file__))): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | model_name = file[: file.find(".py")] 8 | importlib.import_module("examples.speech_recognition.models." 
+ model_name) 9 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/config/v2/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /fairseq/fairseq/benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | # import models/tasks to register them 7 | from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa 8 | -------------------------------------------------------------------------------- /DASpeech/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | 4 | # automatically import any Python files in the tasks/ directory 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | file_name = file[: file.find(".py")] 8 | importlib.import_module("DASpeech.tasks." + file_name) 9 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
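# The core multimodal dataset below is always exported; the fairseq-backed wrapper is optional and skipped silently when fairseq is not importable.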
5 | from .mmdataset import * 6 | 7 | try: 8 | from .fairseqmmdataset import * 9 | except ImportError: 10 | pass 11 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/retri/videoclip.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/retri/videoretri.yaml 2 | project_dir: retri/videoclip 3 | task_group: 4 | pretrain: 5 | model: 6 | model_cls: MMFusionSeparate 7 | mm_encoder_cls: 8 | video_encoder_cls: MMBertForEncoder 9 | text_encoder_cls: BertModel 10 | num_hidden_video_layers: 6 11 | -------------------------------------------------------------------------------- /fairseq/examples/roberta/config/finetuning/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /DASpeech/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | 4 | # automatically import any Python files in the datasets/ directory 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | file_name = file[: file.find(".py")] 8 | importlib.import_module("DASpeech.datasets." + file_name) 9 | -------------------------------------------------------------------------------- /DASpeech/models/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | 4 | # automatically import any Python files in the models/ directory 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | file_name = file[: file.find(".py")] 8 | importlib.import_module("DASpeech.models." + file_name) 9 | -------------------------------------------------------------------------------- /fairseq/examples/roberta/config/pretraining/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /DASpeech/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | 4 | # automatically import any Python files in the criterions/ directory 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | file_name = file[: file.find(".py")] 8 | importlib.import_module("DASpeech.criterions."
+ file_name) 9 | -------------------------------------------------------------------------------- /DASpeech/generator/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | 4 | # automatically import any Python files in the generator/ directory 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | file_name = file[: file.find(".py")] 8 | importlib.import_module("DASpeech.generator." + file_name) 9 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/config/audio/pretraining/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/config/text/pretraining/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/config/v2/text_finetuning/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/config/vision/finetuning/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/config/vision/pretraining/run_config/local.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | sweep: 4 | dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S} 5 | 6 | distributed_training: 7 | distributed_world_size: 1 8 | nprocs_per_node: 1 9 | distributed_port: -1 10 | 11 | common: 12 | log_interval: 1 13 | 14 | dataset: 15 | num_workers: 0 16 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
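# Exposes the unpaired audio/text task used for wav2vec-U training and generation.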
5 | 6 | from .unpaired_audio_text import UnpairedAudioText 7 | 8 | 9 | __all__ = [ 10 | "UnpairedAudioText", 11 | ] 12 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/quantization/pq/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .qconv import PQConv2d # NOQA 7 | from .qemb import PQEmbedding # NOQA 8 | from .qlinear import PQLinear # NOQA 9 | -------------------------------------------------------------------------------- /fairseq/.github/ISSUE_TEMPLATE/documentation.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 📚 Documentation/Typos 3 | about: Report an issue related to documentation or a typo 4 | labels: 'documentation, needs triage' 5 | --- 6 | 7 | ## 📚 Documentation 8 | 9 | For typos and doc fixes, please go ahead and: 10 | 11 | 1. Create an issue. 12 | 2. Fix the typo. 13 | 3. Submit a PR. 14 | 15 | Thanks! 16 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh: -------------------------------------------------------------------------------- 1 | export KALDI_ROOT=`pwd`/../../.. 2 | export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH 3 | [ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1 4 | . $KALDI_ROOT/tools/config/common_path.sh 5 | export LC_ALL=C 6 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/wav2vec/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .wav2vec import * # noqa 7 | from .wav2vec2 import * # noqa 8 | from .wav2vec2_asr import * # noqa 9 | from .wav2vec2_laser import * # noqa 10 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/vtt_videoclip.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/vtt.yaml 2 | model: 3 | model_cls: MMFusionSeparate 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel 7 | num_hidden_video_layers: 6 8 | fairseq: 9 | dataset: 10 | batch_size: 224 11 | # model_cls: MMFusionShare 12 | # mm_encoder_cls: MMBertForEncoder 13 | -------------------------------------------------------------------------------- /fairseq/examples/fast_noisy_channel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import noisy_channel_translation # noqa 7 | from . import noisy_channel_sequence_generator # noqa 8 | from . 
import noisy_channel_beam_search # noqa 9 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test.yaml: -------------------------------------------------------------------------------- 1 | # this yaml cannot be run alone: implement a test_${dataset}.yaml 2 | slurm_config: big 3 | task_type: local_predict 4 | dataset: 5 | split: test 6 | video_processor: VideoProcessor 7 | aligner: DSAligner 8 | bert_name: bert-base-uncased 9 | fairseq: 10 | dataset: 11 | batch_size: 256 12 | valid_subset: test 13 | num_workers: 2 14 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/scripts/copy_labels.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import sys 8 | 9 | for idx, line in enumerate(sys.stdin): 10 | print(f"utt{idx:010d} {line}", end="") 11 | -------------------------------------------------------------------------------- /fairseq/fairseq/dataclass/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .configs import FairseqDataclass 7 | from .constants import ChoiceEnum 8 | 9 | 10 | __all__ = [ 11 | "FairseqDataclass", 12 | "ChoiceEnum", 13 | ] 14 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/text_to_speech/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .tacotron2 import * # noqa 7 | from .tts_transformer import * # noqa 8 | from .fastspeech2 import * # noqa 9 | from .vocoder import * # noqa 10 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | from .metric import * 6 | from .evaluator import * 7 | 8 | 9 | # experimental. 
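# expmetric is an internal, experimental module that may be absent from public checkouts, hence the guarded import below.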
10 | try: 11 | from .expmetric import * 12 | except ImportError: 13 | pass 14 | -------------------------------------------------------------------------------- /fairseq/examples/simultaneous_translation/README.md: -------------------------------------------------------------------------------- 1 | # Simultaneous Translation 2 | Examples of simultaneous translation in fairseq 3 | - [English-to-Japanese text-to-text wait-k model](docs/enja-waitk.md) 4 | - [English-to-German text-to-text monotonic multihead attention model](docs/ende-mma.md) 5 | - [English-to-German speech-to-text simultaneous translation model](../speech_to_text/docs/simulst_mustc_example.md) 6 | -------------------------------------------------------------------------------- /fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/dict.ltr.txt: -------------------------------------------------------------------------------- 1 | | 94802 2 | E 51860 3 | T 38431 4 | A 33152 5 | O 31495 6 | N 28855 7 | I 28794 8 | H 27187 9 | S 26071 10 | R 23546 11 | D 18289 12 | L 16308 13 | U 12400 14 | M 10685 15 | W 10317 16 | C 9844 17 | F 9062 18 | G 8924 19 | Y 8226 20 | P 6890 21 | B 6339 22 | V 3936 23 | K 3456 24 | ' 1023 25 | X 636 26 | J 598 27 | Q 437 28 | Z 213 29 | -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/tokenize_zh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | 8 | import fileinput 9 | 10 | import sacrebleu 11 | 12 | 13 | for line in fileinput.input(): 14 | print(sacrebleu.tokenize_zh(line)) 15 | -------------------------------------------------------------------------------- /fairseq/fairseq/config/config.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | hydra: 4 | run: 5 | dir: . 6 | 7 | defaults: 8 | - _self_ 9 | - task: null 10 | - model: null 11 | - criterion: cross_entropy 12 | - optimizer: null 13 | - lr_scheduler: fixed 14 | - bpe: null 15 | - tokenizer: null 16 | - scoring: null 17 | - generation: null 18 | - common_eval: null 19 | - eval_lm: null 20 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_coin_zs.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test_coin.yaml 2 | model: 3 | model_cls: MMFusionSeparate 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel 7 | num_hidden_video_layers: 6 8 | eval: 9 | save_path: runs/task/coin_zs/eval 10 | fairseq: 11 | common_eval: 12 | path: runs/task/checkpoint_best.pt 13 | predictor: COINZSPredictor 14 | -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/tokenize_thai.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree.
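# Reads raw Thai text from stdin and writes space-separated tokens to stdout via PyThaiNLP's default word tokenizer.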
6 | 7 | import sys 8 | 9 | from pythainlp import word_tokenize 10 | 11 | 12 | for line in sys.stdin: 13 | print(" ".join(word_tokenize(line.strip()))) 14 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/crosstask_videoclip.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/crosstask.yaml 2 | model: 3 | model_cls: MMFusionSeparateActionLocalization 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel # dummy, not used. 7 | num_hidden_video_layers: 6 8 | fairseq: 9 | checkpoint: 10 | restore_file: runs/task/checkpoint_best.pt # overwrite the default of VLM. 11 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/speech_to_speech/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .s2s_conformer import * # noqa 7 | from .s2s_conformer_translatotron2 import * # noqa 8 | from .s2s_conformer_unity import * # noqa 9 | from .s2s_transformer import * # noqa 10 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/quantization/scalar/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .qact import ActivationQuantizer # NOQA 7 | from .qconv import IntConv2d # NOQA 8 | from .qemb import IntEmbedding # NOQA 9 | from .qlinear import IntLinear # NOQA 10 | -------------------------------------------------------------------------------- /fairseq/fairseq/tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | 9 | SPACE_NORMALIZER = re.compile(r"\s+") 10 | 11 | 12 | def tokenize_line(line): 13 | line = SPACE_NORMALIZER.sub(" ", line) 14 | line = line.strip() 15 | return line.split() 16 | -------------------------------------------------------------------------------- /fairseq/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead. 
8 | """ 9 | 10 | from fairseq_cli.train import cli_main 11 | 12 | 13 | if __name__ == "__main__": 14 | cli_main() 15 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_vtt_zs.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test_vtt.yaml 2 | model: 3 | model_cls: MMFusionSeparate 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel 7 | num_hidden_video_layers: 6 8 | eval: 9 | save_path: runs/task/vtt_zs/eval 10 | fairseq: 11 | # read code and find what is the checkpoint arg. 12 | common_eval: 13 | path: runs/task/checkpoint_best.pt 14 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | from .loss import * 6 | from .nce import * 7 | 8 | try: 9 | from .fairseqmmloss import * 10 | except ImportError: 11 | pass 12 | 13 | try: 14 | from .expnce import * 15 | except ImportError: 16 | pass 17 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_vttqa_zs.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test_vttqa.yaml 2 | model: 3 | model_cls: MMFusionSeparate 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel 7 | num_hidden_video_layers: 6 8 | eval: 9 | save_path: runs/task/vttqa_zs/eval 10 | fairseq: 11 | # read code and find what is the checkpoint arg. 12 | common_eval: 13 | path: runs/task/checkpoint_best.pt 14 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_youcook_zs.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test_youcook.yaml 2 | model: 3 | model_cls: MMFusionSeparate 4 | mm_encoder_cls: 5 | video_encoder_cls: MMBertForEncoder 6 | text_encoder_cls: BertModel 7 | num_hidden_video_layers: 6 8 | eval: 9 | save_path: runs/task/youcook_zs/eval 10 | fairseq: 11 | # read code and find what is the checkpoint arg. 12 | common_eval: 13 | path: runs/task/checkpoint_best.pt 14 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/config/generate/viterbi.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | fairseq: 4 | task: 5 | _name: unpaired_audio_text 6 | labels: phn 7 | data: ??? 8 | sort_by_length: false 9 | shuffle: false 10 | text_data: '' 11 | 12 | common_eval: 13 | path: ??? 14 | quiet: true 15 | 16 | dataset: 17 | gen_subset: valid 18 | batch_size: 1 19 | 20 | w2l_decoder: VITERBI 21 | post_process: silence 22 | -------------------------------------------------------------------------------- /fairseq/examples/latent_depth/latent_depth_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import multilingual_translation_latent_depth # noqa 7 | from .loss import latent_depth # noqa 8 | from .models import latent_multilingual_transformer # noqa 9 | from .modules import latent_layers # noqa 10 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | from .enc_dec import * # noqa 9 | from .model_camembert import * # noqa 10 | from .model_gottbert import * # noqa 11 | from .model_xlmr import * # noqa 12 | -------------------------------------------------------------------------------- /hifi-gan/env.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | 5 | class AttrDict(dict): 6 | def __init__(self, *args, **kwargs): 7 | super(AttrDict, self).__init__(*args, **kwargs) 8 | self.__dict__ = self 9 | 10 | 11 | def build_env(config, config_name, path): 12 | t_path = os.path.join(path, config_name) 13 | if config != t_path: 14 | os.makedirs(path, exist_ok=True) 15 | shutil.copyfile(config, os.path.join(path, config_name)) 16 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | try: 6 | # fairseq user dir 7 | from .datasets import FairseqMMDataset 8 | from .losses import FairseqCriterion 9 | from .models import FairseqMMModel 10 | from .tasks import FairseqMMTask 11 | except ImportError: 12 | pass 13 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .extracted_features_dataset import ExtractedFeaturesDataset 7 | from .random_input_dataset import RandomInputDataset 8 | 9 | 10 | __all__ = [ 11 | "ExtractedFeaturesDataset", 12 | "RandomInputDataset", 13 | ] 14 | -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/seg_ja.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
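# Resolve this script's own directory so the KyTea build bundled under thirdparty/ can be put on PATH and LD_LIBRARY_PATH before segmenting stdin.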
6 | SCRIPT=`realpath $0` 7 | KYTEA=`dirname $SCRIPT`/thirdparty/kytea 8 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib 9 | export PATH=$PATH:"$KYTEA/bin" 10 | 11 | cat - | tr -d "[:blank:]" | kytea -notags 12 | -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/seg_ko.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | SCRIPT=`realpath $0` 7 | MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2 8 | 9 | export PATH=$PATH:"$MECAB/bin":"$MECAB/lib" 10 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib" 11 | 12 | cat - | mecab -O wakati 13 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import sys 8 | 9 | 10 | def main(): 11 | for line in sys.stdin: 12 | print(line.replace(" ", "").replace("|", " ").strip()) 13 | 14 | 15 | if __name__ == "__main__": 16 | main() 17 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/data/modality.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017-present, Facebook, Inc. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the LICENSE file in 5 | # the root directory of this source tree. An additional grant of patent rights 6 | # can be found in the PATENTS file in the same directory. 7 | 8 | from enum import Enum, auto 9 | 10 | 11 | class Modality(Enum): 12 | AUDIO = auto() 13 | IMAGE = auto() 14 | TEXT = auto() 15 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
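# Converts word transcripts into letter sequences, e.g. "hi there" -> "h i | t h e r e |" (spaces become "|" word boundaries, plus a trailing "|").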
6 | 7 | import sys 8 | 9 | 10 | def main(): 11 | for line in sys.stdin: 12 | print(" ".join(list(line.strip().replace(" ", "|"))) + " |") 13 | 14 | 15 | if __name__ == "__main__": 16 | main() 17 | -------------------------------------------------------------------------------- /fairseq/examples/speech_to_speech/benchmarking/configs/2StageS2ST.yaml: -------------------------------------------------------------------------------- 1 | general: 2 | dataset_path: $npy_dataset 3 | cpu: True 4 | model_type: 2StageS2ST 5 | dataset_size: 1 6 | 7 | stage1: 8 | data: $data_bin_stage1 9 | task: speech_to_text 10 | path: $checkpoint_stage1 11 | config_yaml: config.yaml 12 | max_len_a: 2 13 | max_len_b: 500 14 | 15 | stage2: 16 | data: $data_bin_stage2 17 | task: text_to_speech 18 | path: $checkpoint_stage2 19 | config_yaml: config.yaml 20 | -------------------------------------------------------------------------------- /fairseq/examples/operators/alignment_train_cuda.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #pragma once 10 | 11 | #include <torch/extension.h> // @manual=//caffe2:torch_extension 12 | 13 | void alignmentTrainCUDAWrapper( 14 | const torch::Tensor& p_choose, 15 | torch::Tensor& alpha, 16 | float eps); 17 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | from .mmfusion import * 6 | from .transformermodel import * 7 | from .mmfusionnlg import * 8 | 9 | try: 10 | from .fairseqmmmodel import * 11 | except ImportError: 12 | pass 13 | 14 | try: 15 | from .expmmfusion import * 16 | except ImportError: 17 | pass 18 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/num_samples_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .
import FairseqDataset 7 | 8 | 9 | class NumSamplesDataset(FairseqDataset): 10 | def __getitem__(self, index): 11 | return 1 12 | 13 | def __len__(self): 14 | return 0 15 | 16 | def collater(self, samples): 17 | return sum(samples) 18 | -------------------------------------------------------------------------------- /fairseq/fairseq/config/model/wav2vec2/wav2vec2_large.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | quantize_targets: true 4 | extractor_mode: layer_norm 5 | layer_norm_first: true 6 | final_dim: 768 7 | latent_temp: [2.0,0.1,0.999995] 8 | encoder_layerdrop: 0.0 9 | dropout_input: 0.0 10 | dropout_features: 0.0 11 | dropout: 0.0 12 | attention_dropout: 0.0 13 | conv_bias: true 14 | 15 | encoder_layers: 24 16 | encoder_embed_dim: 1024 17 | encoder_ffn_embed_dim: 4096 18 | encoder_attention_heads: 16 19 | 20 | feature_grad_mult: 1.0 21 | -------------------------------------------------------------------------------- /fairseq/scripts/spm_train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | from __future__ import absolute_import, division, print_function, unicode_literals 9 | 10 | import sys 11 | 12 | import sentencepiece as spm 13 | 14 | 15 | if __name__ == "__main__": 16 | spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:])) 17 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/id_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class IdDataset(FairseqDataset): 12 | def __getitem__(self, index): 13 | return index 14 | 15 | def __len__(self): 16 | return 0 17 | 18 | def collater(self, samples): 19 | return torch.tensor(samples) 20 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/offset_tokens_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class OffsetTokensDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, offset): 11 | super().__init__(dataset) 12 | self.offset = offset 13 | 14 | def __getitem__(self, idx): 15 | return self.dataset[idx] + self.offset 16 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_all_fair_nodep_aws_local_lr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | dir="$1" 6 | 7 | echo "dir: $dir" 8 | 9 | mkdir -p "$dir/log" 10 | sbatch_args="-p wav2vec --nodes=1 --ntasks-per-node=1" 11 | sbatch_args="$sbatch_args --gpus-per-node=1 --cpus-per-task=8 --mem=0 --time=24:00:00" 12 | sbatch_args="$sbatch_args -o $dir/log/decode_sweep_%A.out" 13 | sbatch_args="$sbatch_args -e $dir/log/decode_sweep_%A.err" 14 | 15 | sbatch $sbatch_args examples/data2vec/scripts/text/finetune_all_fair_local_lr.sh $dir 16 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/grad_multiply.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class GradMultiply(torch.autograd.Function): 10 | @staticmethod 11 | def forward(ctx, x, scale): 12 | ctx.scale = scale 13 | res = x.new(x) 14 | return res 15 | 16 | @staticmethod 17 | def backward(ctx, grad): 18 | return grad * ctx.scale, None 19 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/mtm/mmfusionmtm.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/mfmmlm.yaml 2 | project_dir: mtm/mmfusionmtm 3 | task_group: 4 | pretrain: 5 | task: VLMTask # reproducible 6 | dataset: 7 | aligner: MFMMLMAligner 8 | model: 9 | use_seg_emb: True # reproducible 10 | model_cls: MMFusionMTM 11 | mm_encoder_cls: MMBertForMFMMLM 12 | loss: 13 | loss_cls: MTM 14 | finetune: 15 | model: 16 | use_seg_emb: True # reproducible 17 | test: 18 | model: 19 | use_seg_emb: True # reproducible 20 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
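# Legacy BERT-style datasets and dictionaries, kept for backward compatibility with older masked-LM checkpoints.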
5 | 6 | from .block_pair_dataset import BlockPairDataset 7 | from .masked_lm_dataset import MaskedLMDataset 8 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary 9 | 10 | 11 | __all__ = [ 12 | "BertDictionary", 13 | "BlockPairDataset", 14 | "MaskedLMDataset", 15 | "MaskedLMDictionary", 16 | ] 17 | -------------------------------------------------------------------------------- /fairseq/scripts/compound_split_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ]; then 4 | echo "usage: $0 GENERATE_PY_OUTPUT" 5 | exit 1 6 | fi 7 | 8 | GEN=$1 9 | 10 | SYS=$GEN.sys 11 | REF=$GEN.ref 12 | 13 | if [ $(tail -n 1 $GEN | grep BLEU | wc -l) -ne 1 ]; then 14 | echo "not done generating" 15 | exit 16 | fi 17 | 18 | grep ^H $GEN | awk -F '\t' '{print $NF}' | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $SYS 19 | grep ^T $GEN | cut -f2- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $REF 20 | fairseq-score --sys $SYS --ref $REF 21 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/ft.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/default.yaml 2 | # all derived config will be run by fairseq-train. 3 | task_type: sweep_small 4 | fairseq: 5 | optimization: 6 | warmup_updates: 122 # copied from roberta glue: https://github.com/pytorch/fairseq/blob/master/examples/roberta/README.glue.md 7 | checkpoint: 8 | # save_interval_updates: 512 9 | # borrowed from Roberta script. 10 | restore_file: runs/task/checkpoint_best.pt 11 | reset_optimizer: True 12 | reset_dataloader: True 13 | reset_meters: True 14 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/multi/finetune_all_fair_aws_local_lr_nodep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | dir="$1" 6 | 7 | echo "dir: $dir" 8 | 9 | mkdir -p "$dir/log" 10 | sbatch_args="-p wav2vec --nodes=1 --ntasks-per-node=1" 11 | sbatch_args="$sbatch_args --gpus-per-node=1 --cpus-per-task=8 --mem=0 --time=24:00:00" 12 | sbatch_args="$sbatch_args -o $dir/log/decode_sweep_%A.out" 13 | sbatch_args="$sbatch_args -e $dir/log/decode_sweep_%A.err" 14 | 15 | sbatch $sbatch_args examples/data2vec/scripts/multi/finetune_all_fair_local_lr.sh $dir 16 | 17 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_all_large_fair_nodep_aws_local_lr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | dir="$1" 6 | 7 | echo "dir: $dir" 8 | 9 | mkdir -p "$dir/log" 10 | sbatch_args="-p wav2vec --nodes=1 --ntasks-per-node=1" 11 | sbatch_args="$sbatch_args --gpus-per-node=1 --cpus-per-task=8 --mem=0 --time=24:00:00" 12 | sbatch_args="$sbatch_args -o $dir/log/decode_sweep_%A.out" 13 | sbatch_args="$sbatch_args -e $dir/log/decode_sweep_%A.err" 14 | 15 | sbatch $sbatch_args examples/data2vec/scripts/text/finetune_all_large_fair_local_lr.sh $dir 16 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/nat/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | from .fairseq_nat_model import * 8 | from .nonautoregressive_transformer import * 9 | from .nat_crf_transformer import * 10 | from .iterative_nonautoregressive_transformer import * 11 | from .cmlm_transformer import * 12 | from .levenshtein_transformer import * 13 | from .insertion_transformer import * 14 | -------------------------------------------------------------------------------- /fairseq/examples/speech_to_speech/README.md: -------------------------------------------------------------------------------- 1 | # Speech to speech translation (S2ST) 2 | 3 | We provide the implementation and resources for the following work on speech-to-speech translation (S2ST): 4 | 5 | * [Direct speech-to-speech translation with discrete units (Lee et al. 2021)](docs/direct_s2st_discrete_units.md) 6 | * [Textless Speech-to-Speech Translation on Real Data (Lee et al. 2021)](docs/textless_s2st_real_data.md) 7 | * [Enhanced Direct Speech-to-Speech Translation Using Self-supervised Pre-training and Data Augmentation](docs/enhanced_direct_s2st_discrete_units.md) 8 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | from .task import * 6 | from .vlmtask import * 7 | from .retritask import * 8 | 9 | try: 10 | from .fairseqmmtask import * 11 | except ImportError: 12 | pass 13 | 14 | try: 15 | from .milncetask import * 16 | except ImportError: 17 | pass 18 | 19 | try: 20 | from .expretritask import * 21 | except ImportError: 22 | pass 23 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .image_dataset import ImageDataset 7 | from .path_dataset import PathDataset 8 | from .mae_image_dataset import MaeImageDataset 9 | from .mae_finetuning_image_dataset import MaeFinetuningImageDataset 10 | 11 | 12 | __all__ = [ 13 | "ImageDataset", 14 | "MaeImageDataset", 15 | "MaeFinetuningImageDataset", 16 | "PathDataset", 17 | ] -------------------------------------------------------------------------------- /fairseq/fairseq/models/speech_to_text/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
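# Importing this package registers the speech-to-text architectures below (Berard LSTM, convtransformer, S2T conformer/transformer, and the XM-Transformer variants) with fairseq's model registry.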
5 | 6 | from .berard import * # noqa 7 | from .convtransformer import * # noqa 8 | from .multi_modality_model import * # noqa 9 | from .s2t_conformer import * # noqa 10 | from .s2t_transformer import * # noqa 11 | from .s2t_wav_transformer import * # noqa 12 | from .xm_transformer import * # noqa 13 | from .xm_transformer_unity import * # noqa 14 | -------------------------------------------------------------------------------- /fairseq/examples/multilingual/ML50_langs.txt: -------------------------------------------------------------------------------- 1 | ar_AR 2 | cs_CZ 3 | de_DE 4 | en_XX 5 | es_XX 6 | et_EE 7 | fi_FI 8 | fr_XX 9 | gu_IN 10 | hi_IN 11 | it_IT 12 | ja_XX 13 | kk_KZ 14 | ko_KR 15 | lt_LT 16 | lv_LV 17 | my_MM 18 | ne_NP 19 | nl_XX 20 | ro_RO 21 | ru_RU 22 | si_LK 23 | tr_TR 24 | vi_VN 25 | zh_CN 26 | af_ZA 27 | az_AZ 28 | bn_IN 29 | fa_IR 30 | he_IL 31 | hr_HR 32 | id_ID 33 | ka_GE 34 | km_KH 35 | mk_MK 36 | ml_IN 37 | mn_MN 38 | mr_IN 39 | pl_PL 40 | ps_AF 41 | pt_XX 42 | sv_SE 43 | sw_KE 44 | ta_IN 45 | te_IN 46 | th_TH 47 | tl_XX 48 | uk_UA 49 | ur_PK 50 | xh_ZA 51 | gl_ES 52 | sl_SI -------------------------------------------------------------------------------- /fairseq/fairseq/data/roll_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class RollDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, shifts): 13 | super().__init__(dataset) 14 | self.shifts = shifts 15 | 16 | def __getitem__(self, index): 17 | item = self.dataset[index] 18 | return torch.roll(item, self.shifts) 19 | -------------------------------------------------------------------------------- /fairseq/examples/simultaneous_translation/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | for file in sorted(os.listdir(os.path.dirname(__file__))): 11 | if file.endswith(".py") and not file.startswith("_"): 12 | model_name = file[: file.find(".py")] 13 | importlib.import_module( 14 | "examples.simultaneous_translation.models." 
+ model_name 15 | ) 16 | -------------------------------------------------------------------------------- /fairseq/examples/speech_to_speech/benchmarking/configs/DirectS2U.yaml: -------------------------------------------------------------------------------- 1 | general: 2 | dataset_path: $npy_dataset_path 3 | cpu: True 4 | model_type: S2UT 5 | dataset_size: 5 6 | dump_speech_waveforms_dir: $dump_waveforms_dir_path 7 | 8 | stage1: 9 | data: $data_bin 10 | task: speech_to_speech 11 | path: $checkpoint 12 | config_yaml: config.yaml 13 | max_len_b: 100000 14 | beam: 10 15 | target_is_code: True 16 | max_target_positions: 3000 17 | target_code_size: 100 18 | 19 | stage2: 20 | vocoder: $vocoder_path 21 | vocoder_cfg: $vocoder_cfg_json 22 | dur_prediction: True 23 | -------------------------------------------------------------------------------- /fairseq/examples/speech_text_joint_to_text/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | for file in os.listdir(os.path.dirname(__file__)): 11 | if file.endswith(".py") and not file.startswith("_"): 12 | criterion_name = file[: file.find(".py")] 13 | importlib.import_module( 14 | "examples.speech_text_joint_to_text.criterions." + criterion_name 15 | ) 16 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/finetune/run/submitit_reg.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | launcher: 5 | cpus_per_task: 8 6 | gpus_per_node: 8 7 | tasks_per_node: ${hydra.launcher.gpus_per_node} 8 | nodes: 1 9 | comment: null 10 | mem_gb: 384 11 | timeout_min: 4320 12 | max_num_timeout: 100 13 | constraint: volta32gb 14 | name: ${hydra.job.config_name}/${hydra.job.override_dirname} 15 | submitit_folder: ${hydra.sweep.dir}/submitit/%j 16 | 17 | distributed_training: 18 | distributed_world_size: 8 19 | distributed_port: 29671 20 | nprocs_per_node: 8 21 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/pretrain/run/submitit_reg.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | launcher: 5 | cpus_per_task: 8 6 | gpus_per_node: 8 7 | tasks_per_node: ${hydra.launcher.gpus_per_node} 8 | nodes: 4 9 | comment: null 10 | mem_gb: 384 11 | timeout_min: 4320 12 | max_num_timeout: 100 13 | constraint: volta32gb 14 | name: ${hydra.job.config_name}/${hydra.job.override_dirname} 15 | submitit_folder: ${hydra.sweep.dir}/submitit/%j 16 | 17 | distributed_training: 18 | distributed_world_size: 32 19 | distributed_port: 29671 20 | nprocs_per_node: 8 21 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/decode/run/submitit_slurm.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | launcher: 4 | cpus_per_task: ${distributed_training.distributed_world_size} 5 | gpus_per_node: ${distributed_training.distributed_world_size} 6 | tasks_per_node: ${hydra.launcher.gpus_per_node} 7 | nodes: 1 8 | mem_gb: 200 9 | timeout_min: 4320 10 | max_num_timeout: 50 11 | name: 
${hydra.job.config_name} 12 | submitit_folder: ${hydra.sweep.dir}/submitit 13 | 14 | distributed_training: 15 | distributed_world_size: 1 16 | distributed_no_spawn: true 17 | distributed_port: 29761 18 | -------------------------------------------------------------------------------- /fairseq/fairseq/model_parallel/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the criterions/ directory 11 | for file in sorted(os.listdir(os.path.dirname(__file__))): 12 | if file.endswith(".py") and not file.startswith("_"): 13 | module = file[: file.find(".py")] 14 | importlib.import_module("fairseq.model_parallel.criterions." + module) 15 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/decode/run/submitit_slurm_8gpu.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | hydra: 3 | launcher: 4 | cpus_per_task: ${distributed_training.distributed_world_size} 5 | gpus_per_node: ${distributed_training.distributed_world_size} 6 | tasks_per_node: ${hydra.launcher.gpus_per_node} 7 | nodes: 1 8 | mem_gb: 200 9 | timeout_min: 4320 10 | max_num_timeout: 50 11 | name: ${hydra.job.config_name} 12 | submitit_folder: ${hydra.sweep.dir}/submitit 13 | 14 | distributed_training: 15 | distributed_world_size: 8 16 | distributed_no_spawn: true 17 | distributed_port: 29761 18 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_all_char_fair_aws_local_lr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | job_id="$1" 6 | task_id="$2" 7 | dir="$3" 8 | 9 | echo "job_id: $job_id, task_id: $task_id, dir: $dir" 10 | 11 | mkdir -p "$dir/log" 12 | sbatch_args="-p wav2vec --nodes=1 --ntasks-per-node=1" 13 | sbatch_args="$sbatch_args --gpus-per-node=1 --cpus-per-task=8 --mem=0 --time=24:00:00" 14 | sbatch_args="$sbatch_args -d afterok:$job_id -o $dir/log/ft_%A.out" 15 | sbatch_args="$sbatch_args -e $dir/log/ft_%A.err" 16 | 17 | sbatch $sbatch_args examples/data2vec/scripts/text/finetune_all_char_fair_local_lr.sh $dir 18 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/decode/infer_viterbi.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | defaults: 4 | - model: null 5 | 6 | hydra: 7 | run: 8 | dir: ${common_eval.results_path}/viterbi 9 | sweep: 10 | dir: ${common_eval.results_path} 11 | subdir: viterbi 12 | 13 | task: 14 | _name: hubert_pretraining 15 | single_target: true 16 | fine_tuning: true 17 | data: ??? 18 | normalize: ??? 19 | 20 | decoding: 21 | type: viterbi 22 | unique_wer_file: true 23 | common_eval: 24 | results_path: ??? 25 | path: ??? 26 | post_process: letter 27 | dataset: 28 | max_tokens: 1100000 29 | gen_subset: ??? 30 | -------------------------------------------------------------------------------- /fairseq/examples/simultaneous_translation/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. 
and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the utils/ directory 11 | for file in sorted(os.listdir(os.path.dirname(__file__))): 12 | if file.endswith(".py") and not file.startswith("_"): 13 | module = file[: file.find(".py")] 14 | importlib.import_module("examples.simultaneous_translation.utils." + module) 15 | -------------------------------------------------------------------------------- /fairseq/scripts/sacrebleu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 4 ]; then 4 | echo "usage: $0 TESTSET SRCLANG TGTLANG GEN" 5 | exit 1 6 | fi 7 | 8 | TESTSET=$1 9 | SRCLANG=$2 10 | TGTLANG=$3 11 | 12 | GEN=$4 13 | 14 | if ! command -v sacremoses &> /dev/null 15 | then 16 | echo "sacremoses could not be found, please install with: pip install sacremoses" 17 | exit 1 18 | fi 19 | 20 | grep ^H $GEN \ 21 | | sed 's/^H\-//' \ 22 | | sort -n -k 1 \ 23 | | cut -f 3 \ 24 | | sacremoses detokenize \ 25 | > $GEN.sorted.detok 26 | 27 | sacrebleu --test-set $TESTSET --language-pair "${SRCLANG}-${TGTLANG}" < $GEN.sorted.detok 28 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_all_fair_aws_local_lr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | job_id="$1" 6 | task_id="$2" 7 | dir="$3" 8 | 9 | echo "job_id: $job_id, task_id: $task_id, dir: $dir" 10 | 11 | mkdir -p "$dir/log" 12 | sbatch_args="-p wav2vec --nodes=1 --ntasks-per-node=1" 13 | sbatch_args="$sbatch_args --gpus-per-node=1 --cpus-per-task=8 --mem=0 --time=24:00:00" 14 | sbatch_args="$sbatch_args -d afterok:$job_id -o $dir/log/decode_sweep_%A.out" 15 | sbatch_args="$sbatch_args -e $dir/log/decode_sweep_%A.err" 16 | 17 | sbatch $sbatch_args examples/data2vec/scripts/text/finetune_all_fair_local_lr.sh $dir 18 | -------------------------------------------------------------------------------- /fairseq/examples/operators/utils.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree.
7 | */ 8 | 9 | #pragma once 10 | 11 | #include <torch/extension.h> // @manual=//caffe2:torch_extension 12 | 13 | #define CHECK_CUDA(x) \ 14 | TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 15 | #define CHECK_CONTIGUOUS(x) \ 16 | TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 17 | #define CHECK_INPUT(x) \ 18 | CHECK_CUDA(x); \ 19 | CHECK_CONTIGUOUS(x) 20 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | # ASG loss requires flashlight bindings 6 | files_to_skip = set() 7 | try: 8 | import flashlight.lib.sequence.criterion 9 | except ImportError: 10 | files_to_skip.add("ASG_loss.py") 11 | 12 | for file in sorted(os.listdir(os.path.dirname(__file__))): 13 | if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip: 14 | criterion_name = file[: file.find(".py")] 15 | importlib.import_module( 16 | "examples.speech_recognition.criterions." + criterion_name 17 | ) 18 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/multi/finetune_all_fair_aws_local_lr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | job_id="$1" 6 | task_id="$2" 7 | dir="$3" 8 | 9 | echo "job_id: $job_id, task_id: $task_id, dir: $dir" 10 | 11 | mkdir -p "$dir/log" 12 | sbatch_args="-p wav2vec --nodes=1 --ntasks-per-node=1" 13 | sbatch_args="$sbatch_args --gpus-per-node=1 --cpus-per-task=8 --mem=0 --time=24:00:00" 14 | sbatch_args="$sbatch_args -d afterok:$job_id -o $dir/log/decode_sweep_%A.out" 15 | sbatch_args="$sbatch_args -e $dir/log/decode_sweep_%A.err" 16 | 17 | sbatch $sbatch_args examples/data2vec/scripts/multi/finetune_all_fair_local_lr.sh $dir 18 | 19 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/scripts/normalize_text.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import regex 8 | import sys 9 | 10 | 11 | def main(): 12 | filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]") 13 | 14 | for line in sys.stdin: 15 | line = line.strip() 16 | line = filter_r.sub(" ", line) 17 | line = " ".join(line.split()) 18 | print(line) 19 | 20 | 21 | if __name__ == "__main__": 22 | main() 23 | -------------------------------------------------------------------------------- /fairseq/fairseq/model_parallel/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file""" 6 | 7 | from .multihead_attention import ModelParallelMultiheadAttention 8 | from .transformer_layer import ( 9 | ModelParallelTransformerEncoderLayer, 10 | ModelParallelTransformerDecoderLayer, 11 | ) 12 | 13 | __all__ = [ 14 | "ModelParallelMultiheadAttention", 15 | "ModelParallelTransformerEncoderLayer", 16 | "ModelParallelTransformerDecoderLayer", 17 | ] 18 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_all_large_fair_aws_local_lr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | job_id="$1" 6 | task_id="$2" 7 | dir="$3" 8 | 9 | echo "job_id: $job_id, task_id: $task_id, dir: $dir" 10 | 11 | mkdir -p "$dir/log" 12 | sbatch_args="-p wav2vec --nodes=1 --ntasks-per-node=1" 13 | sbatch_args="$sbatch_args --gpus-per-node=1 --cpus-per-task=8 --mem=0 --time=24:00:00" 14 | sbatch_args="$sbatch_args -d afterok:$job_id -o $dir/log/decode_sweep_%A.out" 15 | sbatch_args="$sbatch_args -e $dir/log/decode_sweep_%A.err" 16 | 17 | sbatch $sbatch_args examples/data2vec/scripts/text/finetune_all_large_fair_local_lr.sh $dir 18 | -------------------------------------------------------------------------------- /translatotron/preprocess/extract_ref_txt.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | from examples.speech_to_text.data_utils import load_df_from_tsv 4 | 5 | def main(): 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("--input-tsv", type=str, required=True) 8 | parser.add_argument("--output-txt", type=str, required=True) 9 | args = parser.parse_args() 10 | df = load_df_from_tsv(args.input_tsv) 11 | data = list(df.T.to_dict().values()) 12 | with open(args.output_txt, "w") as f: 13 | for item in data: 14 | f.write(item["tgt_text"].lower() + "\n") 15 | 16 | 17 | if __name__ == "__main__": 18 | main() -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_vtt.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test.yaml 2 | dataset: 3 | meta_processor: MSRVTTMetaProcessor 4 | test_path: data/msrvtt/MSRVTT_JSFUSION_test.csv 5 | video_processor: VideoProcessor 6 | vfeat_dir: data/feat/feat_vtt_s3d 7 | text_processor: MSRVTTTextProcessor 8 | num_iso_layer: 12 9 | model: 10 | model_cls: MMFusionJoint 11 | mm_encoder_cls: MMBertForJoint 12 | eval: 13 | save_path: runs/task/vtt/eval 14 | fairseq: 15 | # read code and find what is the checkpoint arg. 16 | common_eval: 17 | path: runs/task/vtt/checkpoint_last.pt 18 | metric: RetrievalMetric 19 | predictor: RetrievalPredictor 20 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_vttqa.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test.yaml 2 | dataset: 3 | meta_processor: MSRVTTQAMetaProcessor 4 | test_path: data/msrvtt-qa/MSR_MC_test.csv 5 | video_processor: VideoProcessor 6 | vfeat_dir: data/feat/feat_vtt_s3d 7 | text_processor: MSRVTTQATextProcessor 8 | aligner: MSRVTTQAAligner 9 | num_iso_layer: 12 10 | model: 11 | model_cls: MMFusionJoint 12 | mm_encoder_cls: MMBertForJoint 13 | eval: 14 | save_path: runs/task/vttqa/eval 15 | fairseq: 16 | # read code and find what is the checkpoint arg. 
17 | common_eval: 18 | path: runs/task/vttqa/checkpoint_last.pt 19 | metric: QAMetric 20 | predictor: QAPredictor 21 | -------------------------------------------------------------------------------- /fairseq/examples/speech_to_speech/benchmarking/configs/3StageS2ST.yaml: -------------------------------------------------------------------------------- 1 | general: 2 | dataset_path: $npy_dataset 3 | cpu: True 4 | model_type: 3StageS2ST 5 | max_len_a: 2 6 | max_len_b: 500 7 | dataset_size: 1 8 | 9 | stage1: 10 | data: $data_bin_stage1 11 | task: speech_to_text 12 | path: $checkpoint_stage1 13 | config_yaml: config.yaml 14 | max_len_a: 2 15 | max_len_b: 500 16 | 17 | stage2: 18 | data: $data_bin_stage2 19 | task: translation 20 | path: $checkpoint_stage2 21 | config_yaml: config.yaml 22 | 23 | 24 | stage3: 25 | data: $data_bin_stage3 26 | task: text_to_speech 27 | path: $checkpoint_stage3 28 | config_yaml: config.yaml 29 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/raw_label_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class RawLabelDataset(FairseqDataset): 12 | def __init__(self, labels): 13 | super().__init__() 14 | self.labels = labels 15 | 16 | def __getitem__(self, index): 17 | return self.labels[index] 18 | 19 | def __len__(self): 20 | return len(self.labels) 21 | 22 | def collater(self, samples): 23 | return torch.tensor(samples) 24 | -------------------------------------------------------------------------------- /fairseq/.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Before submitting 2 | 3 | - [ ] Was this discussed/approved via a Github issue? (no need for typos, doc improvements) 4 | - [ ] Did you read the [contributor guideline](https://github.com/pytorch/fairseq/blob/main/CONTRIBUTING.md)? 5 | - [ ] Did you make sure to update the docs? 6 | - [ ] Did you write any new necessary tests? 7 | 8 | ## What does this PR do? 9 | Fixes # (issue). 10 | 11 | ## PR review 12 | Anyone in the community is free to review the PR once the tests have passed. 13 | If we didn't discuss your PR in Github issues there's a high chance it will not be merged. 14 | 15 | ## Did you have fun?
16 | Make sure you had fun coding 🙃 17 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/vttqa.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/ft.yaml 2 | dataset: 3 | meta_processor: MSRVTTMetaProcessor 4 | train_path: data/msrvtt/MSRVTT_train.csv 5 | dup: 20 6 | val_path: data/msrvtt/MSRVTT_JSFUSION_test.csv 7 | vfeat_dir: data/feat/feat_vtt_s3d 8 | text_processor: MSRVTTTextProcessor 9 | json_path: data/msrvtt/MSRVTT_data.json 10 | aligner: DSAligner 11 | num_iso_layer: 12 12 | model: 13 | model_cls: MMFusionJoint 14 | mm_encoder_cls: MMBertForJoint 15 | loss: 16 | loss_cls: V2TContraLoss 17 | fairseq: 18 | dataset: 19 | batch_size: 128 20 | optimization: 21 | max_epoch: 5 22 | checkpoint: 23 | save_dir: runs/task/vttqa 24 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/lru_cache_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from functools import lru_cache 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class LRUCacheDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, token=None): 13 | super().__init__(dataset) 14 | 15 | @lru_cache(maxsize=8) 16 | def __getitem__(self, index): 17 | return self.dataset[index] 18 | 19 | @lru_cache(maxsize=8) 20 | def collater(self, samples): 21 | return self.dataset.collater(samples) 22 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/default.yaml: -------------------------------------------------------------------------------- 1 | # this yaml cannot be run alone. you must use `how2.yaml`, `vtt.yaml` etc for training. 2 | dataset: 3 | video_processor: VideoProcessor 4 | bert_name: bert-base-uncased 5 | fairseq: 6 | common: 7 | tensorboard_logdir: run 8 | log_interval: 1000 9 | dataset: 10 | num_workers: 4 11 | optimization: 12 | lr: [ 0.00005 ] 13 | clip_norm: 2.0 14 | optimizer: adam 15 | adam_betas: (0.9, 0.98) 16 | lr_scheduler: polynomial_decay 17 | total_num_update: 1000000 # backward compatible on fairseq 1.0.0a0+af0389f for reproducibility. 18 | warmup_updates: 1000 19 | weight_decay: 0.0 20 | ddp_backend: no_c10d 21 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/huffman/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
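#
# Added note (not upstream documentation): Huffman coding assigns shorter bit
# codes to more frequent tokens, which is what lets HuffmanMMapIndexedDataset
# store token sequences compactly on disk; HuffmanCodeBuilder accumulates
# symbol counts and builds the HuffmanCoder used for encoding and decoding.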
5 | 6 | from .huffman_coder import HuffmanCodeBuilder, HuffmanCoder 7 | from .huffman_mmap_indexed_dataset import ( 8 | HuffmanMMapIndex, 9 | HuffmanMMapIndexedDataset, 10 | HuffmanMMapIndexedDatasetBuilder, 11 | vocab_file_path, 12 | ) 13 | 14 | __all__ = [ 15 | "HuffmanCoder", 16 | "HuffmanCodeBuilder", 17 | "HuffmanMMapIndexedDatasetBuilder", 18 | "HuffmanMMapIndexedDataset", 19 | "HuffmanMMapIndex", 20 | "vocab_file_path", 21 | ] 22 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/speech_to_speech/modules/ctc_decoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from torch import nn 7 | 8 | from fairseq.models import FairseqEncoder 9 | 10 | 11 | class CTCDecoder(FairseqEncoder): 12 | def __init__(self, dictionary, in_dim): 13 | super().__init__(dictionary) 14 | self.proj = nn.Linear(in_dim, len(dictionary)) 15 | 16 | def forward(self, src_tokens, src_lengths=None, **kwargs): 17 | encoder_out = self.proj(src_tokens) 18 | return {"encoder_out": encoder_out} 19 | -------------------------------------------------------------------------------- /fairseq/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = fairseq 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /fairseq/examples/roberta/commonsense_qa/download_cqa_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | OUTDIR=data/CommonsenseQA 8 | 9 | mkdir -p $OUTDIR 10 | 11 | wget -O $OUTDIR/train.jsonl https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl 12 | wget -O $OUTDIR/valid.jsonl https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl 13 | wget -O $OUTDIR/test.jsonl https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl 14 | wget -O $OUTDIR/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt 15 | -------------------------------------------------------------------------------- /fairseq/RELEASE.md: -------------------------------------------------------------------------------- 1 | # Creating a New Release 2 | 3 | In order to create a new release: 4 | 5 | 1. Navigate to the [Fairseq Workflows](https://github.com/facebookresearch/fairseq/actions) and find the one named _Fairseq Release_. 6 | 7 | 2. Under _Run Workflow_ choose the branch `main` and for _Release Type_ enter either `major`, `minor`, or `patch`. 8 | 9 | 3. 
A branch named `$new_version-release` will be created where the `version.txt` file is updated. Merge those changes into `main`. 10 | 11 | 4. Make sure that a [new PYPI package](https://pypi.org/project/fairseq/) has been uploaded. 12 | 13 | 5. Make sure that a [new github release](https://github.com/facebookresearch/fairseq/releases) has been created. 14 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/ema/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from .ema import EMA 10 | 11 | 12 | def build_ema(model, cfg, device): 13 | return EMA(model, cfg, device) 14 | 15 | 16 | # automatically import any Python files in the models/ema/ directory 17 | for file in sorted(os.listdir(os.path.dirname(__file__))): 18 | if file.endswith(".py") and not file.startswith("_"): 19 | file_name = file[: file.find(".py")] 20 | importlib.import_module("fairseq.models.ema." + file_name) 21 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/new/conf/infer.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | defaults: 4 | - task: null 5 | - model: null 6 | 7 | hydra: 8 | run: 9 | dir: ${common_eval.results_path}/${dataset.gen_subset} 10 | sweep: 11 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${common_eval.results_path} 12 | subdir: ${dataset.gen_subset} 13 | common: 14 | user_dir: /private/home/abaevski/fairseq-py/examples/data2vec 15 | common_eval: 16 | results_path: null 17 | path: null 18 | post_process: letter 19 | quiet: true 20 | dataset: 21 | max_tokens: 3000000 22 | gen_subset: test 23 | distributed_training: 24 | distributed_world_size: 1 25 | decoding: 26 | beam: 5 27 | type: viterbi 28 | -------------------------------------------------------------------------------- /translatotron/preprocess/s2ut/run_mhubert.sh: -------------------------------------------------------------------------------- 1 | DATA_ROOT=$1 2 | 3 | ROOT=~/DASpeech 4 | KM_MODEL_PATH=$ROOT/translatotron/preprocess/s2ut/mhubert.km1000.layer11.pt 5 | CKPT_PATH=$ROOT/checkpoints/mhubert_base_vp_en_es_fr_it3.pt 6 | 7 | python $ROOT/translatotron/preprocess/s2ut/create_manifest.py --data-root $DATA_ROOT 8 | 9 | for split in train dev test 10 | do 11 | python $ROOT/translatotron/preprocess/s2ut/quantize_with_kmeans.py \ 12 | --feature_type hubert \ 13 | --kmeans_model_path $KM_MODEL_PATH \ 14 | --acoustic_model_path $CKPT_PATH \ 15 | --layer 11 \ 16 | --manifest_path $DATA_ROOT/$split.txt \ 17 | --out_quantized_file_path $DATA_ROOT/$split.km1000 18 | done -------------------------------------------------------------------------------- /fairseq/fairseq/data/encoders/space_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import re 7 | 8 | from fairseq.data.encoders import register_tokenizer 9 | from fairseq.dataclass import FairseqDataclass 10 | 11 | 12 | @register_tokenizer("space", dataclass=FairseqDataclass) 13 | class SpaceTokenizer(object): 14 | def __init__(self, *unused): 15 | self.space_tok = re.compile(r"\s+") 16 | 17 | def encode(self, x: str) -> str: 18 | return self.space_tok.sub(" ", x) 19 | 20 | def decode(self, x: str) -> str: 21 | return x 22 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/lightconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="lightconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | "lightconv_cuda", 16 | [ 17 | "lightconv_cuda.cpp", 18 | "lightconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/unfold.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn.functional as F 7 | 8 | 9 | def unfold1d(x, kernel_size: int, padding_l: int, pad_value: float = 0): 10 | """unfold T x B x C to T x B x C x K""" 11 | if kernel_size > 1: 12 | T, B, C = x.size() 13 | x = F.pad( 14 | x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value 15 | ) 16 | x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) 17 | else: 18 | x = x.unsqueeze(3) 19 | return x 20 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .image_pretraining import ImagePretrainingTask, ImagePretrainingConfig 7 | from .image_classification import ImageClassificationTask, ImageClassificationConfig 8 | from .mae_image_pretraining import MaeImagePretrainingTask, MaeImagePretrainingConfig 9 | 10 | 11 | __all__ = [ 12 | "ImageClassificationTask", 13 | "ImageClassificationConfig", 14 | "ImagePretrainingTask", 15 | "ImagePretrainingConfig", 16 | "MaeImagePretrainingTask", 17 | "MaeImagePretrainingConfig", 18 | ] -------------------------------------------------------------------------------- /fairseq/fairseq/data/sort_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class SortDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, sort_order): 13 | super().__init__(dataset) 14 | if not isinstance(sort_order, (list, tuple)): 15 | sort_order = [sort_order] 16 | self.sort_order = sort_order 17 | 18 | assert all(len(so) == len(dataset) for so in sort_order) 19 | 20 | def ordered_indices(self): 21 | return np.lexsort(self.sort_order) 22 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/dynamicconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="dynamicconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | name="dynamicconv_cuda", 16 | sources=[ 17 | "dynamicconv_cuda.cpp", 18 | "dynamicconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/transpose_last.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | transpose last 2 dimensions of the input 7 | """ 8 | 9 | import torch.nn as nn 10 | 11 | 12 | class TransposeLast(nn.Module): 13 | def __init__(self, deconstruct_idx=None, transpose_dim=-2): 14 | super().__init__() 15 | self.deconstruct_idx = deconstruct_idx 16 | self.transpose_dim = transpose_dim 17 | 18 | def forward(self, x): 19 | if self.deconstruct_idx is not None: 20 | x = x[self.deconstruct_idx] 21 | return x.transpose(self.transpose_dim, -1) 22 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/youcookcap.yaml: -------------------------------------------------------------------------------- 1 | # finetuning for youcook captioning. 2 | includes: projects/task/ft.yaml 3 | dataset: 4 | meta_processor: YoucookNLGMetaProcessor 5 | train_path: data/youcook/train_list.txt 6 | val_path: data/youcook/val_list.txt 7 | trainval_annotation: data/youcook/youcookii_annotations_trainval.json 8 | video_processor: YoucookVideoProcessor 9 | vfeat_dir: data/feat/feat_youcook_s3d 10 | text_processor: NLGTextProcessor 11 | aligner: DSNLGAligner 12 | model: 13 | model_cls: MMFusionNLG 14 | mm_encoder_cls: MMBertForNLG 15 | loss: 16 | loss_cls: NLGLoss 17 | fairseq: 18 | dataset: 19 | batch_size: 128 20 | optimization: 21 | max_epoch: 10 22 | checkpoint: 23 | save_dir: runs/task/youcookcap 24 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/strip_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .
import BaseWrapperDataset 7 | 8 | 9 | class StripTokenDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, id_to_strip): 11 | super().__init__(dataset) 12 | self.id_to_strip = id_to_strip 13 | 14 | def __getitem__(self, index): 15 | item = self.dataset[index] 16 | while len(item) > 0 and item[-1] == self.id_to_strip: 17 | item = item[:-1] 18 | while len(item) > 0 and item[0] == self.id_to_strip: 19 | item = item[1:] 20 | return item 21 | -------------------------------------------------------------------------------- /fairseq/fairseq/clib/libnat_cuda/edit_dist.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #pragma once 10 | 11 | #include <torch/extension.h> 12 | 13 | torch::Tensor LevenshteinDistanceCuda( 14 | torch::Tensor source, 15 | torch::Tensor target, 16 | torch::Tensor source_length, 17 | torch::Tensor target_length); 18 | 19 | torch::Tensor GenerateDeletionLabelCuda( 20 | torch::Tensor source, 21 | torch::Tensor operations); 22 | 23 | std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda( 24 | torch::Tensor source, 25 | torch::Tensor operations); 26 | -------------------------------------------------------------------------------- /fairseq/tests/speech/test_s2t_conformer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import unittest 7 | from tests.speech import TestFairseqSpeech 8 | 9 | 10 | class TestS2TConformer(TestFairseqSpeech): 11 | def setUp(self): 12 | self.set_up_librispeech() 13 | 14 | def test_librispeech_s2t_conformer_s_checkpoint(self): 15 | self.base_test( 16 | ckpt_name="librispeech_conformer_rel_pos_s.pt", 17 | reference_score=12, 18 | arg_overrides={"config_yaml": "cfg_librispeech.yaml"}, 19 | ) 20 | 21 | 22 | if __name__ == "__main__": 23 | unittest.main() 24 | -------------------------------------------------------------------------------- /fairseq/tests/speech/test_s2t_transformer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | import unittest 7 | from tests.speech import TestFairseqSpeech 8 | 9 | 10 | class TestS2TTransformer(TestFairseqSpeech): 11 | def setUp(self): 12 | self.set_up_librispeech() 13 | 14 | def test_librispeech_s2t_transformer_s_checkpoint(self): 15 | self.base_test( 16 | ckpt_name="librispeech_transformer_s.pt", 17 | reference_score=9, 18 | arg_overrides={"config_yaml": "cfg_librispeech.yaml"}, 19 | ) 20 | 21 | 22 | if __name__ == "__main__": 23 | unittest.main() 24 | -------------------------------------------------------------------------------- /fairseq/examples/multilingual/data_scripts/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Install dependency 3 | ```bash 4 | pip install -r requirement.txt 5 | ``` 6 | 7 | # Download the data set 8 | ```bash 9 | export WORKDIR_ROOT=<path_to_your_workdir> 10 | 11 | ``` 12 | The downloaded data will be at $WORKDIR_ROOT/ML50 13 | 14 | # Preprocess the data 15 | Install SPM [here](https://github.com/google/sentencepiece) 16 | ```bash 17 | export WORKDIR_ROOT=<path_to_your_workdir> 18 | export SPM_PATH=<path_to_spm_binary> 19 | ``` 20 | * $WORKDIR_ROOT/ML50/raw: extracted raw data 21 | * $WORKDIR_ROOT/ML50/dedup: deduplicated data 22 | * $WORKDIR_ROOT/ML50/clean: data with valid and test sentences removed from the dedup data 23 | 24 | 25 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_didemo_zs.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test.yaml 2 | dataset: 3 | meta_processor: DiDeMoMetaProcessor 4 | test_path: data/didemo/test_data.json 5 | video_processor: VideoProcessor 6 | vfeat_dir: data/feat/feat_didemo_s3d 7 | text_processor: DiDeMoTextProcessor 8 | aligner: DiDeMoAligner 9 | num_iso_layer: 12 10 | model: 11 | model_cls: MMFusionSeparate 12 | mm_encoder_cls: 13 | video_encoder_cls: MMBertForEncoder 14 | text_encoder_cls: BertModel 15 | num_hidden_video_layers: 6 16 | eval: 17 | save_path: runs/task/didemo_zs/eval 18 | fairseq: 19 | # read code and find what is the checkpoint arg. 20 | common_eval: 21 | path: runs/task/checkpoint_best.pt 22 | metric: DiDeMoMetric 23 | predictor: DiDeMoPredictor 24 | -------------------------------------------------------------------------------- /fairseq/examples/adaptive_span/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | # automatically import any Python files in the current directory 10 | cur_dir = os.path.dirname(__file__) 11 | for file in os.listdir(cur_dir): 12 | path = os.path.join(cur_dir, file) 13 | if ( 14 | not file.startswith("_") 15 | and not file.startswith(".") 16 | and (file.endswith(".py") or os.path.isdir(path)) 17 | ): 18 | mod_name = file[: file.find(".py")] if file.endswith(".py") else file 19 | module = importlib.import_module(__name__ + "."
+ mod_name) 20 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/how2.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/default.yaml 2 | task_type: sweep_big 3 | slurm_config: big 4 | dataset: 5 | meta_processor: ShardedHow2MetaProcessor 6 | train_path: data/how2/how2_s3d_train.lst 7 | val_path: data/how2/how2_s3d_val.lst 8 | video_processor: ShardedVideoProcessor 9 | vfeat_dir: data/feat/feat_how2_s3d_shard_small 10 | text_processor: ShardedTextProcessor 11 | tfeat_dir: data/feat/feat_how2_s3d_shard_small/raw_caption_dedup.bert-base-uncased. 12 | aligner: FixedLenAligner 13 | # disable direct running of this yaml 14 | eval: 15 | save_path: runs/task 16 | fairseq: 17 | checkpoint: 18 | save_dir: runs/task 19 | save_interval_updates: 1024 20 | keep_interval_updates: 2 21 | keep_last_epochs: 30 22 | 23 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/processors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | from .processor import * 6 | 7 | from .how2processor import * 8 | from .how2retriprocessor import * 9 | 10 | from .dsprocessor import * 11 | 12 | try: 13 | from .rawvideoprocessor import * 14 | from .codecprocessor import * 15 | from .webvidprocessor import * 16 | from .expprocessor import * 17 | from .exphow2processor import * 18 | from .exphow2retriprocessor import * 19 | from .expcodecprocessor import * 20 | from .expfeatureencoder import * 21 | from .expdsprocessor import * 22 | except ImportError: 23 | pass 24 | -------------------------------------------------------------------------------- /fairseq/examples/textless_nlp/gslm/metrics/README.md: -------------------------------------------------------------------------------- 1 | # GSLM Metrics 2 | 3 | ## ASR Metrics 4 | The suite of metrics here uses an ASR model to transcribe the synthesized speech into text, and then uses text-based metrics. We also use word error rate from ASR transcription itself as one of the metrics. [More details](asr_metrics) 5 | 6 | ## ABX Metrics 7 | We use [ABX](https://www.semanticscholar.org/paper/ABX-Discriminability-Measures-and-Applications-Schatz/13d3537228f728c1063cc83743cb118bba3367a0) to evaluate how well-separated phonetic categories are with quantized representations. [More details](abx_metrics) 8 | 9 | ## sWUGGY and sBLIMP 10 | We refer to [ZeroSpeech challenge](https://www.zerospeech.com/2021/track_s.html#scoring-based-metrics) for details on the sWUGGY and sBLIMP metrics. 
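As a concrete illustration of the ASR metrics above: word error rate is the word-level Levenshtein distance between the ASR transcript of the synthesized speech and the reference text, normalized by the reference length. A minimal sketch (illustrative only, assuming plain whitespace-tokenized strings; this is not the metric script shipped with these examples):

```python
def wer(ref: str, hyp: str) -> float:
    """Word error rate: word-level edit distance / reference word count."""
    r, h = ref.split(), hyp.split()
    # dp[i][j] = edits needed to turn the first i ref words into the first j hyp words
    dp = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        dp[i][0] = i  # i deletions
    for j in range(len(h) + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            sub = dp[i - 1][j - 1] + (r[i - 1] != h[j - 1])
            dp[i][j] = min(sub, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
    return dp[len(r)][len(h)] / max(len(r), 1)

print(wer("the cat sat", "the cat sat down"))  # one insertion over 3 ref words -> 0.333...
```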
11 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r") as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name="mmpt", 8 | version="0.0.1", 9 | author="Hu Xu, Po-yao Huang", 10 | author_email="huxu@fb.com", 11 | description="A package for multimodal pretraining.", 12 | long_description=long_description, 13 | long_description_content_type="text/markdown", 14 | url="https://github.com/pytorch/fairseq/examples/MMPT", 15 | packages=setuptools.find_packages(), 16 | install_requires=[ 17 | ], 18 | classifiers=[ 19 | "Programming Language :: Python :: 3", 20 | "License :: CC-BY-NC", 21 | "Operating System :: OS Independent", 22 | ], 23 | python_requires='>=3.6', 24 | ) 25 | -------------------------------------------------------------------------------- /fairseq/examples/roberta/config/finetuning/run_config/slurm_1g_aws.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: '_' 8 | item_sep: '/' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /fsx-wav2vec/${env:USER}/roberta_ft/${env:PREFIX}/${hydra.job.config_name}/${env:SUFFIX} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir}/submitit 17 | timeout_min: 1000 18 | cpus_per_task: 8 19 | gpus_per_node: 1 20 | tasks_per_node: 1 21 | mem_gb: 0 22 | nodes: 1 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: learnfair,wav2vec 25 | max_num_timeout: 30 26 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/coin.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/ft.yaml 2 | task_type: sweep_big 3 | dataset: 4 | meta_processor: COINActionSegmentationMetaProcessor 5 | train_path: data/coin/COIN.json 6 | val_path: data/coin/COIN.json 7 | vfeat_dir: data/feat/feat_coin_s3d 8 | video_processor: VideoProcessor 9 | text_processor: COINActionSegmentationTextProcessor 10 | aligner: COINActionSegmentationAligner 11 | num_iso_layer: 12 12 | sliding_window: 8 13 | sliding_window_size: 32 14 | model: 15 | model_cls: MMFusionActionSegmentation 16 | mm_encoder_cls: MMBertForTokenClassification 17 | loss: 18 | loss_cls: CrossEntropy 19 | fairseq: 20 | dataset: 21 | batch_size: 1 22 | optimization: 23 | max_epoch: 8 24 | checkpoint: 25 | save_dir: runs/task/coin 26 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/vtt.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/ft.yaml 2 | dataset: 3 | meta_processor: MSRVTTMetaProcessor 4 | train_path: data/msrvtt/MSRVTT_train.csv 5 | jsfusion_path: data/msrvtt/MSRVTT_JSFUSION_test.csv 6 | full_test_path: data/msrvtt/MSRVTT_FULL_test.csv 7 | dup: 20 8 | val_path: data/msrvtt/MSRVTT_JSFUSION_test.csv 9 | vfeat_dir: data/feat/feat_vtt_s3d 10 | text_processor: MSRVTTTextProcessor 11 | json_path: data/msrvtt/MSRVTT_data.json 12 | aligner: DSAligner 13 | num_iso_layer: 12 14 | model: 15 | model_cls: MMFusionJoint 16 | mm_encoder_cls: MMBertForJoint 17 | loss: 18 | loss_cls: T2VContraLoss 19 | fairseq: 20 | dataset: 21 | 
batch_size: 256 22 | optimization: 23 | max_epoch: 10 24 | checkpoint: 25 | save_dir: runs/task/vtt 26 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/wav2vec/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import math 7 | import torch.nn.functional as F 8 | 9 | 10 | def pad_to_multiple(x, multiple, dim=-1, value=0): 11 | # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41 12 | if x is None: 13 | return None, 0 14 | tsz = x.size(dim) 15 | m = tsz / multiple 16 | remainder = math.ceil(m) * multiple - tsz 17 | if m.is_integer(): 18 | return x, 0 19 | pad_offset = (0,) * (-1 - dim) * 2 20 | 21 | return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder 22 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_youcookcap.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test.yaml 2 | dataset: 3 | meta_processor: YoucookNLGMetaProcessor 4 | test_path: data/youcook/val_list.txt 5 | trainval_annotation: data/youcook/youcookii_annotations_trainval.json 6 | video_processor: YoucookVideoProcessor 7 | vfeat_dir: data/feat/feat_youcook_s3d 8 | text_processor: NLGTextProcessor 9 | aligner: DSNLGAligner 10 | model: 11 | model_cls: MMFusionNLG 12 | mm_encoder_cls: MMBertForNLG 13 | max_decode_length: 24 14 | eval: 15 | save_path: runs/task/youcookcap/eval 16 | fairseq: 17 | # read code and find what is the checkpoint arg. 18 | common_eval: 19 | path: runs/task/youcookcap/checkpoint_best.pt 20 | metric: NLGMetric 21 | predictor: NLGPredictor 22 | gen_param: 23 | num_beams: 5 24 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/new/decoders/viterbi_decoder.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree. 
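#
# Added note (not upstream documentation): decode() below performs greedy
# best-path decoding of CTC-style emissions: take the argmax label per frame,
# collapse consecutive repeats with unique_consecutive, then drop the blank
# token. No language model or beam search is involved, hence the constant
# score of 0 attached to every hypothesis.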
7 | 8 | import torch 9 | 10 | from typing import List, Dict 11 | 12 | from .base_decoder import BaseDecoder 13 | 14 | 15 | class ViterbiDecoder(BaseDecoder): 16 | def decode( 17 | self, 18 | emissions: torch.FloatTensor, 19 | ) -> List[List[Dict[str, torch.LongTensor]]]: 20 | def get_pred(e): 21 | toks = e.argmax(dim=-1).unique_consecutive() 22 | return toks[toks != self.blank] 23 | 24 | return [[{"tokens": get_pred(x), "score": 0}] for x in emissions] 25 | -------------------------------------------------------------------------------- /test_scripts/convert_id.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tqdm 3 | import shutil 4 | import argparse 5 | 6 | 7 | from examples.speech_to_text.data_utils import load_df_from_tsv 8 | 9 | def process(args): 10 | df = load_df_from_tsv(args.input_tsv) 11 | data = list(df.T.to_dict().values()) 12 | for idx, item in tqdm.tqdm(enumerate(data)): 13 | old_path = os.path.join(args.audio_dir, f"{item['id']}_generated_e2e.wav") 14 | new_path = os.path.join(args.audio_dir, f"{idx}_pred.wav") 15 | shutil.copy(old_path, new_path) 16 | 17 | def main(): 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument("--input-tsv") 20 | parser.add_argument("--audio-dir") 21 | args = parser.parse_args() 22 | process(args) 23 | 24 | if __name__ == "__main__": 25 | main() 26 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_coin.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test.yaml 2 | dataset: 3 | split: test 4 | test_path: data/coin/COIN.json 5 | meta_processor: COINActionSegmentationMetaProcessor 6 | vfeat_dir: data/feat/feat_coin_s3d 7 | video_processor: VideoProcessor 8 | text_processor: COINActionSegmentationTextProcessor 9 | aligner: COINActionSegmentationAligner 10 | num_iso_layer: 12 11 | sliding_window: 16 12 | sliding_window_size: 32 13 | model: 14 | model_cls: MMFusionActionSegmentation 15 | mm_encoder_cls: MMBertForTokenClassification 16 | eval: 17 | save_path: runs/task/coin/eval 18 | fairseq: 19 | dataset: 20 | batch_size: 1 21 | common_eval: 22 | path: runs/task/coin/checkpoint_best.pt 23 | metric: COINActionSegmentationMetric 24 | predictor: COINPredictor 25 | -------------------------------------------------------------------------------- /fairseq/examples/roberta/fb_multilingual/README.multilingual.pretraining.md: -------------------------------------------------------------------------------- 1 | # Multilingual pretraining RoBERTa 2 | 3 | This tutorial will walk you through pretraining multilingual RoBERTa. 4 | 5 | ### 1) Preprocess the data 6 | 7 | ```bash 8 | DICTIONARY="/private/home/namangoyal/dataset/XLM/wiki/17/175k/vocab" 9 | DATA_LOCATION="/private/home/namangoyal/dataset/XLM/wiki/17/175k" 10 | 11 | for LANG in en es it 12 | do 13 | fairseq-preprocess \ 14 | --only-source \ 15 | --srcdict $DICTIONARY \ 16 | --trainpref "$DATA_LOCATION/train.$LANG" \ 17 | --validpref "$DATA_LOCATION/valid.$LANG" \ 18 | --testpref "$DATA_LOCATION/test.$LANG" \ 19 | --destdir "wiki_17-bin/$LANG" \ 20 | --workers 60; 21 | done 22 | ``` 23 | 24 | ### 2) Train RoBERTa base 25 | 26 | [COMING UP...] 
27 | -------------------------------------------------------------------------------- /fairseq/examples/simultaneous_translation/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | import os 8 | import importlib 9 | from fairseq import registry 10 | 11 | ( 12 | build_monotonic_attention, 13 | register_monotonic_attention, 14 | MONOTONIC_ATTENTION_REGISTRY, 15 | _, 16 | ) = registry.setup_registry("--simul-type") 17 | 18 | for file in sorted(os.listdir(os.path.dirname(__file__))): 19 | if file.endswith(".py") and not file.startswith("_"): 20 | model_name = file[: file.find(".py")] 21 | importlib.import_module( 22 | "examples.simultaneous_translation.modules." + model_name 23 | ) 24 | -------------------------------------------------------------------------------- /fairseq/fairseq/model_parallel/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.model_parallel.models." + model_name) 21 | -------------------------------------------------------------------------------- /fairseq/examples/operators/alignment_train_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include "alignment_train_cuda.h" 10 | #include "utils.h" 11 | 12 | namespace { 13 | 14 | void alignmentTrainCUDA( 15 | const torch::Tensor& p_choose, 16 | torch::Tensor& alpha, 17 | float eps) { 18 | CHECK_INPUT(p_choose); 19 | CHECK_INPUT(alpha); 20 | 21 | alignmentTrainCUDAWrapper(p_choose, alpha, eps); 22 | } 23 | 24 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 25 | m.def( 26 | "alignment_train_cuda", 27 | &alignmentTrainCUDA, 28 | "expected_alignment_from_p_choose (CUDA)"); 29 | } 30 | 31 | } // namespace 32 | -------------------------------------------------------------------------------- /fairseq/fairseq/models/huggingface/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
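#
# Added note (not upstream documentation): the loop below is fairseq's usual
# plugin auto-import idiom, repeated in many __init__.py files in this tree.
# Importing every non-underscore module in the directory runs the
# @register_model / @register_model_architecture decorators at import time,
# which is how fairseq's model registry gets populated.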
5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/huggingface/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.models.huggingface." + model_name) 21 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/gelu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with 7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs 8 | """ 9 | 10 | import math 11 | 12 | import torch 13 | import torch.nn as nn 14 | 15 | 16 | def gelu_accurate(x): 17 | if not hasattr(gelu_accurate, "_a"): 18 | gelu_accurate._a = math.sqrt(2 / math.pi) 19 | return ( 20 | 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) 21 | ) 22 | 23 | 24 | def gelu(x: torch.Tensor) -> torch.Tensor: 25 | return torch.nn.functional.gelu(x.float()).type_as(x) 26 | -------------------------------------------------------------------------------- /fairseq/examples/textless_nlp/gslm/speech2unit/clustering/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from typing import List, Tuple 7 | 8 | 9 | def get_audio_files(manifest_path: str) -> Tuple[str, List[str], List[int]]: 10 | fnames, sizes = [], [] 11 | with open(manifest_path, "r") as f: 12 | root_dir = f.readline().strip() 13 | for line in f: 14 | items = line.strip().split("\t") 15 | assert ( 16 | len(items) == 2 17 | ), f"File must have two columns separated by tab. Got {line}" 18 | fnames.append(items[0]) 19 | sizes.append(int(items[1])) 20 | return root_dir, fnames, sizes 21 | -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/README.md: -------------------------------------------------------------------------------- 1 | # M2M-100 Tokenization 2 | 3 | We apply different tokenization strategies for different languages following the existing literature. Here we provide tok.sh, a tokenizer that can be used to reproduce our results. 4 | 5 | To reproduce the results, follow these steps: 6 | 7 | ``` 8 | tgt_lang=... 9 | reference_translation=...
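# Added commentary, not part of the original snippet: fairseq-generate emits
# hypotheses as "H-<id><TAB><score><TAB><text>", so the pipeline below keeps
# the H lines, restores corpus order with sort -V, and cuts out the text
# field before tokenizing.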
10 | cat generation_output | grep -P "^H" | sort -V | cut -f 3- | sh tok.sh $tgt_lang > hyp 11 | cat $reference_translation | sh tok.sh $tgt_lang > ref 12 | sacrebleu -tok 'none' ref < hyp 13 | ``` 14 | 15 | ## Installation 16 | 17 | Tools needed for all the languages except Arabic can be installed by running install_dependencies.sh. 18 | If you want to evaluate Arabic models, please follow the instructions provided here: http://alt.qcri.org/tools/arabic-normalizer/ to install them. 19 | -------------------------------------------------------------------------------- /fairseq/hydra_plugins/dependency_submitit_launcher/hydra_plugins/dependency_submitit_launcher/config.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from dataclasses import dataclass, field 3 | 4 | from hydra.core.config_store import ConfigStore 5 | 6 | from hydra_plugins.hydra_submitit_launcher.config import SlurmQueueConf 7 | 8 | 9 | @dataclass 10 | class DependencySubmititConf(SlurmQueueConf): 11 | """Slurm configuration overrides and specific parameters""" 12 | 13 | _target_: str = ( 14 | "hydra_plugins.dependency_submitit_launcher.launcher.DependencySubmititLauncher" 15 | ) 16 | 17 | 18 | ConfigStore.instance().store( 19 | group="hydra/launcher", 20 | name="dependency_submitit_slurm", 21 | node=DependencySubmititConf(), 22 | provider="dependency_submitit_slurm", 23 | ) 24 | -------------------------------------------------------------------------------- /fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py: -------------------------------------------------------------------------------- 1 | """ from https://github.com/keithito/tacotron """ 2 | 3 | ''' 4 | Defines the set of symbols used in text input to the model. 5 | 6 | The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' 7 | from . import cmudict 8 | 9 | _pad = '_' 10 | _punctuation = '!\'(),.:;? ' 11 | _special = '-' 12 | _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' 13 | 14 | # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): 15 | _arpabet = ['@' + s for s in cmudict.valid_symbols] 16 | 17 | # Export all symbols: 18 | symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet 19 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/encoders/characters.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
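#
# Added note (not upstream documentation): this "bpe" is really character-level
# tokenization. encode() first rewrites real spaces to U+2581 so word boundaries
# survive, then inserts a space between every remaining character; decode()
# inverts both steps.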
5 | 6 | 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | SPACE = chr(32) 11 | SPACE_ESCAPE = chr(9601) 12 | 13 | 14 | @register_bpe("characters") 15 | class Characters(object): 16 | def __init__(self, *unused): 17 | pass 18 | 19 | @staticmethod 20 | def add_args(parser): 21 | pass 22 | 23 | @staticmethod 24 | def encode(x: str) -> str: 25 | escaped = x.replace(SPACE, SPACE_ESCAPE) 26 | return SPACE.join(list(escaped)) 27 | 28 | @staticmethod 29 | def decode(x: str) -> str: 30 | return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) 31 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/mtm/vlm/test_vttqa.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: MSRVTTQAAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: MSRVTTQAMetaProcessor 9 | test_path: data/msrvtt-qa/MSR_MC_test.csv 10 | vfeat_dir: data/feat/feat_vtt_s3d 11 | text_processor: MSRVTTQATextProcessor 12 | num_iso_layer: 12 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/mtm/vlm/vttqa/checkpoint_last.pt 22 | model: 23 | model_cls: MMFusionJoint 24 | mm_encoder_cls: MMBertForJoint 25 | use_seg_emb: true 26 | eval: 27 | save_path: runs/mtm/vlm/vttqa/eval 28 | metric: QAMetric 29 | predictor: QAPredictor 30 | -------------------------------------------------------------------------------- /fairseq/examples/constrained_decoding/normalize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree. 
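# Stream filter: reads raw text on stdin and writes Moses punctuation-normalized
# lines to stdout, e.g. `cat raw.txt | python normalize.py --lang en --penn`.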
7 | 8 | import sys 9 | 10 | from sacremoses.normalize import MosesPunctNormalizer 11 | 12 | 13 | def main(args): 14 | normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn) 15 | for line in sys.stdin: 16 | print(normalizer.normalize(line.rstrip()), flush=True) 17 | 18 | 19 | if __name__ == "__main__": 20 | import argparse 21 | 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument("--lang", "-l", default="en") 24 | parser.add_argument("--penn", "-p", action="store_true") 25 | args = parser.parse_args() 26 | 27 | main(args) 28 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/new/conf/hydra/sweeper/ax.yaml: -------------------------------------------------------------------------------- 1 | # @package hydra.sweeper 2 | _target_: hydra_plugins.hydra_ax_sweeper.ax_sweeper.AxSweeper 3 | max_batch_size: null 4 | ax_config: 5 | max_trials: 128 6 | early_stop: 7 | minimize: true 8 | max_epochs_without_improvement: 10 9 | epsilon: 0.025 10 | experiment: 11 | name: ${dataset.gen_subset} 12 | objective_name: wer 13 | minimize: true 14 | parameter_constraints: null 15 | outcome_constraints: null 16 | status_quo: null 17 | client: 18 | verbose_logging: false 19 | random_seed: null 20 | params: 21 | decoding.lmweight: 22 | type: range 23 | bounds: [0.0, 5.0] 24 | decoding.wordscore: 25 | type: range 26 | bounds: [-5.0, 5.0] 27 | decoding.silweight: 28 | type: range 29 | bounds: [ -8.0, 0.0 ] 30 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/mtm/vlm/test_vtt.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: DSAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: MSRVTTMetaProcessor 9 | test_path: data/msrvtt/MSRVTT_JSFUSION_test.csv 10 | vfeat_dir: data/feat/feat_vtt_s3d 11 | text_processor: MSRVTTTextProcessor 12 | num_iso_layer: 12 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/mtm/vlm/vtt/checkpoint_last.pt 22 | model: 23 | model_cls: MMFusionJoint 24 | mm_encoder_cls: MMBertForJoint 25 | use_seg_emb: true 26 | eval: 27 | save_path: runs/mtm/vlm/vtt/eval 28 | metric: RetrievalMetric 29 | predictor: RetrievalPredictor 30 | -------------------------------------------------------------------------------- /fairseq/examples/roberta/config/pretraining/base.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | common: 3 | fp16: true 4 | log_format: json 5 | log_interval: 200 6 | 7 | checkpoint: 8 | no_epoch_checkpoints: true 9 | 10 | task: 11 | _name: masked_lm 12 | data: ??? 
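# `???` is OmegaConf's mandatory-value marker: training fails fast unless the
# value is supplied at launch, e.g. `fairseq-hydra-train ... task.data=/path/to/data-bin`.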
13 | sample_break_mode: complete 14 | tokens_per_sample: 512 15 | 16 | criterion: masked_lm 17 | 18 | dataset: 19 | batch_size: 16 20 | ignore_unused_valid_subsets: true 21 | 22 | optimizer: 23 | _name: adam 24 | weight_decay: 0.01 25 | adam_betas: (0.9,0.98) 26 | adam_eps: 1e-06 27 | 28 | lr_scheduler: 29 | _name: polynomial_decay 30 | warmup_updates: 10000 31 | 32 | optimization: 33 | clip_norm: 0 34 | lr: [0.0005] 35 | max_update: 125000 36 | update_freq: [16] 37 | 38 | model: 39 | _name: roberta 40 | max_positions: 512 41 | dropout: 0.1 42 | attention_dropout: 0.1 43 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/new/conf/hydra/sweeper/ax_sil.yaml: -------------------------------------------------------------------------------- 1 | # @package hydra.sweeper 2 | _target_: hydra_plugins.hydra_ax_sweeper.ax_sweeper.AxSweeper 3 | max_batch_size: null 4 | ax_config: 5 | max_trials: 64 6 | early_stop: 7 | minimize: true 8 | max_epochs_without_improvement: 10 9 | epsilon: 0.025 10 | experiment: 11 | name: ${dataset.gen_subset} 12 | objective_name: wer 13 | minimize: true 14 | parameter_constraints: null 15 | outcome_constraints: null 16 | status_quo: null 17 | client: 18 | verbose_logging: false 19 | random_seed: null 20 | params: 21 | decoding.lmweight: 22 | type: range 23 | bounds: [0.0, 10.0] 24 | decoding.wordscore: 25 | type: range 26 | bounds: [-10.0, 10.0] 27 | decoding.silweight: 28 | type: range 29 | bounds: [ -10.0, 0.0 ] 30 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 10 19 | gpus_per_node: 8 20 | tasks_per_node: 8 21 | mem_gb: 450 22 | nodes: 1 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: devlab,learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/tokenize_indic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | # Use: echo {text} | python tokenize_indic.py {language} 8 | 9 | import sys 10 | 11 | from indicnlp.normalize.indic_normalize import IndicNormalizerFactory 12 | from indicnlp.tokenize.indic_tokenize import trivial_tokenize 13 | 14 | 15 | factory = IndicNormalizerFactory() 16 | normalizer = factory.get_normalizer( 17 | sys.argv[1], remove_nuktas=False, nasals_mode="do_nothing" 18 | ) 19 | 20 | for line in sys.stdin: 21 | normalized_line = normalizer.normalize(line.strip()) 22 | tokenized_line = " ".join(trivial_tokenize(normalized_line, sys.argv[1])) 23 | print(tokenized_line) 24 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_2g.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 10 19 | gpus_per_node: 2 20 | tasks_per_node: 2 21 | mem_gb: 200 22 | nodes: 1 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: devlab,learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 27 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_4g.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 10 19 | gpus_per_node: 4 20 | tasks_per_node: 4 21 | mem_gb: 200 22 | nodes: 1 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: devlab,learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 27 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_8.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 10 19 | gpus_per_node: 8 20 | tasks_per_node: 8 21 | mem_gb: 400 22 | nodes: 8 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: devlab,learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 27 | -------------------------------------------------------------------------------- 
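The `run_config/slurm_*.yaml` files above are `# @package _global_` Hydra override groups: selecting one with `+run_config=<name>` together with `hydra/launcher=submitit_slurm` sets the Slurm resources (GPUs per node, nodes, memory, partition) for a sweep, the same pattern used by the data2vec finetuning scripts later in this listing. A minimal launch sketch; the base config name and all paths are illustrative placeholders:

```
PREFIX=w2v_ft PYTHONPATH=. python fairseq_cli/hydra_train.py -m \
  --config-dir examples/wav2vec/config/finetuning --config-name base_960h \
  hydra/launcher=submitit_slurm +run_config=slurm_8 \
  task.data=/path/to/manifests model.w2v_path=/path/to/wav2vec_small.pt
```

Note that `PREFIX` must be exported, since the sweep directory and job name reference `${env:PREFIX}`.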
/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -u 4 | 5 | val_sets="dev_other" 6 | graph_name=graph 7 | decode_suffix="" 8 | decode_script="steps/decode_fmllr.sh" 9 | decode_args="" 10 | nj=60 11 | 12 | . ./cmd.sh 13 | . ./path.sh 14 | . parse_options.sh 15 | 16 | set -x 17 | exp_dir=$1 18 | data_root=$2 19 | lang_test=$3 20 | 21 | graph=$exp_dir/$graph_name 22 | 23 | if [ ! -d $graph ]; then 24 | utils/mkgraph.sh $lang_test $exp_dir $graph 25 | fi 26 | 27 | for part in $val_sets; do 28 | dec_dir=$exp_dir/decode${decode_suffix}_${part} 29 | if [ ! -d $dec_dir ]; then 30 | echo "decoding $part for $exp_dir" 31 | $decode_script --nj $nj --cmd "$decode_cmd" $decode_args \ 32 | $graph $data_root/$part $dec_dir & 33 | else 34 | echo "$dec_dir exists. skip" 35 | fi 36 | done 37 | 38 | wait 39 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/fp32_group_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | Group norm done in fp32 (for fp16 training) 7 | """ 8 | 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | 12 | 13 | class Fp32GroupNorm(nn.GroupNorm): 14 | def __init__(self, *args, **kwargs): 15 | super().__init__(*args, **kwargs) 16 | 17 | def forward(self, input): 18 | output = F.group_norm( 19 | input.float(), 20 | self.num_groups, 21 | self.weight.float() if self.weight is not None else None, 22 | self.bias.float() if self.bias is not None else None, 23 | self.eps, 24 | ) 25 | return output.type_as(input) 26 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/youcook.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/ft.yaml 2 | dataset: 3 | meta_processor: YoucookMetaProcessor 4 | train_path: data/youcook/youcook_train.pkl 5 | val_path: data/youcook/youcook_val.pkl 6 | trainval_annotation: data/youcook/youcookii_annotations_trainval.json 7 | use_annotation_text: True 8 | video_processor: YoucookVideoProcessor 9 | vfeat_dir: data/feat/feat_youcook_s3d # /checkpoint/huxu/feat/youcook_vmz # /checkpoint/prarora/berniehuang/feat_youcook_vmz 10 | text_processor: TextProcessor 11 | aligner: DSAligner 12 | num_iso_layer: 12 13 | model: 14 | model_cls: MMFusionJoint 15 | mm_encoder_cls: MMBertForJoint 16 | loss: 17 | loss_cls: T2VContraLoss 18 | fairseq: 19 | dataset: 20 | batch_size: 128 21 | optimization: 22 | max_epoch: 10 23 | checkpoint: 24 | save_dir: runs/task/youcook 25 | 26 | -------------------------------------------------------------------------------- /translatotron/preprocess/s2ut/create_manifest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tqdm 3 | import argparse 4 | import soundfile as sf 5 | 6 | 7 | def main(): 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument("--data-root") 10 | args = parser.parse_args() 11 | for split in ["train", "dev", "test"]: 12 | fin = open(os.path.join(args.data_root, f"{split}.tsv")) 13 | fout = open(os.path.join(args.data_root, f"{split}.txt"), "w") 14 | data = fin.read().splitlines() 15 |
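# The first line of a fairseq-style audio manifest is the root directory; each
# subsequent line is "<path relative to root>\t<frame count>", the format that
# get_audio_files() in speech2unit/clustering/utils.py above expects.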
fout.write(os.path.join(args.data_root, split) + "\n")  # root dir line; bare string concat would drop the path separator 16 | for line in data: 17 | src_audio, _ = line.split("\t") 18 | src_audio = src_audio + ".wav" 19 | n_frames = sf.info(os.path.join(args.data_root, split, src_audio)).frames 20 | fout.write(f"{src_audio}\t{n_frames}\n") 21 | 22 | 23 | if __name__ == "__main__": 24 | main() -------------------------------------------------------------------------------- /fairseq/fairseq/config/fb_run_config/slurm.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - fb_run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | launcher: 15 | cpus_per_task: 60 16 | gpus_per_node: ??? 17 | tasks_per_node: 1 18 | nodes: 1 19 | partition: learnfair 20 | mem_gb: 400 21 | timeout_min: 4320 22 | max_num_timeout: 10 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | submitit_folder: ${hydra.sweep.dir} 25 | 26 | distributed_training: 27 | ddp_backend: c10d 28 | distributed_world_size: ??? 29 | distributed_port: ??? 30 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/task/test_youcook.yaml: -------------------------------------------------------------------------------- 1 | includes: projects/task/test.yaml 2 | dataset: 3 | meta_processor: YoucookMetaProcessor 4 | test_path: data/youcook/youcook_val.pkl 5 | trainval_annotation: data/youcook/youcookii_annotations_trainval.json 6 | use_annotation_text: True 7 | video_processor: YoucookVideoProcessor 8 | vfeat_dir: data/feat/feat_youcook_s3d # /checkpoint/huxu/feat/youcook_vmz # /checkpoint/prarora/berniehuang/feat_youcook_vmz 9 | text_processor: TextProcessor 10 | aligner: DSAligner 11 | num_iso_layer: 12 12 | model: 13 | model_cls: MMFusionJoint 14 | mm_encoder_cls: MMBertForJoint 15 | eval: 16 | save_path: runs/task/youcook/eval 17 | fairseq: 18 | # read code and find what is the checkpoint arg. 19 | common_eval: 20 | path: runs/task/youcook/checkpoint_last.pt 21 | metric: RetrievalMetric 22 | predictor: RetrievalPredictor 23 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_sst2_qnli_sweep_fair_nodep.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env zsh 2 | 3 | dir="$1" 4 | cp="$dir/checkpoints/checkpoint_last.pt" 5 | 6 | echo "dir: $dir" 7 | 8 | declare -A tasks 9 | tasks[qnli]="/private/home/jgu/data/GLUE/QNLI-bin" 10 | tasks[sst_2]="/private/home/jgu/data/GLUE/SST-2-bin" 11 | 12 | lrs="5e-6 1e-5 2e-5 5e-5 1e-4 2e-4 5e-4 1e-3" 13 | 14 | for task data_path in ${(kv)tasks}; do 15 | for lr in $(echo "$lrs"); do 16 | PYTHONPATH=.
PREFIX="${PREFIX}" SUFFIX="" nohup python fairseq_cli/hydra_train.py -m --config-dir examples/roberta/config/finetuning \ 17 | --config-name $task hydra/launcher=submitit_slurm +run_config=slurm_1g task.data="$data_path" hydra.launcher.name=finetune_${task}_${PREFIX} \ 18 | checkpoint.restore_file="$cp" hydra.sweep.dir="$dir/finetune_sweep/$task/lr_$lr" "optimization.lr=[${lr}]" & 19 | done 20 | done 21 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/encoders/nltk_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_tokenizer 7 | from fairseq.dataclass import FairseqDataclass 8 | 9 | 10 | @register_tokenizer("nltk", dataclass=FairseqDataclass) 11 | class NLTKTokenizer(object): 12 | def __init__(self, *unused): 13 | try: 14 | from nltk.tokenize import word_tokenize 15 | 16 | self.word_tokenize = word_tokenize 17 | except ImportError: 18 | raise ImportError("Please install nltk with: pip install nltk") 19 | 20 | def encode(self, x: str) -> str: 21 | return " ".join(self.word_tokenize(x)) 22 | 23 | def decode(self, x: str) -> str: 24 | return x 25 | -------------------------------------------------------------------------------- /fairseq/.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🚀 Feature Request 3 | about: Submit a proposal/request for a new feature 4 | labels: 'enhancement, help wanted, needs triage' 5 | --- 6 | 7 | ## 🚀 Feature Request 8 | 9 | 10 | ### Motivation 11 | 12 | 13 | 14 | ### Pitch 15 | 16 | 17 | 18 | ### Alternatives 19 | 20 | 21 | 22 | ### Additional context 23 | 24 | 25 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_all_fair_nodep.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env zsh 2 | 3 | dir="$1" 4 | cp="$dir/checkpoints/checkpoint_last.pt" 5 | 6 | echo "dir: $dir" 7 | 8 | declare -A tasks 9 | tasks[cola]="/private/home/jgu/data/GLUE/CoLA-bin" 10 | tasks[qnli]="/private/home/jgu/data/GLUE/QNLI-bin" 11 | tasks[mrpc]="/private/home/jgu/data/GLUE/MRPC-bin" 12 | tasks[rte]="/private/home/jgu/data/GLUE/RTE-bin" 13 | tasks[sst_2]="/private/home/jgu/data/GLUE/SST-2-bin" 14 | 15 | for task data_path in ${(kv)tasks}; do 16 | PYTHONPATH=. 
PREFIX="${PREFIX}" SUFFIX="" nohup python fairseq_cli/hydra_train.py -m --config-dir examples/roberta/config/finetuning \ 17 | --config-name $task hydra/launcher=submitit_slurm +run_config=slurm_1g task.data="$data_path" hydra.launcher.name=finetune_${task}_${PREFIX} \ 18 | checkpoint.restore_file="$cp" hydra.sweep.dir="$dir/finetune/$task" & 19 | done 20 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_1_old.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 80 19 | gpus_per_node: 8 20 | tasks_per_node: 1 21 | mem_gb: 450 22 | nodes: 1 23 | name: ${env:PREFIX}_wav2vec3_small_librispeech 24 | partition: devlab,learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 27 | exclude: learnfair1381 -------------------------------------------------------------------------------- /fairseq/fairseq/data/list_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class ListDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, sizes=None): 11 | super().__init__(dataset) 12 | self._sizes = sizes 13 | 14 | def __iter__(self): 15 | for x in self.dataset: 16 | yield x 17 | 18 | def collater(self, samples): 19 | return samples 20 | 21 | @property 22 | def sizes(self): 23 | return self._sizes 24 | 25 | def num_tokens(self, index): 26 | return self.sizes[index] 27 | 28 | def size(self, index): 29 | return self.sizes[index] 30 | 31 | def set_epoch(self, epoch): 32 | pass 33 | -------------------------------------------------------------------------------- /fairseq/examples/roberta/config/finetuning/run_config/slurm_1g.yaml: -------------------------------------------------------------------------------- 1 | 2 | # @package _global_ 3 | 4 | hydra: 5 | job: 6 | config: 7 | override_dirname: 8 | kv_sep: '_' 9 | item_sep: '/' 10 | exclude_keys: 11 | - run_config 12 | - distributed_training.distributed_port 13 | sweep: 14 | dir: /checkpoint/${env:USER}/roberta_ft/${env:PREFIX}/${hydra.job.config_name}/${env:SUFFIX} 15 | subdir: ${hydra.job.num} 16 | launcher: 17 | submitit_folder: ${hydra.sweep.dir}/submitit 18 | timeout_min: 1000 19 | cpus_per_task: 8 20 | gpus_per_node: 1 21 | tasks_per_node: 1 22 | mem_gb: 60 23 | nodes: 1 24 | name: ${env:PREFIX}_${hydra.job.config_name} 25 | partition: devlab,learnlab,learnfair,scavenge 26 | constraint: volta32gb 27 | max_num_timeout: 30 28 | exclude: learnfair1381,learnfair5192,learnfair2304 29 | -------------------------------------------------------------------------------- /fairseq/examples/linformer/README.md: -------------------------------------------------------------------------------- 1 | # Linformer: Self-Attention with Linear Complexity (Wang et al., 2020) 2 | 3 
| This example contains code to train Linformer models as described in our paper 4 | [Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768). 5 | 6 | ## Training a new Linformer RoBERTa model 7 | 8 | You can mostly follow the [RoBERTa pretraining README](/examples/roberta/README.pretraining.md), 9 | updating your training command with `--user-dir examples/linformer/linformer_src --arch linformer_roberta_base`. 10 | 11 | ## Citation 12 | 13 | If you use our work, please cite: 14 | 15 | ```bibtex 16 | @article{wang2020linformer, 17 | title={Linformer: Self-Attention with Linear Complexity}, 18 | author={Wang, Sinong and Li, Belinda and Khabsa, Madian and Fang, Han and Ma, Hao}, 19 | journal={arXiv preprint arXiv:2006.04768}, 20 | year={2020} 21 | } 22 | ``` 23 | -------------------------------------------------------------------------------- /fairseq/docs/criterions.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | .. _Criterions: 5 | 6 | Criterions 7 | ========== 8 | 9 | Criterions compute the loss function given the model and batch, roughly:: 10 | 11 | loss = criterion(model, batch) 12 | 13 | .. automodule:: fairseq.criterions 14 | :members: 15 | 16 | .. autoclass:: fairseq.criterions.FairseqCriterion 17 | :members: 18 | :undoc-members: 19 | 20 | .. autoclass:: fairseq.criterions.adaptive_loss.AdaptiveLoss 21 | :members: 22 | :undoc-members: 23 | .. autoclass:: fairseq.criterions.composite_loss.CompositeLoss 24 | :members: 25 | :undoc-members: 26 | .. autoclass:: fairseq.criterions.cross_entropy.CrossEntropyCriterion 27 | :members: 28 | :undoc-members: 29 | .. autoclass:: fairseq.criterions.label_smoothed_cross_entropy.LabelSmoothedCrossEntropyCriterion 30 | :members: 31 | :undoc-members: 32 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/new/conf/run_config/fb_slurm_2g.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '/' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | - common_eval.path 13 | sweep: 14 | dir: /checkpoint/abaevski/asr/d2v2/decoding/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 15 | # subdir: ${hydra.job.override_dirname} 16 | launcher: 17 | cpus_per_task: 16 18 | gpus_per_node: 2 19 | tasks_per_node: 2 20 | nodes: 1 21 | partition: devlab,learnlab 22 | mem_gb: 100 23 | timeout_min: 2000 24 | max_num_timeout: 10 25 | name: ${env:PREFIX}_${hydra.job.config_name} 26 | submitit_folder: ${hydra.sweep.dir}/%j 27 | constraint: volta32gb -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | langdir="" 4 | lmdir="" 5 | 6 | . ./cmd.sh 7 | . ./path.sh 8 | . parse_options.sh 9 | 10 | arpa_lm=$1 11 | data=$2 12 | 13 | if [ -z $langdir ]; then 14 | langdir=$data/lang 15 | fi 16 | if [ -z $lmdir ]; then 17 | lmdir=$data/lang_test 18 | fi 19 | 20 | if [ ! -d $langdir ]; then 21 | echo "$langdir not found. 
run local/prepare_lang.sh first" && exit 1 22 | fi 23 | 24 | mkdir -p $lmdir 25 | cp -r $langdir/* $lmdir 26 | 27 | if [[ "$arpa_lm" == *.gz ]]; then 28 | gunzip -c $arpa_lm | arpa2fst --disambig-symbol=#0 --read-symbol-table=$lmdir/words.txt - $lmdir/G.fst 29 | else 30 | arpa2fst --disambig-symbol=#0 --read-symbol-table=$lmdir/words.txt $arpa_lm $lmdir/G.fst 31 | fi 32 | fstisstochastic $lmdir/G.fst 33 | utils/validate_lang.pl $lmdir || exit 1 34 | 35 | echo "done preparing lm ($lmdir)" 36 | -------------------------------------------------------------------------------- /fairseq/fairseq/distributed/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .distributed_timeout_wrapper import DistributedTimeoutWrapper 7 | from .fully_sharded_data_parallel import ( 8 | fsdp_enable_wrap, 9 | fsdp_wrap, 10 | FullyShardedDataParallel, 11 | ) 12 | from .legacy_distributed_data_parallel import LegacyDistributedDataParallel 13 | from .module_proxy_wrapper import ModuleProxyWrapper 14 | from .tpu_distributed_data_parallel import TPUDistributedDataParallel 15 | 16 | 17 | __all__ = [ 18 | "DistributedTimeoutWrapper", 19 | "fsdp_enable_wrap", 20 | "fsdp_wrap", 21 | "FullyShardedDataParallel", 22 | "LegacyDistributedDataParallel", 23 | "ModuleProxyWrapper", 24 | "TPUDistributedDataParallel", 25 | ] 26 | -------------------------------------------------------------------------------- /fairseq/examples/m2m_100/tokenizers/tokenizer_ar.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | # 7 | # Please follow the instructions here http://alt.qcri.org/tools/arabic-normalizer/ 8 | # to install tools needed for Arabic 9 | 10 | echo "Please install Arabic tools: http://alt.qcri.org/tools/arabic-normalizer/" 11 | echo "Then update environment variables in tokenizer_ar.sh" 12 | exit 1 13 | 14 | SVMTOOL=... 15 | GOMOSESGO=... 16 | QCRI_ARABIC_NORMALIZER=... 
17 | 18 | export PERL5LIB="$SVMTOOL/lib":"$GOMOSESGO/bin/MADA-3.2":$PERL5LIB 19 | 20 | 21 | tempfile=$(mktemp) 22 | cat - > $tempfile 23 | 24 | cd $QCRI_ARABIC_NORMALIZER 25 | 26 | bash qcri_normalizer_mada3.2_aramorph1.2.1.sh $tempfile 27 | cat $tempfile.mada_norm-aramorph.europarl_tok 28 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_16.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 80 19 | gpus_per_node: 8 20 | tasks_per_node: 1 21 | mem_gb: 450 22 | nodes: 16 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 27 | exclude: learnfair1381,learnfair5192,learnfair2304 -------------------------------------------------------------------------------- /fairseq/fairseq/data/encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | import importlib 8 | import os 9 | 10 | from fairseq import registry 11 | 12 | 13 | build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry( 14 | "--tokenizer", 15 | default=None, 16 | ) 17 | 18 | 19 | build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry( 20 | "--bpe", 21 | default=None, 22 | ) 23 | 24 | 25 | # automatically import any Python files in the encoders/ directory 26 | for file in sorted(os.listdir(os.path.dirname(__file__))): 27 | if file.endswith(".py") and not file.startswith("_"): 28 | module = file[: file.find(".py")] 29 | importlib.import_module("fairseq.data.encoders." 
+ module) 30 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_2.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 10 19 | gpus_per_node: 8 20 | tasks_per_node: 8 21 | mem_gb: 450 22 | nodes: 2 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: devlab,learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 27 | exclude: learnfair7491,learnfair7477,learnfair7487 -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/config/finetuning/run_config/slurm_3.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '__' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | sweep: 13 | dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 14 | subdir: ${hydra.job.num} 15 | launcher: 16 | submitit_folder: ${hydra.sweep.dir} 17 | timeout_min: 4320 18 | cpus_per_task: 10 19 | gpus_per_node: 8 20 | tasks_per_node: 8 21 | mem_gb: 450 22 | nodes: 3 23 | name: ${env:PREFIX}_${hydra.job.config_name} 24 | partition: devlab,learnlab,learnfair,scavenge 25 | constraint: volta32gb 26 | max_num_timeout: 30 27 | exclude: learnfair7491,learnfair7477,learnfair7487 -------------------------------------------------------------------------------- /fairseq/.github/ISSUE_TEMPLATE/how-to-question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ❓ Questions/Help 3 | about: If you have questions, please first search existing issues and docs 4 | labels: 'question, needs triage' 5 | --- 6 | 7 | ## ❓ Questions and Help 8 | 9 | ### Before asking: 10 | 1. search the issues. 11 | 2. search the docs. 12 | 13 | 14 | 15 | #### What is your question? 16 | 17 | #### Code 18 | 19 | 20 | 21 | #### What have you tried? 22 | 23 | #### What's your environment? 24 | 25 | - fairseq Version (e.g., 1.0 or main): 26 | - PyTorch Version (e.g., 1.0) 27 | - OS (e.g., Linux): 28 | - How you installed fairseq (`pip`, source): 29 | - Build command you used (if compiling from source): 30 | - Python version: 31 | - CUDA/cuDNN version: 32 | - GPU models and configuration: 33 | - Any other relevant information: 34 | -------------------------------------------------------------------------------- /fairseq/tests/gpu/transformer_quantization_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | # This file defines example configuration arguments for quantizing 7 | # a transformer model with product quantization 8 | 9 | n_centroids: 10 | Linear: 11 | key: in_features 12 | value: {"*": 8} 13 | Embedding: 14 | key: embedding_dim 15 | value: {"*": 8} 16 | 17 | block_sizes: 18 | Linear: 19 | key: fuzzy_name 20 | value: {fc: 8, attn: 4, emb: 4} 21 | Embedding: 22 | key: fuzzy_name 23 | value: {emb: 8} 24 | 25 | layers_to_quantize: 26 | - decoder\\.layers\\.\d+\\.fc[12] 27 | - decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01] 28 | - decoder\\.layers\\.\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj) 29 | -------------------------------------------------------------------------------- /hifi-gan/config_v3.json: -------------------------------------------------------------------------------- 1 | { 2 | "resblock": "2", 3 | "num_gpus": 0, 4 | "batch_size": 16, 5 | "learning_rate": 0.0002, 6 | "adam_b1": 0.8, 7 | "adam_b2": 0.99, 8 | "lr_decay": 0.999, 9 | "seed": 1234, 10 | 11 | "upsample_rates": [8,8,4], 12 | "upsample_kernel_sizes": [16,16,8], 13 | "upsample_initial_channel": 256, 14 | "resblock_kernel_sizes": [3,5,7], 15 | "resblock_dilation_sizes": [[1,2], [2,6], [3,12]], 16 | 17 | "segment_size": 8192, 18 | "num_mels": 80, 19 | "num_freq": 1025, 20 | "n_fft": 1024, 21 | "hop_size": 256, 22 | "win_size": 1024, 23 | 24 | "sampling_rate": 22050, 25 | 26 | "fmin": 0, 27 | "fmax": 8000, 28 | "fmax_for_loss": null, 29 | 30 | "num_workers": 4, 31 | 32 | "dist_config": { 33 | "dist_backend": "nccl", 34 | "dist_url": "tcp://localhost:54321", 35 | "world_size": 1 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /fairseq/examples/textless_nlp/gslm/unit2speech/multiproc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import torch 4 | import sys 5 | import subprocess 6 | 7 | argslist = list(sys.argv)[1:] 8 | log_dir = argslist[-1] 9 | num_gpus = torch.cuda.device_count() 10 | argslist.append('--n_gpus={}'.format(num_gpus)) 11 | workers = [] 12 | job_id = time.strftime("%Y_%m_%d-%H%M%S") 13 | argslist.append("--group_name=group_{}".format(job_id)) 14 | 15 | print("GPU log directory is {}".format(log_dir)) 16 | os.makedirs(log_dir, exist_ok=True) 17 | for i in range(num_gpus): 18 | argslist.append('--rank={}'.format(i)) 19 | stdout = None if i == 0 else open("{}/{}_GPU_{}.log".format(log_dir, job_id, i), 20 | "w") 21 | print(argslist) 22 | p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout) 23 | workers.append(p) 24 | argslist = argslist[:-1] 25 | 26 | for p in workers: 27 | p.wait() 28 | -------------------------------------------------------------------------------- /fairseq/examples/speech_recognition/new/conf/run_config/fb_slurm_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | 3 | hydra: 4 | job: 5 | config: 6 | override_dirname: 7 | kv_sep: ':' 8 | item_sep: '/' 9 | exclude_keys: 10 | - run_config 11 | - distributed_training.distributed_port 12 | - common_eval.path 13 | sweep: 14 | dir: /checkpoint/abaevski/asr/d2v2/decoding/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname} 15 | # subdir: ${hydra.job.override_dirname} 16 | launcher: 17 | cpus_per_task: 16 18 | gpus_per_node: 1 19 | tasks_per_node: 1 20 | nodes: 1 21 | partition: devlab,learnlab 22 | mem_gb: 100 23 | timeout_min: 2000 24 | max_num_timeout: 10 25 | name: 
${env:PREFIX}_${hydra.job.config_name} 26 | submitit_folder: ${hydra.sweep.dir}/%j 27 | constraint: volta32gb 28 | exclude: learnfair7598 -------------------------------------------------------------------------------- /fairseq/tests/test_hf_hub.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import unittest 8 | 9 | import torch 10 | 11 | try: 12 | import huggingface_hub 13 | except ImportError: 14 | huggingface_hub = None 15 | 16 | from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub 17 | 18 | 19 | @unittest.skipIf(not huggingface_hub, "Requires huggingface_hub install") 20 | class TestHuggingFaceHub(unittest.TestCase): 21 | @torch.no_grad() 22 | def test_hf_fastspeech2(self): 23 | hf_model_id = "facebook/fastspeech2-en-ljspeech" 24 | models, cfg, task = load_model_ensemble_and_task_from_hf_hub(hf_model_id) 25 | self.assertTrue(len(models) > 0) 26 | 27 | 28 | if __name__ == "__main__": 29 | unittest.main() 30 | -------------------------------------------------------------------------------- /hifi-gan/config_v1.json: -------------------------------------------------------------------------------- 1 | { 2 | "resblock": "1", 3 | "num_gpus": 0, 4 | "batch_size": 16, 5 | "learning_rate": 0.0002, 6 | "adam_b1": 0.8, 7 | "adam_b2": 0.99, 8 | "lr_decay": 0.999, 9 | "seed": 1234, 10 | 11 | "upsample_rates": [8,8,2,2], 12 | "upsample_kernel_sizes": [16,16,4,4], 13 | "upsample_initial_channel": 512, 14 | "resblock_kernel_sizes": [3,7,11], 15 | "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], 16 | 17 | "segment_size": 8192, 18 | "num_mels": 80, 19 | "num_freq": 1025, 20 | "n_fft": 1024, 21 | "hop_size": 256, 22 | "win_size": 1024, 23 | 24 | "sampling_rate": 22050, 25 | 26 | "fmin": 0, 27 | "fmax": 8000, 28 | "fmax_for_loss": null, 29 | 30 | "num_workers": 4, 31 | 32 | "dist_config": { 33 | "dist_backend": "nccl", 34 | "dist_url": "tcp://localhost:54321", 35 | "world_size": 1 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /hifi-gan/config_v2.json: -------------------------------------------------------------------------------- 1 | { 2 | "resblock": "1", 3 | "num_gpus": 0, 4 | "batch_size": 16, 5 | "learning_rate": 0.0002, 6 | "adam_b1": 0.8, 7 | "adam_b2": 0.99, 8 | "lr_decay": 0.999, 9 | "seed": 1234, 10 | 11 | "upsample_rates": [8,8,2,2], 12 | "upsample_kernel_sizes": [16,16,4,4], 13 | "upsample_initial_channel": 128, 14 | "resblock_kernel_sizes": [3,7,11], 15 | "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], 16 | 17 | "segment_size": 8192, 18 | "num_mels": 80, 19 | "num_freq": 1025, 20 | "n_fft": 1024, 21 | "hop_size": 256, 22 | "win_size": 1024, 23 | 24 | "sampling_rate": 22050, 25 | 26 | "fmin": 0, 27 | "fmax": 8000, 28 | "fmax_for_loss": null, 29 | 30 | "num_workers": 4, 31 | 32 | "dist_config": { 33 | "dist_backend": "nccl", 34 | "dist_url": "tcp://localhost:54321", 35 | "world_size": 1 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /fairseq/examples/data2vec/scripts/text/finetune_all_fair_nodep_aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env zsh 2 | 3 | dir="$1" 4 | cp="$dir/checkpoints/checkpoint_last.pt" 5 | 6 | echo "dir: $dir" 
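# zsh associative array mapping GLUE task name -> binarized data path, iterated
# below with the zsh-only ${(kv)tasks} key/value expansion (hence the zsh shebang).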
7 | 8 | declare -A tasks 9 | tasks[cola]="/fsx-wav2vec/abaevski/data/nlp/GLUE/CoLA-bin" 10 | tasks[qnli]="/fsx-wav2vec/abaevski/data/nlp/GLUE/QNLI-bin" 11 | tasks[mrpc]="/fsx-wav2vec/abaevski/data/nlp/GLUE/MRPC-bin" 12 | tasks[rte]="/fsx-wav2vec/abaevski/data/nlp/GLUE/RTE-bin" 13 | tasks[sst_2]="/fsx-wav2vec/abaevski/data/nlp/GLUE/SST-2-bin" 14 | 15 | for task data_path in ${(kv)tasks}; do 16 | PYTHONPATH=. PREFIX="${PREFIX}" SUFFIX="" nohup python fairseq_cli/hydra_train.py -m --config-dir examples/roberta/config/finetuning \ 17 | --config-name $task hydra/launcher=submitit_slurm +run_config=slurm_1g_aws task.data="$data_path" hydra.launcher.name=finetune_${task}_${PREFIX} \ 18 | checkpoint.restore_file="$cp" hydra.sweep.dir="$dir/finetune/$task" & 19 | done 20 | -------------------------------------------------------------------------------- /fairseq/examples/multilingual/multilingual_fairseq_gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | lang_pairs="en-fr,en-cs,fr-en,cs-en" 9 | path_2_data=$1 # <path to data> 10 | lang_list=$2 # <path to language list file> 11 | model=$3 # <path to model> 12 | source_lang=cs 13 | target_lang=en 14 | 15 | fairseq-generate "$path_2_data" \ 16 | --path "$model" \ 17 | --task translation_multi_simple_epoch \ 18 | --gen-subset test \ 19 | --source-lang "$source_lang" \ 20 | --target-lang "$target_lang" \ 21 | --sacrebleu --remove-bpe 'sentencepiece'\ 22 | --batch-size 32 \ 23 | --encoder-langtok "src" \ 24 | --decoder-langtok \ 25 | --lang-dict "$lang_list" \ 26 | --lang-pairs "$lang_pairs" 27 | -------------------------------------------------------------------------------- /fairseq/examples/wmt21/README.md: -------------------------------------------------------------------------------- 1 | # WMT 21 2 | 3 | This page provides pointers to the models of Facebook AI's WMT'21 news translation task submission [(Tran et al., 2021)](https://arxiv.org/abs/2108.03265). 4 | 5 | ## Single best dense models 6 | 7 | Model | Description | Download 8 | ---|---|--- 9 | `wmt21.dense-24-wide.X-En` | X-En | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt21.dense-24-wide.X-En.tar.gz) 10 | `wmt21.dense-24-wide.En-X` | En-X | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt21.dense-24-wide.En-X.tar.gz) 11 | 12 | ## Example usage 13 | 14 | See `eval.sh`. 15 | 16 | 17 | ## Citation 18 | ```bibtex 19 | @inproceedings{tran2021facebook, 20 | title={Facebook AI’s WMT21 News Translation Task Submission}, 21 | author={Chau Tran and Shruti Bhosale and James Cross and Philipp Koehn and Sergey Edunov and Angela Fan}, 22 | booktitle={Proc. of WMT}, 23 | year={2021}, 24 | } 25 | ``` 26 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/numel_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from .
import BaseWrapperDataset 10 | 11 | 12 | class NumelDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, reduce=False): 14 | super().__init__(dataset) 15 | self.reduce = reduce 16 | 17 | def __getitem__(self, index): 18 | item = self.dataset[index] 19 | if torch.is_tensor(item): 20 | return torch.numel(item) 21 | else: 22 | return np.size(item) 23 | 24 | def __len__(self): 25 | return len(self.dataset) 26 | 27 | def collater(self, samples): 28 | if self.reduce: 29 | return sum(samples) 30 | else: 31 | return torch.tensor(samples) 32 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/retri/videoclip/test_vttqa_zs.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: MSRVTTQAAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: MSRVTTQAMetaProcessor 9 | test_path: data/msrvtt-qa/MSR_MC_test.csv 10 | vfeat_dir: data/feat/feat_vtt_s3d 11 | text_processor: MSRVTTQATextProcessor 12 | num_iso_layer: 12 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/retri/videoclip/checkpoint_best.pt 22 | model: 23 | model_cls: MMFusionSeparate 24 | mm_encoder_cls: null 25 | video_encoder_cls: MMBertForEncoder 26 | text_encoder_cls: BertModel 27 | num_hidden_video_layers: 6 28 | eval: 29 | save_path: runs/retri/videoclip/vttqa_zs/eval 30 | metric: QAMetric 31 | predictor: QAPredictor 32 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/retri/videoclip/test_didemo_zs.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: DiDeMoAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: DiDeMoMetaProcessor 9 | test_path: data/didemo/test_data.json 10 | vfeat_dir: data/feat/feat_didemo_s3d 11 | text_processor: DiDeMoTextProcessor 12 | num_iso_layer: 12 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/retri/videoclip/checkpoint_best.pt 22 | model: 23 | model_cls: MMFusionSeparate 24 | mm_encoder_cls: null 25 | video_encoder_cls: MMBertForEncoder 26 | text_encoder_cls: BertModel 27 | num_hidden_video_layers: 6 28 | eval: 29 | save_path: runs/retri/videoclip/didemo_zs/eval 30 | metric: DiDeMoMetric 31 | predictor: DiDeMoPredictor 32 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/retri/videoclip/test_vtt_zs.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: DSAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: MSRVTTMetaProcessor 9 | test_path: data/msrvtt/MSRVTT_JSFUSION_test.csv 10 | vfeat_dir: data/feat/feat_vtt_s3d 11 | text_processor: MSRVTTTextProcessor 12 | num_iso_layer: 12 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/retri/videoclip/checkpoint_best.pt 22 | 
model: 23 | model_cls: MMFusionSeparate 24 | mm_encoder_cls: null 25 | video_encoder_cls: MMBertForEncoder 26 | text_encoder_cls: BertModel 27 | num_hidden_video_layers: 6 28 | eval: 29 | save_path: runs/retri/videoclip/vtt_zs/eval 30 | metric: RetrievalMetric 31 | predictor: RetrievalPredictor 32 | -------------------------------------------------------------------------------- /fairseq/scripts/convert_dictionary.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (c) Facebook, Inc. and its affiliates. 2 | -- 3 | -- This source code is licensed under the MIT license found in the 4 | -- LICENSE file in the root directory of this source tree. 5 | -- 6 | -- Usage: convert_dictionary.lua <dict.th7> 7 | require 'fairseq' 8 | require 'torch' 9 | require 'paths' 10 | 11 | if #arg < 1 then 12 | print('usage: convert_dictionary.lua <dict.th7>') 13 | os.exit(1) 14 | end 15 | if not paths.filep(arg[1]) then 16 | print('error: file does not exist: ' .. arg[1]) 17 | os.exit(1) 18 | end 19 | 20 | dict = torch.load(arg[1]) 21 | dst = paths.basename(arg[1]):gsub('.th7', '.txt') 22 | assert(dst:match('.txt$')) 23 | 24 | f = io.open(dst, 'w') 25 | for idx, symbol in ipairs(dict.index_to_symbol) do 26 | if idx > dict.cutoff then 27 | break 28 | end 29 | f:write(symbol) 30 | f:write(' ') 31 | f:write(dict.index_to_freq[idx]) 32 | f:write('\n') 33 | end 34 | f:close() 35 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/retri/videoclip/test_vttqa_videoclip.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: MSRVTTQAAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: MSRVTTQAMetaProcessor 9 | test_path: data/msrvtt-qa/MSR_MC_test.csv 10 | vfeat_dir: data/feat/feat_vtt_s3d 11 | text_processor: MSRVTTQATextProcessor 12 | num_iso_layer: 12 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/retri/videoclip/vttqa/checkpoint_last.pt 22 | model: 23 | model_cls: MMFusionSeparate 24 | mm_encoder_cls: null 25 | video_encoder_cls: MMBertForEncoder 26 | text_encoder_cls: BertModel 27 | num_hidden_video_layers: 6 28 | eval: 29 | save_path: runs/retri/videoclip/vttqa/eval 30 | metric: QAMetric 31 | predictor: QAPredictor 32 | -------------------------------------------------------------------------------- /fairseq/docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=fairseq 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/retri/videoclip/test_vtt_videoclip.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: DSAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: MSRVTTMetaProcessor 9 | test_path: data/msrvtt/MSRVTT_JSFUSION_test.csv 10 | vfeat_dir: data/feat/feat_vtt_s3d 11 | text_processor: MSRVTTTextProcessor 12 | num_iso_layer: 12 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/retri/videoclip/vtt/checkpoint_last.pt 22 | model: 23 | model_cls: MMFusionSeparate 24 | mm_encoder_cls: null 25 | video_encoder_cls: MMBertForEncoder 26 | text_encoder_cls: BertModel 27 | num_hidden_video_layers: 6 28 | eval: 29 | save_path: runs/retri/videoclip/vtt/eval 30 | metric: RetrievalMetric 31 | predictor: RetrievalPredictor 32 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/mtm/vlm/test_youcook.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: YoucookVideoProcessor 6 | aligner: DSAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: YoucookMetaProcessor 9 | test_path: data/youcook/youcook_val.pkl 10 | trainval_annotation: data/youcook/youcookii_annotations_trainval.json 11 | use_annotation_text: true 12 | vfeat_dir: data/feat/feat_youcook_s3d 13 | text_processor: TextProcessor 14 | num_iso_layer: 12 15 | max_video_len: 32 16 | max_len: 96 17 | fairseq: 18 | dataset: 19 | batch_size: 256 20 | valid_subset: test 21 | num_workers: 2 22 | common_eval: 23 | path: runs/mtm/vlm/youcook/checkpoint_last.pt 24 | model: 25 | model_cls: MMFusionJoint 26 | mm_encoder_cls: MMBertForJoint 27 | use_seg_emb: true 28 | eval: 29 | save_path: runs/mtm/vlm/youcook/eval 30 | metric: RetrievalMetric 31 | predictor: RetrievalPredictor 32 | -------------------------------------------------------------------------------- /fairseq/fairseq/data/colorize_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class ColorizeDataset(BaseWrapperDataset): 12 | """Adds 'colors' property to net input that is obtained from the provided color getter for use by models""" 13 | 14 | def __init__(self, dataset, color_getter): 15 | super().__init__(dataset) 16 | self.color_getter = color_getter 17 | 18 | def collater(self, samples): 19 | base_collate = super().collater(samples) 20 | if len(base_collate) > 0: 21 | base_collate["net_input"]["colors"] = torch.tensor( 22 | list(self.color_getter(self.dataset, s["id"]) for s in samples), 23 | dtype=torch.long, 24 | ) 25 | return base_collate 26 | -------------------------------------------------------------------------------- /fairseq/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | #include <vector> 3 | 4 | std::vector<float*> 5 | dynamicconv_cpu_forward(float* input, float* filters, int padding_l); 6 | 7 | std::vector<float*> dynamicconv_cpu_backward( 8 | float* gradOutput, 9 | int padding_l, 10 | float* input, 11 | float* filters); 12 | 13 | std::vector<float*> 14 | dynamicconv_forward(float* input, float* filters, int padding_l) { 15 | return dynamicconv_cpu_forward(input, filters, padding_l); 16 | } 17 | 18 | std::vector<float*> dynamicconv_backward( 19 | float* gradOutput, 20 | int padding_l, 21 | float* input, 22 | float* filters) { 23 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); 24 | } 25 | 26 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 27 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); 28 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); 29 | } 30 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/mtm/vlm/test_youcookcap.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: YoucookVideoProcessor 6 | aligner: DSNLGAligner 7 | bert_name: bert-base-uncased 8 | meta_processor: YoucookNLGMetaProcessor 9 | test_path: data/youcook/val_list.txt 10 | trainval_annotation: data/youcook/youcookii_annotations_trainval.json 11 | vfeat_dir: data/feat/feat_youcook_s3d 12 | text_processor: NLGTextProcessor 13 | max_video_len: 32 14 | max_len: 96 15 | fairseq: 16 | dataset: 17 | batch_size: 256 18 | valid_subset: test 19 | num_workers: 2 20 | common_eval: 21 | path: runs/mtm/vlm/youcookcap/checkpoint_best.pt 22 | model: 23 | model_cls: MMFusionNLG 24 | mm_encoder_cls: MMBertForNLG 25 | max_decode_length: 24 26 | use_seg_emb: true 27 | eval: 28 | save_path: runs/mtm/vlm/youcookcap/eval 29 | metric: NLGMetric 30 | predictor: NLGPredictor 31 | gen_param: 32 | num_beams: 5 33 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/decode/infer_kenlm.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | defaults: 4 | - model: null 5 | 6 | hydra: 7 | run: 8 | dir: ${common_eval.results_path}/beam${decoding.beam}_th${decoding.beamthreshold}_lmw${decoding.lmweight}_wrd${decoding.wordscore}_sil${decoding.silweight} 9 | sweep: 10 | dir: ${common_eval.results_path} 11 | subdir: beam${decoding.beam}_th${decoding.beamthreshold}_lmw${decoding.lmweight}_wrd${decoding.wordscore}_sil${decoding.silweight} 12 | 13 | task: 14 | _name: hubert_pretraining 15 | single_target: true 16 | fine_tuning: true
17 | data: ??? 18 | normalize: ??? 19 | 20 | decoding: 21 | type: kenlm 22 | lexicon: ??? 23 | lmpath: ??? 24 | beamthreshold: 100 25 | beam: 500 26 | lmweight: 2 27 | wordscore: -1 28 | silweight: 0 29 | unique_wer_file: true 30 | common_eval: 31 | results_path: ??? 32 | path: ??? 33 | post_process: letter 34 | dataset: 35 | max_tokens: 1100000 36 | gen_subset: ??? 37 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/projects/mtm/vlm/test_coin.yaml: -------------------------------------------------------------------------------- 1 | slurm_config: big 2 | task_type: local_predict 3 | dataset: 4 | split: test 5 | video_processor: VideoProcessor 6 | aligner: COINActionSegmentationAligner 7 | bert_name: bert-base-uncased 8 | test_path: data/coin/COIN.json 9 | meta_processor: COINActionSegmentationMetaProcessor 10 | vfeat_dir: data/feat/feat_coin_s3d 11 | text_processor: COINActionSegmentationTextProcessor 12 | num_iso_layer: 12 13 | sliding_window: 16 14 | sliding_window_size: 32 15 | max_video_len: 32 16 | max_len: 96 17 | fairseq: 18 | dataset: 19 | batch_size: 1 20 | valid_subset: test 21 | num_workers: 2 22 | common_eval: 23 | path: runs/mtm/vlm/coin/checkpoint_best.pt 24 | model: 25 | model_cls: MMFusionActionSegmentation 26 | mm_encoder_cls: MMBertForTokenClassification 27 | use_seg_emb: true 28 | eval: 29 | save_path: runs/mtm/vlm/coin/eval 30 | metric: COINActionSegmentationMetric 31 | predictor: COINPredictor 32 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/config/decode/infer_fsqlm.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | defaults: 4 | - model: null 5 | 6 | hydra: 7 | run: 8 | dir: ${common_eval.results_path}/beam${decoding.beam}_th${decoding.beamthreshold}_lmw${decoding.lmweight}_wrd${decoding.wordscore}_sil${decoding.silweight} 9 | sweep: 10 | dir: ${common_eval.results_path} 11 | subdir: beam${decoding.beam}_th${decoding.beamthreshold}_lmw${decoding.lmweight}_wrd${decoding.wordscore}_sil${decoding.silweight} 12 | 13 | task: 14 | _name: hubert_pretraining 15 | single_target: true 16 | fine_tuning: true 17 | data: ??? 18 | normalize: ??? 19 | 20 | decoding: 21 | type: fairseqlm 22 | lexicon: ??? 23 | lmpath: ??? 24 | beamthreshold: 25 25 | beam: 500 26 | lmweight: 2 27 | wordscore: -1 28 | silweight: 0 29 | unique_wer_file: true 30 | common_eval: 31 | results_path: ??? 32 | path: ??? 33 | post_process: letter 34 | dataset: 35 | max_tokens: 1100000 36 | gen_subset: ??? 37 | -------------------------------------------------------------------------------- /fairseq/examples/hubert/update_ckpt.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import torch 7 | 8 | src_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2.pt" 9 | ref_ckpt = "/checkpoint/wnhsu/w2v/hubert_icassp_oss_v3/iter2_km100-400k-grp-L6/oss.km500_p0_1_s334.pmw1_0.puw0_0.grpnorm.ml10.mp0_8.untie.mxsz250000.ufreq1.maxtok1400000.MU100k.s1337.ngpu32/checkpoint_last.pt" 10 | new_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2_updated.pt" 11 | 12 | 13 | def update_state(state): 14 | state["model"]["label_embs_concat"] = state["model"].pop("label_embs") 15 | state["args"].task = "hubert_pretraining" 16 | state["args"].labels = f"['{state['args'].labels}']" 17 | return state 18 | 19 | 20 | src_state = torch.load(src_ckpt) 21 | src_state = update_state(src_state) 22 | torch.save(src_state, new_ckpt) 23 | -------------------------------------------------------------------------------- /fairseq/examples/megatron_11b/detok.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import argparse 8 | import fileinput 9 | 10 | import sacremoses 11 | 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser(description="") 15 | parser.add_argument("files", nargs="*", help="input files") 16 | args = parser.parse_args() 17 | 18 | detok = sacremoses.MosesDetokenizer() 19 | 20 | for line in fileinput.input(args.files, openhook=fileinput.hook_compressed): 21 | print( 22 | detok.detokenize(line.strip().split(" ")) 23 | .replace(" @", "") 24 | .replace("@ ", "") 25 | .replace(" =", "=") 26 | .replace("= ", "=") 27 | .replace(" – ", "–") 28 | ) 29 | 30 | 31 | if __name__ == "__main__": 32 | main() 33 | -------------------------------------------------------------------------------- /fairseq/fairseq/clib/libbleu/module.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include <Python.h> 10 | 11 | static PyMethodDef method_def[] = {{NULL, NULL, 0, NULL}}; // NOLINT 12 | 13 | static struct PyModuleDef module_def = { 14 | PyModuleDef_HEAD_INIT, 15 | "libbleu", /* name of module */ 16 | // NOLINTNEXTLINE 17 | NULL, /* module documentation, may be NULL */ 18 | -1, /* size of per-interpreter state of the module, 19 | or -1 if the module keeps state in global variables.
*/ 20 | method_def}; // NOLINT 21 | 22 | #if PY_MAJOR_VERSION == 2 23 | PyMODINIT_FUNC init_libbleu() 24 | #else 25 | PyMODINIT_FUNC PyInit_libbleu() 26 | #endif 27 | { 28 | PyObject* m = PyModule_Create(&module_def); 29 | if (!m) { 30 | return NULL; 31 | } 32 | return m; 33 | } 34 | -------------------------------------------------------------------------------- /fairseq/fairseq/benchmark/dummy_dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from fairseq.data import FairseqDataset 3 | 4 | 5 | class DummyDataset(FairseqDataset): 6 | def __init__(self, batch, num_items, item_size): 7 | super().__init__() 8 | self.batch = batch 9 | self.num_items = num_items 10 | self.item_size = item_size 11 | 12 | def __getitem__(self, index): 13 | return index 14 | 15 | def __len__(self): 16 | return self.num_items 17 | 18 | def collater(self, samples): 19 | return self.batch 20 | 21 | @property 22 | def sizes(self): 23 | return np.array([self.item_size] * self.num_items) 24 | 25 | def num_tokens(self, index): 26 | return self.item_size 27 | 28 | def size(self, index): 29 | return self.item_size 30 | 31 | def ordered_indices(self): 32 | return np.arange(self.num_items) 33 | 34 | @property 35 | def supports_prefetch(self): 36 | return False 37 | -------------------------------------------------------------------------------- /fairseq/examples/textless_nlp/pgslm/scripts/prepare_f0_quantization.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | set -eu 8 | 9 | train_json=$1 10 | sr=$2 11 | nbins=$3 12 | out_dir=$4 13 | out_prefix=$5 14 | 15 | f0_dir="$out_dir/f0" 16 | 17 | python examples/textless_nlp/pgslm/preprocess_f0.py \ 18 | $train_json $f0_dir/${out_prefix}_f0_quant --nshards 1 --rank 1 --sampling_rate $sr 19 | 20 | # NB: one can use parallel here: 21 | # NSHARDS=16 22 | # 23 | #seq 1 $NSHARDS | parallel -j $NSHARDS python examples/textless_nlp/pgslm/preprocess_f0.py \ 24 | # $train_json $f0_dir/${out_prefix}_f0_quant --nshards $NSHARDS --sampling_rate $sr --rank 25 | 26 | python examples/textless_nlp/pgslm/quantize_f0.py \ 27 | $train_json $f0_dir/${out_prefix}_f0_quant $out_dir $out_prefix --nbins $nbins --nshards 1 --normalize mean --log 28 | -------------------------------------------------------------------------------- /fairseq/examples/unsupervised_quality_estimation/repeat_lines.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import argparse 7 | import sys 8 | 9 | 10 | def _normalize_spaces(line): 11 | return " ".join(line.split()) 12 | 13 | 14 | def main(): 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("-i", "--input_file", required=True, type=str) 17 | parser.add_argument("-n", "--repeat_times", required=True, type=int) 18 | parser.add_argument("-o", "--output_file", required=False, type=str) 19 | args = parser.parse_args() 20 | stream = open(args.output_file, "w") if args.output_file else sys.stdout 21 | 22 | for line in open(args.input_file): 23 | for _ in range(args.repeat_times): 24 | stream.write(_normalize_spaces(line) + "\n") 25 | 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /fairseq/examples/MMPT/mmpt/tasks/vlmtask.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | import torch 6 | 7 | from .task import Task 8 | 9 | 10 | class VLMTask(Task): 11 | """A VLM task for reproducibility. 12 | The collator splits subsamples into two sub-batches. 13 | This should involve no logic changes, 14 | but it changes the randomness in frame masking. 15 | """ 16 | 17 | def flat_subsample(self, tensor): 18 | size = tensor.size() 19 | if len(size) >= 2: 20 | batch_size = size[0] * (size[1] // 2) 21 | expanded_size = ( 22 | (batch_size, 2) + size[2:] if len(size) > 2 23 | else (batch_size, 2) 24 | ) 25 | tensor = tensor.view(expanded_size) 26 | tensor = torch.cat([tensor[:, 0], tensor[:, 1]], dim=0) 27 | return tensor 28 | -------------------------------------------------------------------------------- /fairseq/examples/translation_moe/translation_moe_src/logsumexp_moe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class LogSumExpMoE(torch.autograd.Function): 10 | """Standard LogSumExp forward pass, but use *posterior* for the backward. 11 | 12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" 13 | (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, logp, posterior, dim=-1): 18 | ctx.save_for_backward(posterior) 19 | ctx.dim = dim 20 | return torch.logsumexp(logp, dim=dim) 21 | 22 | @staticmethod 23 | def backward(ctx, grad_output): 24 | (posterior,) = ctx.saved_tensors 25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior 26 | return grad_logp, None, None 27 | -------------------------------------------------------------------------------- /fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh: -------------------------------------------------------------------------------- 1 | # you can change cmd.sh depending on what type of queue you are using. 2 | # If you have no queueing system and want to run on a local machine, you 3 | # can change all instances 'queue.pl' to run.pl (but be careful and run 4 | # commands one by one: most recipes will exhaust the memory on your 5 | # machine). queue.pl works with GridEngine (qsub). slurm.pl works 6 | # with slurm.
Different queues are configured differently, with different 7 | # queue names and different ways of specifying things like memory; 8 | # to account for these differences you can create and edit the file 9 | # conf/queue.conf to match your queue's configuration. Search for 10 | # conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information, 11 | # or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl. 12 | 13 | export train_cmd="run.pl --mem 2G" 14 | export decode_cmd="run.pl --mem 4G" 15 | export mkgraph_cmd="run.pl --mem 8G" 16 | -------------------------------------------------------------------------------- /fairseq/examples/multilingual/data_scripts/preprocess_ML50_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | if [ -z "$WORKDIR_ROOT" ] ; 9 | then 10 | echo "Please specify your working directory root in the environment variable WORKDIR_ROOT. Exiting..." 11 | exit 1 12 | fi 13 | 14 | if [ -z "$SPM_PATH" ] ; 15 | then 16 | echo "Please install sentencepiece from https://github.com/google/sentencepiece and set SPM_PATH to point to the installed spm_encode.py. Exiting..." 17 | exit 1 18 | fi 19 | 20 | ML50=${WORKDIR_ROOT}/ML50 21 | 22 | mkdir -p $ML50/dedup 23 | mkdir -p $ML50/clean 24 | 25 | python ./dedup_all.py --from-folder $ML50/raw --to-folder $ML50/dedup 26 | python ./remove_valid_test_in_train.py --from-folder $ML50/dedup --to-folder $ML50/clean 27 | python ./binarize.py --raw-folder $ML50/clean --------------------------------------------------------------------------------
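A minimal usage sketch for ColorizeDataset (colorize_dataset.py above), assuming the class is importable from fairseq.data.colorize_dataset; the toy dataset and the domain-id getter below are illustrative stand-ins, not part of the source tree. The wrapped dataset's collater must return a dict with a "net_input" sub-dict, which is where the colors tensor gets attached.

import torch
from fairseq.data import FairseqDataset
from fairseq.data.colorize_dataset import ColorizeDataset  # import path assumed

class ToySource(FairseqDataset):  # hypothetical stand-in for a real fairseq dataset
    def __len__(self):
        return 4
    def __getitem__(self, index):
        return {"id": index, "source": torch.tensor([index])}
    def collater(self, samples):
        # the base collater must produce a dict containing "net_input"
        return {
            "id": torch.tensor([s["id"] for s in samples]),
            "net_input": {"src_tokens": torch.stack([s["source"] for s in samples])},
        }

# color_getter maps (dataset, sample_id) to an integer "color", e.g. a domain id
colored = ColorizeDataset(ToySource(), color_getter=lambda ds, i: i % 2)
batch = colored.collater([colored[i] for i in range(4)])
print(batch["net_input"]["colors"])  # tensor([0, 1, 0, 1])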
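The reshaping in VLMTask.flat_subsample (vlmtask.py above) is easiest to see on a concrete tensor. The function below is a standalone replica of that logic with a worked example, not an import from MMPT: a [clips, 2*k] batch is regrouped so that even-indexed subsamples form the first sub-batch and odd-indexed subsamples the second.

import torch

def flat_subsample(tensor):  # same logic as VLMTask.flat_subsample above
    size = tensor.size()
    if len(size) >= 2:
        batch_size = size[0] * (size[1] // 2)
        expanded_size = (batch_size, 2) + size[2:] if len(size) > 2 else (batch_size, 2)
        tensor = tensor.view(expanded_size)
        # first sub-batch: even-indexed subsamples; second: odd-indexed ones
        tensor = torch.cat([tensor[:, 0], tensor[:, 1]], dim=0)
    return tensor

x = torch.arange(8).view(2, 4)  # 2 clips, 4 subsamples each
print(flat_subsample(x))        # tensor([0, 2, 4, 6, 1, 3, 5, 7])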
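A small usage sketch for LogSumExpMoE (logsumexp_moe.py above). The import path and tensor shapes are assumptions for illustration; the point is that the forward pass is a plain logsumexp over the expert dimension, while the backward pass routes the incoming gradient according to the supplied posterior rather than the true softmax gradient.

import torch
from examples.translation_moe.translation_moe_src.logsumexp_moe import LogSumExpMoE  # path assumed

logp = torch.randn(8, 4, requires_grad=True)      # per-sample, per-expert log-likelihoods
posterior = torch.softmax(logp.detach(), dim=-1)  # expert posteriors, detached from the graph

marginal = LogSumExpMoE.apply(logp, posterior, -1)  # forward: logsumexp over experts
marginal.sum().backward()                           # backward: grad_output * posterior
print(torch.allclose(logp.grad, posterior))         # True: gradients follow the posterior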