├── tests ├── __init__.py ├── gpu │ ├── __init__.py │ └── transformer_quantization_config.yaml ├── distributed │ └── __init__.py └── speech_recognition │ └── __init__.py ├── fairseq_cli └── __init__.py ├── scripts ├── __init__.py ├── replace_tsv.sh ├── lcrm_tsv.sh ├── lcrm.py ├── mecab_token.py ├── token.sh ├── pipeline_token.sh ├── lowercase.perl ├── spm_train.py ├── compound_split_bleu.sh ├── pipe_replace_tsv.sh ├── sacrebleu.sh ├── extract_txt_from_tsv.py └── convert_dictionary.lua ├── fairseq ├── logging │ └── __init__.py ├── data │ ├── audio │ │ └── __init__.py │ ├── multilingual │ │ └── __init__.py │ ├── num_samples_dataset.py │ ├── id_dataset.py │ ├── offset_tokens_dataset.py │ ├── legacy │ │ └── __init__.py │ ├── roll_dataset.py │ ├── raw_label_dataset.py │ ├── lru_cache_dataset.py │ ├── encoders │ │ ├── space_tokenizer.py │ │ ├── characters.py │ │ ├── nltk_tokenizer.py │ │ ├── __init__.py │ │ ├── bytes.py │ │ └── utils.py │ ├── sort_dataset.py │ ├── strip_token_dataset.py │ ├── list_dataset.py │ ├── numel_dataset.py │ ├── colorize_dataset.py │ └── pad_dataset.py ├── version.txt ├── modules │ ├── quantization │ │ ├── __init__.py │ │ ├── scalar │ │ │ ├── __init__.py │ │ │ └── modules │ │ │ │ └── __init__.py │ │ └── pq │ │ │ ├── __init__.py │ │ │ └── modules │ │ │ └── __init__.py │ ├── speech_to_text │ │ └── __init__.py │ ├── lightconv_layer │ │ ├── __init__.py │ │ └── setup.py │ ├── dynamicconv_layer │ │ ├── __init__.py │ │ ├── setup.py │ │ └── dynamiconv_cpu.cpp │ ├── grad_multiply.py │ ├── same_pad.py │ ├── transpose_last.py │ ├── utils.py │ ├── unfold.py │ ├── gelu.py │ ├── fp32_group_norm.py │ └── scalar_bias.py ├── models │ ├── speech_to_text │ │ ├── modules │ │ │ └── __init__.py │ │ └── __init__.py │ ├── bart │ │ └── __init__.py │ ├── wav2vec │ │ └── __init__.py │ ├── roberta │ │ └── __init__.py │ ├── nat │ │ └── __init__.py │ └── huggingface │ │ └── __init__.py ├── torch_imputer │ └── __init__.py ├── config │ ├── model │ │ ├── wav2vec │ │ │ └── 
vq_wav2vec_gumbel.yaml │ │ └── wav2vec2 │ │ │ ├── wav2vec2_base.yaml │ │ │ └── wav2vec2_large.yaml │ ├── __init__.py │ └── config.yaml ├── model_parallel │ ├── models │ │ ├── roberta │ │ │ └── __init__.py │ │ ├── pipeline_parallel_transformer │ │ │ └── __init__.py │ │ └── __init__.py │ ├── __init__.py │ ├── criterions │ │ └── __init__.py │ └── modules │ │ └── __init__.py ├── benchmark │ └── __init__.py ├── dataclass │ └── __init__.py ├── tokenizer.py ├── clib │ ├── libnat_cuda │ │ └── edit_dist.h │ └── libbleu │ │ └── module.cpp ├── distributed │ └── __init__.py └── scoring │ └── chrf.py ├── examples ├── .gitignore ├── linformer │ ├── linformer_src │ │ ├── models │ │ │ └── __init__.py │ │ ├── modules │ │ │ └── __init__.py │ │ └── __init__.py │ └── README.md ├── latent_depth │ └── latent_depth_src │ │ ├── loss │ │ └── __init__.py │ │ ├── models │ │ └── __init__.py │ │ ├── modules │ │ └── __init__.py │ │ └── __init__.py ├── multilingual │ ├── data_scripts │ │ ├── requirement.txt │ │ ├── utils │ │ │ └── strip_sgm.sh │ │ ├── README.md │ │ └── preprocess_ML50_v1.sh │ ├── ML50_langs.txt │ └── multilingual_fairseq_gen.sh ├── adaptive_span │ ├── truncated_bptt_lm_task.py │ └── __init__.py ├── speech_recognition │ ├── __init__.py │ ├── tasks │ │ └── __init__.py │ ├── data │ │ └── __init__.py │ ├── models │ │ └── __init__.py │ ├── hydra │ │ └── conf │ │ │ ├── infer.yaml │ │ │ └── hydra │ │ │ └── sweeper │ │ │ └── ax.yaml │ └── criterions │ │ └── __init__.py ├── simultaneous_translation │ ├── eval │ │ ├── __init__.py │ │ ├── scorers │ │ │ └── __init__.py │ │ └── agents │ │ │ └── __init__.py │ ├── __init__.py │ ├── models │ │ └── __init__.py │ ├── utils │ │ └── __init__.py │ └── modules │ │ └── __init__.py ├── rxf │ ├── __init__.py │ └── rxf_src │ │ └── __init__.py ├── noisychannel │ └── __init__.py ├── roberta │ ├── commonsense_qa │ │ ├── __init__.py │ │ └── download_cqa_data.sh │ └── wsc │ │ └── __init__.py ├── translation_moe │ └── translation_moe_src │ │ ├── __init__.py │ 
│ └── logsumexp_moe.py ├── pointer_generator │ └── pointer_generator_src │ │ └── __init__.py ├── truncated_bptt │ └── __init__.py ├── __init__.py ├── m2m_100 │ └── tokenizers │ │ ├── thirdparty │ │ └── .gitignore │ │ ├── tokenize_zh.py │ │ ├── tokenize_thai.py │ │ ├── seg_ja.sh │ │ ├── seg_ko.sh │ │ ├── README.md │ │ ├── tokenize_indic.py │ │ └── tokenizer_ar.sh ├── laser │ └── laser_src │ │ └── __init__.py ├── fast_noisy_channel │ └── __init__.py ├── constrained_decoding │ ├── normalize.py │ └── tok.py ├── megatron_11b │ └── detok.py ├── unsupervised_quality_estimation │ └── repeat_lines.py └── language_model │ └── prepare-wikitext-103.sh ├── egs ├── aishell │ └── asr │ │ ├── conf │ │ ├── ctc.yaml │ │ ├── mlo.yaml │ │ ├── dlcl.yaml │ │ ├── xctc.yaml │ │ ├── norm.yaml │ │ ├── conformer.yaml │ │ ├── dynamic.yaml │ │ ├── rpr.yaml │ │ ├── ipa.yaml │ │ ├── inter.yaml │ │ ├── pds_base.yaml │ │ ├── purectc.yaml │ │ ├── basis.yaml │ │ ├── big.yaml │ │ ├── base.yaml │ │ ├── base_nonorm.yaml │ │ ├── big_wenet.yaml │ │ ├── pds_base_16.yaml │ │ ├── pds_base_8.yaml │ │ ├── xinter.yaml │ │ ├── pds_big_8.yaml │ │ └── pds_base_32.yaml │ │ └── local │ │ ├── cal_wer.sh │ │ ├── cal_ctc_bleu.sh │ │ ├── extract_txt_from_tsv.py │ │ ├── cal_bleu.sh │ │ └── monitor.sh ├── librispeech │ └── asr │ │ ├── conf │ │ ├── ctc.yaml │ │ ├── mlo.yaml │ │ ├── wo_fusion.yaml │ │ ├── 100h.yaml │ │ ├── dlcl.yaml │ │ ├── xctc.yaml │ │ ├── norm.yaml │ │ ├── local_attn.yaml │ │ ├── conformer_wom.yaml │ │ ├── conformer.yaml │ │ ├── dynamic.yaml │ │ ├── rpr.yaml │ │ ├── ipa.yaml │ │ ├── inter.yaml │ │ ├── pds_base.yaml │ │ ├── pds_big.yaml │ │ ├── pds_deep.yaml │ │ ├── purectc.yaml │ │ ├── compare_purectc_base.yaml │ │ ├── big.yaml │ │ ├── ConformerCTCSmall.yaml │ │ ├── purectc_inter.yaml │ │ ├── basis.yaml │ │ ├── purectc_pds_base_16_growth.yaml │ │ ├── purectc_pds_base_8_growth.yaml │ │ ├── purectc_pds_base_8_growth360.yaml │ │ ├── purectc_pds_big_16.yaml │ │ ├── purectc_pds_large_16.yaml │ │ ├── 
base.yaml │ │ ├── purectc_pds_base_16_growth_fusion256.yaml │ │ ├── purectc_pds_base_16_growth_fusion320.yaml │ │ ├── purectc_pds_base_16_growth_fusion360.yaml │ │ ├── purectc_pds_base_8_growth_fusion256.yaml │ │ ├── purectc_pds_big_8.yaml │ │ ├── purectc_pds_large_8.yaml │ │ ├── pds_base_8_444.yaml │ │ ├── pds_base_8.yaml │ │ ├── pds_base_16.yaml │ │ ├── xinter.yaml │ │ ├── pds_base_4.yaml │ │ ├── pds_big_16.yaml │ │ ├── pds_big_8.yaml │ │ ├── pds_deep30_8.yaml │ │ ├── pds_deep_16.yaml │ │ ├── pds_deep_8.yaml │ │ ├── pds_base_16_growth.yaml │ │ ├── pds_deep18_16.yaml │ │ ├── pds_deep30_16.yaml │ │ ├── pds_base_32.yaml │ │ ├── pds_base_8_growth.yaml │ │ ├── pds_big_32.yaml │ │ ├── pds_base_8_growth_fusion256.yaml │ │ ├── pds_deep_32.yaml │ │ └── pds_deep18_32.yaml │ │ ├── pipe.sh │ │ └── local │ │ ├── cal_wer.sh │ │ ├── cal_ctc_bleu.sh │ │ ├── extract_txt_from_tsv.py │ │ ├── cal_bleu.sh │ │ └── monitor.sh ├── mustc │ ├── st │ │ ├── conf │ │ │ ├── mlo.yaml │ │ │ ├── ctc.yaml │ │ │ ├── dlcl.yaml │ │ │ ├── xctc.yaml │ │ │ ├── conformer.yaml │ │ │ ├── dynamic.yaml │ │ │ ├── rpr.yaml │ │ │ ├── inter.yaml │ │ │ ├── ipa.yaml │ │ │ ├── aipa.yaml │ │ │ ├── aipa_kd.yaml │ │ │ ├── basis.yaml │ │ │ ├── big.yaml │ │ │ ├── reproduction_aipa_kd.yaml │ │ │ ├── reproduction_aipa_kd_womixuploss.yaml │ │ │ ├── base.yaml │ │ │ ├── xinter.yaml │ │ │ ├── pds_deep_8.yaml │ │ │ └── pds_big_32.yaml │ │ └── local │ │ │ ├── cal_wer.sh │ │ │ ├── cal_ctc_bleu.sh │ │ │ ├── extract_txt_from_tsv.py │ │ │ ├── cal_bleu.sh │ │ │ └── monitor.sh │ ├── asr │ │ ├── conf │ │ │ ├── ctc.yaml │ │ │ ├── dlcl.yaml │ │ │ ├── xctc.yaml │ │ │ ├── conformer.yaml │ │ │ ├── rpr.yaml │ │ │ ├── ipa.yaml │ │ │ ├── inter.yaml │ │ │ ├── purectc.yaml │ │ │ ├── basis.yaml │ │ │ ├── purectc_pds_base_8_grow.yaml │ │ │ ├── purectc_pds_base_8.yaml │ │ │ ├── purectc_pds_base_8_grow512.yaml │ │ │ ├── big.yaml │ │ │ ├── purectc_pds_base_8_compare.yaml │ │ │ ├── purectc_pds_base_8_grow_compare.yaml │ │ │ ├── base.yaml │ │ │ ├── 
xinter.yaml │ │ │ ├── pds_base_8_grow.yaml │ │ │ └── pds_base_8_grow512.yaml │ │ └── local │ │ │ ├── cal_wer.sh │ │ │ ├── cal_ctc_bleu.sh │ │ │ ├── extract_txt_from_tsv.py │ │ │ ├── cal_bleu.sh │ │ │ └── monitor.sh │ └── mt │ │ ├── conf │ │ ├── dlcl.yaml │ │ ├── rpr.yaml │ │ ├── ctc.yaml │ │ ├── basis.yaml │ │ ├── base.yaml │ │ └── small.yaml │ │ ├── local │ │ └── monitor.sh │ │ └── train.sh └── wmt16 │ └── mt │ ├── conf │ ├── dlcl.yaml │ ├── rpr.yaml │ ├── ctc.yaml │ ├── basis.yaml │ ├── base.yaml │ ├── deep.yaml │ ├── base_postnorm.yaml │ ├── big.yaml │ ├── deep_ctc.yaml │ └── big_postnorm.yaml │ ├── local │ ├── lower_rm.py │ ├── monitor.sh │ └── replace-unicode-punctuation.perl │ ├── decode.sh │ └── train.sh ├── docs ├── docutils.conf ├── requirements.txt ├── fairseq.gif ├── fairseq_logo.png ├── _static │ └── theme_overrides.css ├── modules.rst ├── Makefile ├── criterions.rst ├── make.bat └── optim.rst ├── pyproject.toml ├── .gitmodules ├── train.py └── entry.sh /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fairseq_cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/gpu/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fairseq/logging/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fairseq/data/audio/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fairseq/version.txt: -------------------------------------------------------------------------------- 1 | 1.0.0a0 2 | -------------------------------------------------------------------------------- /tests/distributed/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/speech_recognition/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | !*/*.sh 2 | !*/*.md 3 | -------------------------------------------------------------------------------- /fairseq/modules/quantization/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/ctc.yaml: -------------------------------------------------------------------------------- 1 | ctc-weight: 0.3 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/ctc.yaml: -------------------------------------------------------------------------------- 1 | ctc-weight: 0.3 -------------------------------------------------------------------------------- /egs/mustc/st/conf/mlo.yaml: -------------------------------------------------------------------------------- 1 | inter-ctc-mlo: 1:2:3 -------------------------------------------------------------------------------- /docs/docutils.conf: -------------------------------------------------------------------------------- 1 | [writers] 2 | option-limit=0 3 | 
-------------------------------------------------------------------------------- /egs/aishell/asr/conf/mlo.yaml: -------------------------------------------------------------------------------- 1 | inter-ctc-mlo: 1:2:3 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/mlo.yaml: -------------------------------------------------------------------------------- 1 | inter-ctc-mlo: 1:2:3 -------------------------------------------------------------------------------- /examples/linformer/linformer_src/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/linformer/linformer_src/modules/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fairseq/models/speech_to_text/modules/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx<2.0 2 | sphinx-argparse 3 | -------------------------------------------------------------------------------- /examples/latent_depth/latent_depth_src/loss/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/latent_depth/latent_depth_src/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/latent_depth/latent_depth_src/modules/__init__.py: -------------------------------------------------------------------------------- 
1 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/wo_fusion.yaml: -------------------------------------------------------------------------------- 1 | pds-fusion-method: none -------------------------------------------------------------------------------- /examples/multilingual/data_scripts/requirement.txt: -------------------------------------------------------------------------------- 1 | wget 2 | pandas -------------------------------------------------------------------------------- /egs/mustc/asr/conf/ctc.yaml: -------------------------------------------------------------------------------- 1 | ctc-weight: 0.3 2 | share-ctc-and-embed: True -------------------------------------------------------------------------------- /egs/mustc/mt/conf/dlcl.yaml: -------------------------------------------------------------------------------- 1 | use-enc-dlcl: True 2 | use-dec-dlcl: True 3 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/ctc.yaml: -------------------------------------------------------------------------------- 1 | ctc-weight: 0.3 2 | share-ctc-and-embed: True -------------------------------------------------------------------------------- /egs/mustc/st/conf/dlcl.yaml: -------------------------------------------------------------------------------- 1 | use-enc-dlcl: True 2 | use-dec-dlcl: True 3 | -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/dlcl.yaml: -------------------------------------------------------------------------------- 1 | use-enc-dlcl: True 2 | use-dec-dlcl: True 3 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/dlcl.yaml: -------------------------------------------------------------------------------- 1 | use-enc-dlcl: True 2 | use-dec-dlcl: True 3 | 
-------------------------------------------------------------------------------- /egs/aishell/asr/conf/xctc.yaml: -------------------------------------------------------------------------------- 1 | xctc-weight: 0.3 2 | share-xctc-and-embed: True -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/100h.yaml: -------------------------------------------------------------------------------- 1 | train-subset: train-clean-100 2 | lr: 0.001 -------------------------------------------------------------------------------- /egs/mustc/asr/conf/dlcl.yaml: -------------------------------------------------------------------------------- 1 | use-enc-dlcl: True 2 | use-dec-dlcl: True 3 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/xctc.yaml: -------------------------------------------------------------------------------- 1 | xctc-weight: 0.3 2 | share-xctc-and-embed: True -------------------------------------------------------------------------------- /egs/mustc/st/conf/xctc.yaml: -------------------------------------------------------------------------------- 1 | xctc-weight: 0.3 2 | share-xctc-and-embed: True -------------------------------------------------------------------------------- /docs/fairseq.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xuchennlp/S2T/HEAD/docs/fairseq.gif -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/dlcl.yaml: -------------------------------------------------------------------------------- 1 | use-enc-dlcl: True 2 | use-dec-dlcl: True 3 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/xctc.yaml: -------------------------------------------------------------------------------- 1 | xctc-weight: 0.3 2 | share-xctc-and-embed: True 
-------------------------------------------------------------------------------- /docs/fairseq_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xuchennlp/S2T/HEAD/docs/fairseq_logo.png -------------------------------------------------------------------------------- /egs/aishell/asr/conf/norm.yaml: -------------------------------------------------------------------------------- 1 | encoder-embed-norm: True 2 | encoder-no-scale-embedding: True -------------------------------------------------------------------------------- /examples/adaptive_span/truncated_bptt_lm_task.py: -------------------------------------------------------------------------------- 1 | ../truncated_bptt/truncated_bptt_lm_task.py -------------------------------------------------------------------------------- /examples/speech_recognition/__init__.py: -------------------------------------------------------------------------------- 1 | from . import criterions, models, tasks # noqa 2 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/norm.yaml: -------------------------------------------------------------------------------- 1 | encoder-embed-norm: True 2 | encoder-no-scale-embedding: True -------------------------------------------------------------------------------- /fairseq/torch_imputer/__init__.py: -------------------------------------------------------------------------------- 1 | from .imputer import imputer_loss, ImputerLoss, best_alignment, ctc_decode 2 | -------------------------------------------------------------------------------- /fairseq/modules/speech_to_text/__init__.py: -------------------------------------------------------------------------------- 1 | from .subsampling import * 2 | from .ctc import * 3 | from .adapter import * -------------------------------------------------------------------------------- 
/examples/multilingual/data_scripts/utils/strip_sgm.sh: -------------------------------------------------------------------------------- 1 | grep "seg id" | sed 's///g' | sed 's/<\/seg>//g' 2 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "wheel", "cython"] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/local_attn.yaml: -------------------------------------------------------------------------------- 1 | encoder-attention-type: local 2 | hard-mask-window: 0 3 | gauss-mask-sigma: 3 4 | init-mask-weight: 0 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/conformer_wom.yaml: -------------------------------------------------------------------------------- 1 | use-cnn-module: True 2 | cnn-module-kernel: 31 3 | encoder-attention-type: rel_pos 4 | encoder-activation-fn: swish -------------------------------------------------------------------------------- /fairseq/config/model/wav2vec/vq_wav2vec_gumbel.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation: gelu 3 | vq_type: gumbel 4 | vq_depth: 2 5 | combine_groups: true 6 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "fairseq/model_parallel/megatron"] 2 | path = fairseq/model_parallel/megatron 3 | url = https://github.com/ngoyal2707/Megatron-LM 4 | branch = fairseq 5 | -------------------------------------------------------------------------------- /egs/librispeech/asr/pipe.sh: -------------------------------------------------------------------------------- 1 | dir= 
2 | 3 | cmd="" 4 | for d in `ls $dir`; do 5 | echo $d 6 | ./run.sh --stage 3 --max_tokens 50000 --infer_parameter "--cal_flops" --exp_name $d 7 | done -------------------------------------------------------------------------------- /egs/aishell/asr/conf/conformer.yaml: -------------------------------------------------------------------------------- 1 | macaron-style: True 2 | use-cnn-module: True 3 | cnn-module-kernel: 15 4 | encoder-attention-type: rel_pos 5 | encoder-activation-fn: swish 6 | layer-padding-mask: True -------------------------------------------------------------------------------- /egs/mustc/asr/conf/conformer.yaml: -------------------------------------------------------------------------------- 1 | macaron-style: True 2 | use-cnn-module: True 3 | cnn-module-kernel: 15 4 | encoder-attention-type: rel_pos 5 | encoder-activation-fn: swish 6 | layer-padding-mask: True -------------------------------------------------------------------------------- /egs/mustc/st/conf/conformer.yaml: -------------------------------------------------------------------------------- 1 | macaron-style: True 2 | use-cnn-module: True 3 | cnn-module-kernel: 15 4 | encoder-attention-type: rel_pos 5 | encoder-activation-fn: swish 6 | layer-padding-mask: True -------------------------------------------------------------------------------- /egs/mustc/st/conf/dynamic.yaml: -------------------------------------------------------------------------------- 1 | compression-metric: threshold 2 | compression-mode: create 3 | compression-layers: 6,9 4 | compression-threshold: 0.99 5 | compression-norm: True 6 | compression-pos: True -------------------------------------------------------------------------------- /egs/aishell/asr/conf/dynamic.yaml: -------------------------------------------------------------------------------- 1 | compression-metric: threshold 2 | compression-mode: create 3 | compression-layers: 6,9 4 | compression-threshold: 0.95 5 | compression-norm: True 6 | 
compression-pos: True -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/conformer.yaml: -------------------------------------------------------------------------------- 1 | macaron-style: True 2 | use-cnn-module: True 3 | cnn-module-kernel: 15 4 | encoder-attention-type: rel_pos 5 | encoder-activation-fn: swish 6 | layer-padding-mask: True -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/dynamic.yaml: -------------------------------------------------------------------------------- 1 | compression-metric: threshold 2 | compression-mode: create 3 | compression-layers: 6,9 4 | compression-threshold: 0.95 5 | compression-norm: True 6 | compression-pos: True -------------------------------------------------------------------------------- /egs/mustc/asr/conf/rpr.yaml: -------------------------------------------------------------------------------- 1 | encoder-attention-type: rel_pos 2 | 3 | # encoder-attention-type: relative 4 | # decoder-attention-type: relative 5 | # max-encoder-relative-length: 100 6 | # max-decoder-relative-length: 20 -------------------------------------------------------------------------------- /egs/mustc/mt/conf/rpr.yaml: -------------------------------------------------------------------------------- 1 | #encoder-attention-type: rel_selfattn 2 | encoder-attention-type: relative 3 | decoder-attention-type: relative 4 | max-encoder-relative-length: 20 5 | max-decoder-relative-length: 20 6 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/rpr.yaml: -------------------------------------------------------------------------------- 1 | encoder-attention-type: rel_pos 2 | 3 | # encoder-attention-type: relative 4 | # decoder-attention-type: relative 5 | # max-encoder-relative-length: 100 6 | # max-decoder-relative-length: 20 
-------------------------------------------------------------------------------- /egs/wmt16/mt/conf/rpr.yaml: -------------------------------------------------------------------------------- 1 | #encoder-attention-type: rel_selfattn 2 | encoder-attention-type: relative 3 | decoder-attention-type: relative 4 | max-encoder-relative-length: 20 5 | max-decoder-relative-length: 20 6 | -------------------------------------------------------------------------------- /fairseq/config/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /fairseq/config/model/wav2vec2/wav2vec2_base.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | quantize_targets: true 4 | final_dim: 256 5 | encoder_layerdrop: 0.05 6 | dropout_input: 0.1 7 | dropout_features: 0.1 8 | feature_grad_mult: 0.1 9 | -------------------------------------------------------------------------------- /fairseq/data/multilingual/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /examples/simultaneous_translation/eval/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | -------------------------------------------------------------------------------- /egs/mustc/mt/conf/ctc.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer_ctc 2 | criterion: label_smoothed_cross_entropy_with_ctc 3 | 4 | # ctc-layer: 6 5 | ctc-weight: 0.3 6 | ctc-upsampling-ratio: 3 7 | ctc-out-downsampling: False 8 | ctc-out-downsampling-method: maxpooling -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/ctc.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer_ctc 2 | criterion: label_smoothed_cross_entropy_with_ctc 3 | 4 | # ctc-layer: 6 5 | ctc-weight: 0.3 6 | ctc-upsampling-ratio: 3 7 | ctc-out-downsampling: False 8 | ctc-out-downsampling-method: maxpooling 9 | -------------------------------------------------------------------------------- /examples/rxf/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import rxf_src # noqa 7 | -------------------------------------------------------------------------------- /docs/_static/theme_overrides.css: -------------------------------------------------------------------------------- 1 | .wy-table-responsive table td kbd { 2 | white-space: nowrap; 3 | } 4 | .wy-table-responsive table td { 5 | white-space: normal !important; 6 | } 7 | .wy-table-responsive { 8 | overflow: visible !important; 9 | } 10 | -------------------------------------------------------------------------------- /examples/noisychannel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .rerank_options import * # noqa 7 | -------------------------------------------------------------------------------- /examples/simultaneous_translation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import models # noqa 7 | -------------------------------------------------------------------------------- /fairseq/model_parallel/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .model import * # noqa 7 | -------------------------------------------------------------------------------- /examples/roberta/commonsense_qa/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import commonsense_qa_task # noqa 7 | -------------------------------------------------------------------------------- /fairseq/model_parallel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import criterions, models, modules # noqa 7 | -------------------------------------------------------------------------------- /scripts/replace_tsv.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | org_tsv=$1 4 | replace_tsv=$2 5 | out_tsv=$3 6 | item=$4 7 | 8 | tmp=$(mktemp -t temp.record.XXXXXX) 9 | 10 | python3 extract_txt_from_tsv.py $replace_tsv $tmp $item 11 | python3 replace_txt_from_tsv.py $org_tsv $out_tsv $tmp $item 12 | -------------------------------------------------------------------------------- /examples/linformer/linformer_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .models import linformer_roberta # noqa 7 | -------------------------------------------------------------------------------- /fairseq/modules/quantization/scalar/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .utils import quantize_model_ # NOQA 7 | -------------------------------------------------------------------------------- /examples/translation_moe/translation_moe_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import translation_moe # noqa 7 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .lightconv_layer import LightconvLayer # noqa 7 | -------------------------------------------------------------------------------- /docs/modules.rst: -------------------------------------------------------------------------------- 1 | Modules 2 | ======= 3 | 4 | Fairseq provides several stand-alone :class:`torch.nn.Module` classes that may 5 | be helpful when implementing a new :class:`~fairseq.models.BaseFairseqModel`. 6 | 7 | .. automodule:: fairseq.modules 8 | :members: 9 | :undoc-members: 10 | -------------------------------------------------------------------------------- /examples/pointer_generator/pointer_generator_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import transformer_pg # noqa 7 | -------------------------------------------------------------------------------- /fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .model import * # noqa 7 | -------------------------------------------------------------------------------- /fairseq/models/bart/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .dynamicconv_layer import DynamicconvLayer # noqa 7 | -------------------------------------------------------------------------------- /fairseq/modules/quantization/pq/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .utils import SizeTracker, quantize_model_ # NOQA 7 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/rpr.yaml: -------------------------------------------------------------------------------- 1 | encoder-attention-type: rel_pos 2 | 3 | #encoder-attention-type: rel_pos_legacy 4 | #encoder-attention-type: rel_selfattn 5 | #encoder-attention-type: relative 6 | #decoder-attention-type: relative 7 | #max-encoder-relative-length: 100 8 | #max-decoder-relative-length: 20 9 | -------------------------------------------------------------------------------- /examples/roberta/wsc/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import wsc_criterion # noqa 7 | from . import wsc_task # noqa 8 | -------------------------------------------------------------------------------- /examples/truncated_bptt/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import transformer_xl_model, truncated_bptt_lm_task # noqa 7 | -------------------------------------------------------------------------------- /scripts/lcrm_tsv.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | in_tsv=$1 4 | out_tsv=$2 5 | item=$3 6 | 7 | tmp=$(mktemp -t temp.record.XXXXXX) 8 | 9 | python3 extract_txt_from_tsv.py $in_tsv $tmp $item 10 | cat $tmp | python3 lcrm.py > $tmp.lcrm 11 | python3 replace_txt_from_tsv.py $in_tsv $out_tsv $tmp.lcrm $item 12 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/rpr.yaml: -------------------------------------------------------------------------------- 1 | encoder-attention-type: rel_pos 2 | 3 | #encoder-attention-type: rel_pos_legacy 4 | #encoder-attention-type: rel_selfattn 5 | #encoder-attention-type: relative 6 | #decoder-attention-type: relative 7 | #max-encoder-relative-length: 100 8 | #max-decoder-relative-length: 20 9 | -------------------------------------------------------------------------------- /examples/rxf/rxf_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa 7 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | try: 7 | from fairseq.version import __version__ # noqa 8 | except ImportError: 9 | pass 10 | -------------------------------------------------------------------------------- /examples/m2m_100/tokenizers/thirdparty/.gitignore: -------------------------------------------------------------------------------- 1 | seg_my.py 2 | indic_nlp_library/ 3 | indic_nlp_resources/ 4 | kytea/ 5 | mecab-0.996-ko-0.9.2.tar.gz 6 | mecab-0.996-ko-0.9.2/ 7 | mosesdecoder/ 8 | wat2020.my-en.zip 9 | wat2020.my-en/ 10 | wmt16-scripts/ 11 | mecab-ko-dic-2.1.1-20180720/ 12 | mecab-ko-dic-2.1.1-20180720.tar.gz -------------------------------------------------------------------------------- /examples/speech_recognition/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | task_name = file[: file.find(".py")] 8 | importlib.import_module("examples.speech_recognition.tasks." + task_name) 9 | -------------------------------------------------------------------------------- /fairseq/models/wav2vec/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .wav2vec import * # noqa 7 | from .wav2vec2 import * # noqa 8 | from .wav2vec2_asr import * # noqa 9 | -------------------------------------------------------------------------------- /examples/speech_recognition/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .asr_dataset import AsrDataset 7 | 8 | 9 | __all__ = [ 10 | "AsrDataset", 11 | ] 12 | -------------------------------------------------------------------------------- /examples/speech_recognition/models/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | model_name = file[: file.find(".py")] 8 | importlib.import_module("examples.speech_recognition.models." + model_name) 9 | -------------------------------------------------------------------------------- /fairseq/benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | # import models/tasks to register them 7 | from . import dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa 8 | -------------------------------------------------------------------------------- /examples/laser/laser_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .laser_task import * # noqa 7 | from .laser_lstm import * # noqa 8 | from .laser_transformer import * # noqa 9 | -------------------------------------------------------------------------------- /fairseq/modules/quantization/pq/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .qconv import PQConv2d # NOQA 7 | from .qemb import PQEmbedding # NOQA 8 | from .qlinear import PQLinear # NOQA 9 | -------------------------------------------------------------------------------- /egs/wmt16/mt/local/lower_rm.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import string 3 | 4 | 5 | in_file = sys.argv[1] 6 | 7 | with open(in_file, "r", encoding="utf-8") as f: 8 | for line in f.readlines(): 9 | line = line.strip().lower() 10 | for w in string.punctuation: 11 | line = line.replace(w, "") 12 | line = line.replace(" ", "") 13 | print(line) 14 | 15 | -------------------------------------------------------------------------------- /examples/fast_noisy_channel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import noisy_channel_translation # noqa 7 | from . import noisy_channel_sequence_generator # noqa 8 | from . import noisy_channel_beam_search # noqa 9 | -------------------------------------------------------------------------------- /fairseq/dataclass/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .configs import FairseqDataclass 7 | from .constants import ChoiceEnum 8 | 9 | 10 | __all__ = [ 11 | "FairseqDataclass", 12 | "ChoiceEnum", 13 | ] 14 | -------------------------------------------------------------------------------- /fairseq/config/config.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | hydra: 4 | run: 5 | dir: . 6 | 7 | defaults: 8 | - task: null 9 | - model: null 10 | - criterion: cross_entropy 11 | - optimizer: null 12 | - lr_scheduler: fixed 13 | - bpe: null 14 | - tokenizer: null 15 | - scoring: null 16 | - generation: null 17 | - common_eval: null 18 | - eval_lm: null 19 | -------------------------------------------------------------------------------- /scripts/lcrm.py: -------------------------------------------------------------------------------- 1 | import string 2 | import sys 3 | 4 | punctuation_str = string.punctuation 5 | punctuation_str = punctuation_str.replace("'", "") 6 | 7 | user_input = sys.stdin.readlines() 8 | for line in user_input: 9 | line = line.strip().lower() 10 | for w in punctuation_str: 11 | line = line .replace(w, "") 12 | line = " ".join(line.split(" ")) 13 | 14 | print(line) 15 | -------------------------------------------------------------------------------- /examples/m2m_100/tokenizers/tokenize_zh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | 8 | import fileinput 9 | 10 | import sacrebleu 11 | 12 | 13 | for line in fileinput.input(): 14 | print(sacrebleu.tokenize_zh(line)) 15 | -------------------------------------------------------------------------------- /scripts/mecab_token.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import MeCab 3 | import sys 4 | 5 | fin = sys.argv[1] 6 | fout = sys.argv[2] 7 | 8 | fw = open(fout, "w") 9 | 10 | wakati = MeCab.Tagger("-Owakati") 11 | 12 | with open(fin, "r") as fr: 13 | for line in fr.readlines(): 14 | token_line = wakati.parse(line.strip()).split() 15 | fw.write(" ".join(token_line) + "\n") 16 | 17 | fw.close() 18 | -------------------------------------------------------------------------------- /examples/m2m_100/tokenizers/tokenize_thai.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import sys 8 | 9 | from pythainlp import word_tokenize 10 | 11 | 12 | for line in sys.stdin: 13 | print(" ".join(word_tokenize(line.strip()))) 14 | -------------------------------------------------------------------------------- /fairseq/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | from .model_camembert import * # noqa 9 | from .model_gottbert import * # noqa 10 | from .model_xlmr import * # noqa 11 | -------------------------------------------------------------------------------- /fairseq/modules/quantization/scalar/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .qact import ActivationQuantizer # NOQA 7 | from .qconv import IntConv2d # NOQA 8 | from .qemb import IntEmbedding # NOQA 9 | from .qlinear import IntLinear # NOQA 10 | -------------------------------------------------------------------------------- /fairseq/tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | 9 | SPACE_NORMALIZER = re.compile(r"\s+") 10 | 11 | 12 | def tokenize_line(line): 13 | line = SPACE_NORMALIZER.sub(" ", line) 14 | line = line.strip() 15 | return line.split() 16 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead. 
8 | """ 9 | 10 | from fairseq_cli.train import cli_main 11 | 12 | 13 | if __name__ == "__main__": 14 | cli_main() 15 | -------------------------------------------------------------------------------- /scripts/token.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | lang=$1 5 | in_file=$2 6 | out_file=$3 7 | 8 | export PATH=$PATH:$PWD 9 | 10 | if [[ $lang == "ja" ]] ; then 11 | cmd="mecab_token.py $in_file $out_file" 12 | elif [[ $lang == "zh" ]] ; then 13 | cmd="python3 -m jieba -d ' ' $in_file > $out_file" 14 | else 15 | cmd="tokenizer.perl -l ${lang} --threads 32 -no-escape < ${in_file} > ${out_file}" 16 | fi 17 | echo $cmd 18 | eval $cmd 19 | -------------------------------------------------------------------------------- /examples/latent_depth/latent_depth_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import multilingual_translation_latent_depth # noqa 7 | from .loss import latent_depth # noqa 8 | from .models import latent_multilingual_transformer # noqa 9 | from .modules import latent_layers # noqa 10 | -------------------------------------------------------------------------------- /entry.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | THIS_DIR="$( cd "$( dirname "$0" )" && pwd )" 4 | cd ${THIS_DIR} 5 | export ST_ROOT=/xuchen/st 6 | export NCCL_DEBUG=INFO 7 | 8 | echo "nameserver 114.114.114.114" >> /etc/resolv.conf 9 | 10 | if [[ `pip list | grep fairseq | wc -l` -eq 0 ]]; then 11 | echo "default stage: env configure" 12 | pip3 install -e . 
13 | fi 14 | 15 | shell_script=$1 16 | shift 17 | cd `dirname ${shell_script}` 18 | echo $@ 19 | bash ${shell_script} "$@" -------------------------------------------------------------------------------- /examples/m2m_100/tokenizers/seg_ja.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | SCRIPT=`realpath $0` 7 | KYTEA=`dirname $SCRIPT`/thirdparty/kytea 8 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib 9 | export PATH=$PATH:"$KYTEA/bin" 10 | 11 | cat - | tr -d "[:blank:]" | kytea -notags 12 | -------------------------------------------------------------------------------- /scripts/pipeline_token.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | splits=(train dev tst-COMMON) 5 | langs=(en zh) 6 | dir=$1 7 | cd $dir 8 | 9 | export PATH=$PATH:/root/st/Fairseq-S2T/scripts 10 | 11 | for split in ${splits[@]}; do 12 | for lang in ${langs[@]}; do 13 | in=$split/txt/${split}.${lang} 14 | out=$split/txt/${split}.tok.$lang 15 | cmd="token.sh $lang $in $out" 16 | echo $cmd 17 | eval $cmd 18 | done 19 | done 20 | -------------------------------------------------------------------------------- /examples/m2m_100/tokenizers/seg_ko.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | SCRIPT=`realpath $0` 7 | MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2 8 | 9 | export PATH=$PATH:"$MECAB/bin":"$MECAB/lib" 10 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib" 11 | 12 | cat - | mecab -O wakati 13 | -------------------------------------------------------------------------------- /scripts/lowercase.perl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | # 3 | # This file is part of moses. Its use is licensed under the GNU Lesser General 4 | # Public License version 2.1 or, at your option, any later version. 5 | 6 | use warnings; 7 | use strict; 8 | 9 | while (@ARGV) { 10 | $_ = shift; 11 | /^-b$/ && ($| = 1, next); # not buffered (flush each line) 12 | } 13 | 14 | binmode(STDIN, ":utf8"); 15 | binmode(STDOUT, ":utf8"); 16 | 17 | while() { 18 | print lc($_); 19 | } 20 | -------------------------------------------------------------------------------- /fairseq/data/num_samples_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import FairseqDataset 7 | 8 | 9 | class NumSamplesDataset(FairseqDataset): 10 | def __getitem__(self, index): 11 | return 1 12 | 13 | def __len__(self): 14 | return 0 15 | 16 | def collater(self, samples): 17 | return sum(samples) 18 | -------------------------------------------------------------------------------- /fairseq/config/model/wav2vec2/wav2vec2_large.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | quantize_targets: true 4 | extractor_mode: layer_norm 5 | layer_norm_first: true 6 | final_dim: 768 7 | latent_temp: [2.0,0.1,0.999995] 8 | encoder_layerdrop: 0.0 9 | dropout_input: 0.0 10 | dropout_features: 0.0 11 | dropout: 0.0 12 | attention_dropout: 0.0 13 | conv_bias: true 14 | 15 | encoder_layers: 24 16 | encoder_embed_dim: 1024 17 | encoder_ffn_embed_dim: 4096 18 | encoder_attention_heads: 16 19 | 20 | feature_grad_mult: 1.0 21 | -------------------------------------------------------------------------------- /scripts/spm_train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
7 | 8 | from __future__ import absolute_import, division, print_function, unicode_literals 9 | 10 | import sys 11 | 12 | import sentencepiece as spm 13 | 14 | 15 | if __name__ == "__main__": 16 | spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:])) 17 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/ipa.yaml: -------------------------------------------------------------------------------- 1 | inter-mixup: True 2 | inter-mixup-layer: -1 3 | inter-mixup-decoder-layer: 0 4 | inter-mixup-prob: 1.0 5 | inter-mixup-ratio: 1.0 6 | inter-mixup-beta: 0.2 7 | inter-mixup-keep-org: False 8 | inter-mixup-decoder-emb: False 9 | ctc-mixup-consistent-weight: 0 10 | inter-ctc-mixup-consistent-weight: 0 11 | mixup-consistent-weight: 0 12 | cal-mixup-loss: True 13 | no-specaugment: False 14 | layer-out-norm: False 15 | 16 | inter-mixup-ratio-decay: False 17 | inter-mixup-ratio-decay-params: 20000,40000,0 -------------------------------------------------------------------------------- /egs/mustc/asr/conf/ipa.yaml: -------------------------------------------------------------------------------- 1 | inter-mixup: True 2 | inter-mixup-layer: -1 3 | inter-mixup-decoder-layer: 0 4 | inter-mixup-prob: 1.0 5 | inter-mixup-ratio: 1.0 6 | inter-mixup-beta: 0.2 7 | inter-mixup-keep-org: False 8 | inter-mixup-decoder-emb: False 9 | ctc-mixup-consistent-weight: 0 10 | inter-ctc-mixup-consistent-weight: 0 11 | mixup-consistent-weight: 0 12 | cal-mixup-loss: True 13 | no-specaugment: False 14 | layer-out-norm: False 15 | 16 | inter-mixup-ratio-decay: False 17 | inter-mixup-ratio-decay-params: 20000,40000,0 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/ipa.yaml: -------------------------------------------------------------------------------- 1 | inter-mixup: True 2 | inter-mixup-layer: -1 3 | inter-mixup-decoder-layer: 0 4 | inter-mixup-prob: 1.0 5 | inter-mixup-ratio: 1.0 6 | 
inter-mixup-beta: 0.2 7 | inter-mixup-keep-org: False 8 | inter-mixup-decoder-emb: False 9 | ctc-mixup-consistent-weight: 0 10 | inter-ctc-mixup-consistent-weight: 0 11 | mixup-consistent-weight: 0 12 | cal-mixup-loss: True 13 | no-specaugment: False 14 | layer-out-norm: False 15 | 16 | inter-mixup-ratio-decay: False 17 | inter-mixup-ratio-decay-params: 20000,40000,0 -------------------------------------------------------------------------------- /egs/mustc/asr/local/cal_wer.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | infer_dir=$1 6 | tag=$2 7 | s2s_infer_file=${infer_dir}/$3 8 | org_ctc_infer_file=${infer_dir}/$4 9 | ref=$5 10 | 11 | idx=${infer_dir}/${tag}_idx 12 | ctc_infer=${infer_dir}/${tag}_ctc_infer 13 | ctc_infer_sort=${infer_dir}/${tag}_ctc_infer_sort 14 | 15 | cut -f1 ${s2s_infer_file} > ${idx} 16 | paste ${idx} ${org_ctc_infer_file} > ${ctc_infer} 17 | sort -n -t $'\t' ${ctc_infer} | cut -f2 > ${ctc_infer_sort} 18 | python3 ./cal_wer.py ${ref} ${ctc_infer_sort} -------------------------------------------------------------------------------- /egs/mustc/st/conf/inter.yaml: -------------------------------------------------------------------------------- 1 | inter-ctc-weight: 0.2 2 | inter-ctc-layers: 6,9 3 | share-inter-ctc: True 4 | 5 | ctc-pae: none 6 | # ctc-pae: inter_league 7 | 8 | # ctc-pae-ground-truth-ratio: 0.1 9 | # pae-gumbel: True 10 | # pae-distribution-hard: True 11 | # pae-drop-prob: 0.0 12 | # pae-distribution-cutoff: 10 13 | # share-pae-and-ctc: True 14 | # pae-embed-norm: True 15 | # pae-out-norm: True 16 | 17 | # ctc-self-distill-weight: 1 18 | # target-ctc-self-distill-weight: 1 19 | # ctc-self-distill-prob: 0.1 20 | # cal-all-ctc: True -------------------------------------------------------------------------------- /egs/mustc/st/local/cal_wer.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 
2 | 3 | set -e 4 | 5 | infer_dir=$1 6 | tag=$2 7 | s2s_infer_file=${infer_dir}/$3 8 | org_ctc_infer_file=${infer_dir}/$4 9 | ref=$5 10 | 11 | idx=${infer_dir}/${tag}_idx 12 | ctc_infer=${infer_dir}/${tag}_ctc_infer 13 | ctc_infer_sort=${infer_dir}/${tag}_ctc_infer_sort 14 | 15 | cut -f1 ${s2s_infer_file} > ${idx} 16 | paste ${idx} ${org_ctc_infer_file} > ${ctc_infer} 17 | sort -n -t $'\t' ${ctc_infer} | cut -f2 > ${ctc_infer_sort} 18 | python3 ./cal_wer.py ${ref} ${ctc_infer_sort} -------------------------------------------------------------------------------- /fairseq/data/id_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class IdDataset(FairseqDataset): 12 | def __getitem__(self, index): 13 | return index 14 | 15 | def __len__(self): 16 | return 0 17 | 18 | def collater(self, samples): 19 | return torch.tensor(samples) 20 | -------------------------------------------------------------------------------- /egs/aishell/asr/local/cal_wer.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | infer_dir=$1 6 | tag=$2 7 | s2s_infer_file=${infer_dir}/$3 8 | org_ctc_infer_file=${infer_dir}/$4 9 | ref=$5 10 | 11 | idx=${infer_dir}/${tag}_idx 12 | ctc_infer=${infer_dir}/${tag}_ctc_infer 13 | ctc_infer_sort=${infer_dir}/${tag}_ctc_infer_sort 14 | 15 | cut -f1 ${s2s_infer_file} > ${idx} 16 | paste ${idx} ${org_ctc_infer_file} > ${ctc_infer} 17 | sort -n -t $'\t' ${ctc_infer} | cut -f2 > ${ctc_infer_sort} 18 | python3 ./cal_wer.py ${ref} ${ctc_infer_sort} -------------------------------------------------------------------------------- /egs/mustc/asr/conf/inter.yaml: 
-------------------------------------------------------------------------------- 1 | inter-ctc-weight: 0.2 2 | inter-ctc-layers: 6,9 3 | share-inter-ctc: True 4 | 5 | ctc-pae: none 6 | # ctc-pae: inter_league 7 | 8 | # ctc-pae-ground-truth-ratio: 0.1 9 | # pae-gumbel: True 10 | # pae-distribution-hard: True 11 | # pae-drop-prob: 0.0 12 | # pae-distribution-cutoff: 10 13 | # share-pae-and-ctc: True 14 | # pae-embed-norm: True 15 | # pae-out-norm: True 16 | 17 | # ctc-self-distill-weight: 1 18 | # target-ctc-self-distill-weight: 1 19 | # ctc-self-distill-prob: 0.1 20 | # cal-all-ctc: True -------------------------------------------------------------------------------- /fairseq/data/offset_tokens_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
class OffsetTokensDataset(BaseWrapperDataset):
    """Wrapper that adds a constant ``offset`` to every item of a dataset.

    Useful for shifting token/label ids, e.g. to account for special
    symbols occupying the low end of a vocabulary.
    """

    def __init__(self, dataset, offset):
        super().__init__(dataset)
        # Constant added to each retrieved item.
        self.offset = offset

    def __getitem__(self, idx):
        item = self.dataset[idx]
        return item + self.offset
class GradMultiply(torch.autograd.Function):
    """Identity in the forward pass; scales gradients by a constant in backward.

    Usage: ``y = GradMultiply.apply(x, scale)``.
    """

    @staticmethod
    def forward(ctx, x, scale):
        # Remember the multiplier for the backward pass.
        ctx.scale = scale
        # Return a copy so the output is a distinct tensor from the input.
        return x.new(x)

    @staticmethod
    def backward(ctx, grad):
        # Scale the incoming gradient; ``scale`` itself receives no gradient.
        return ctx.scale * grad, None
-------------------------------------------------------------------------------- 1 | inter-ctc-weight: 0.2 2 | inter-ctc-layers: 6,9 3 | inter-ctc-drop-prob: 0 4 | share-inter-ctc: True 5 | 6 | ctc-pae: none 7 | # ctc-pae: inter_league 8 | 9 | # ctc-pae-ground-truth-ratio: 0.1 10 | # pae-gumbel: True 11 | # pae-distribution-hard: True 12 | # pae-drop-prob: 0.0 13 | # pae-distribution-cutoff: 10 14 | # share-pae-and-ctc: True 15 | # pae-embed-norm: True 16 | # pae-out-norm: True 17 | 18 | # ctc-self-distill-weight: 1 19 | # target-ctc-self-distill-weight: 1 20 | # ctc-self-distill-prob: 0.1 21 | # cal-all-ctc: True -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/inter.yaml: -------------------------------------------------------------------------------- 1 | inter-ctc-weight: 0.2 2 | inter-ctc-layers: 6,9 3 | inter-ctc-drop-prob: 0 4 | share-inter-ctc: True 5 | 6 | ctc-pae: none 7 | # ctc-pae: inter_league 8 | 9 | # ctc-pae-ground-truth-ratio: 0.1 10 | # pae-gumbel: True 11 | # pae-distribution-hard: True 12 | # pae-drop-prob: 0.0 13 | # pae-distribution-cutoff: 10 14 | # share-pae-and-ctc: True 15 | # pae-embed-norm: True 16 | # pae-out-norm: True 17 | 18 | # ctc-self-distill-weight: 1 19 | # target-ctc-self-distill-weight: 1 20 | # ctc-self-distill-prob: 0.1 21 | # cal-all-ctc: True -------------------------------------------------------------------------------- /scripts/pipe_replace_tsv.sh: -------------------------------------------------------------------------------- 1 | dir=/xuchen/st/data/must_c/en-$1 2 | 3 | org_dir=$dir/st_tok.bak/ 4 | replace_dir=$dir/st_tok/ 5 | out_dir=$dir/st_tok/ 6 | item=audio 7 | 8 | cp $org_dir/spm* $org_dir/config* $out_dir 9 | sed -i "s#/mnt/bn/nas-xc-1#/xuchen/st#g" $out_dir/config* 10 | 11 | tsv=train.tsv 12 | ./replace_tsv.sh $org_dir/$tsv $replace_dir/$tsv $out_dir/$tsv $item 13 | tsv=dev.tsv 14 | ./replace_tsv.sh $org_dir/$tsv $replace_dir/$tsv $out_dir/$tsv $item 15 
#!/usr/bin/env bash

# Compute WER for CTC hypotheses produced alongside a seq2seq decode.
# Args:
#   $1 infer_dir - directory holding the decoding outputs
#   $2 tag       - prefix for the intermediate files created here
#   $3           - seq2seq inference file name (first column = utt index)
#   $4           - raw CTC inference file name (one hypothesis per line,
#                  in decode order)
#   $5 ref       - reference transcript file

set -e

infer_dir=$1
tag=$2
s2s_infer_file=${infer_dir}/$3
org_ctc_infer_file=${infer_dir}/$4
ref=$5

# Intermediate artifacts, all written into ${infer_dir}.
idx=${infer_dir}/${tag}_idx
ctc_infer=${infer_dir}/${tag}_ctc_infer
ctc_infer_sort=${infer_dir}/${tag}_ctc_infer_sort

# Attach the seq2seq example indices to the CTC hypotheses, then restore
# corpus order by sorting numerically on that index column.
cut -f1 ${s2s_infer_file} > ${idx}
paste ${idx} ${org_ctc_infer_file} > ${ctc_infer}
sort -n -t $'\t' ${ctc_infer} | cut -f2 > ${ctc_infer_sort}
cmd="python3 ./cal_wer.py ${ref} ${ctc_infer_sort}"
echo $cmd
eval $cmd
class RollDataset(BaseWrapperDataset):
    """Wrapper that applies ``torch.roll`` with a fixed shift to every item."""

    def __init__(self, dataset, shifts):
        super().__init__(dataset)
        # Shift amount(s), forwarded verbatim to ``torch.roll``.
        self.shifts = shifts

    def __getitem__(self, index):
        return torch.roll(self.dataset[index], self.shifts)
-------------------------------------------------------------------------------- /examples/speech_recognition/hydra/conf/infer.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | defaults: 4 | - task: null 5 | - model: null 6 | 7 | hydra: 8 | run: 9 | dir: ${common_eval.results_path}/${dataset.gen_subset} 10 | sweep: 11 | dir: ${common_eval.results_path} 12 | subdir: ${dataset.gen_subset} 13 | common_eval: 14 | results_path: ${decoding.exp_dir}/decode/${decoding.decoder.name} 15 | path: ${decoding.exp_dir}/checkpoint_best.pt 16 | post_process: letter 17 | generation: 18 | nbest: 1 19 | beam: 500 20 | dataset: 21 | max_tokens: 1000000 22 | gen_subset: test 23 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/aipa_kd.yaml: -------------------------------------------------------------------------------- 1 | # Append-based Interpolation Augmentation 2 | inter-mixup: True 3 | 4 | inter-mixup-layer: -1 5 | inter-mixup-decoder-layer: 0 6 | inter-mixup-prob: 1.0 7 | inter-mixup-ratio: 1.0 8 | inter-mixup-beta: 0.2 9 | 10 | inter-mixup-keep-org: True 11 | inter-mixup-decoder-emb: True 12 | 13 | ctc-mixup-consistent-weight: 0.15 14 | inter-ctc-mixup-consistent-weight: 0.1 15 | mixup-consistent-weight: 0.5 16 | 17 | cal-mixup-loss: True 18 | no-specaugment: False 19 | layer-out-norm: False 20 | 21 | inter-mixup-ratio-decay: False 22 | inter-mixup-ratio-decay-params: 20000,40000,0 -------------------------------------------------------------------------------- /fairseq/model_parallel/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
import importlib
import os


# Auto-discover criterion implementations: import every non-private Python
# module that lives next to this __init__.py so their registration
# decorators run at package import time.
_pkg_dir = os.path.dirname(__file__)
for _fname in os.listdir(_pkg_dir):
    if not _fname.endswith(".py") or _fname.startswith("_"):
        continue
    _mod_name = _fname[: _fname.find(".py")]
    importlib.import_module("fairseq.model_parallel.criterions." + _mod_name)
-------------------------------------------------------------------------------- /examples/simultaneous_translation/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the criterions/ directory 11 | for file in os.listdir(os.path.dirname(__file__)): 12 | if file.endswith(".py") and not file.startswith("_"): 13 | module = file[: file.find(".py")] 14 | importlib.import_module("examples.simultaneous_translation.utils." + module) 15 | -------------------------------------------------------------------------------- /examples/speech_recognition/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | # ASG loss requires flashlight bindings 6 | files_to_skip = set() 7 | try: 8 | import flashlight.lib.sequence.criterion 9 | except ImportError: 10 | files_to_skip.add("ASG_loss.py") 11 | 12 | for file in os.listdir(os.path.dirname(__file__)): 13 | if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip: 14 | criterion_name = file[: file.find(".py")] 15 | importlib.import_module( 16 | "examples.speech_recognition.criterions." + criterion_name 17 | ) 18 | -------------------------------------------------------------------------------- /fairseq/models/speech_to_text/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .berard import * # noqa 7 | from .convtransformer import * # noqa 8 | from .s2t_transformer import * # noqa 9 | from .pdss2t_transformer import * # noqa 10 | from .s2t_sate import * # noqa 11 | from .s2t_dual import * # noqa 12 | from .s2t_ctc import * 13 | from .s2t_multibranch import * 14 | from .s2t_dynamic_transformer import * 15 | from .s2t_w2v2_transformer import * 16 | -------------------------------------------------------------------------------- /scripts/sacrebleu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 4 ]; then 4 | echo "usage: $0 TESTSET SRCLANG TGTLANG GEN" 5 | exit 1 6 | fi 7 | 8 | TESTSET=$1 9 | SRCLANG=$2 10 | TGTLANG=$3 11 | 12 | GEN=$4 13 | 14 | if ! command -v sacremoses &> /dev/null 15 | then 16 | echo "sacremoses could not be found, please install with: pip install sacremoses" 17 | exit 18 | fi 19 | 20 | grep ^H $GEN \ 21 | | sed 's/^H\-//' \ 22 | | sort -n -k 1 \ 23 | | cut -f 3 \ 24 | | sacremoses detokenize \ 25 | > $GEN.sorted.detok 26 | 27 | sacrebleu --test-set $TESTSET --language-pair "${SRCLANG}-${TGTLANG}" < $GEN.sorted.detok 28 | -------------------------------------------------------------------------------- /fairseq/model_parallel/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
#!/usr/bin/env bash

# Score CTC hypotheses (generated alongside a seq2seq decode) with BLEU.
# Args:
#   $1 infer_dir - directory holding the decoding outputs
#   $2 tag       - prefix for the intermediate files created here
#   $3           - seq2seq inference file name (first column = utt index)
#   $4           - raw CTC inference file name (one hypothesis per line)
#   $5 ref       - reference translation file
#   $6 tokenizer - tokenizer argument forwarded to cal_bleu.sh
#   $7 lang      - target language forwarded to cal_bleu.sh

set -e

infer_dir=$1
tag=$2
s2s_infer_file=${infer_dir}/$3
org_ctc_infer_file=${infer_dir}/$4
ref=$5
tokenizer=$6
lang=$7

# Intermediate artifacts, all written into ${infer_dir}.
idx=${infer_dir}/${tag}_idx
ctc_infer=${infer_dir}/${tag}_ctc_infer
ctc_infer_sort=${infer_dir}/${tag}_ctc_infer_sort

# Rebuild the order-restored hypothesis file only if it is not cached:
# join indices to hypotheses, sort numerically, drop the index column.
if [[ ! -f ${ctc_infer_sort} ]]; then
    cut -f1 ${s2s_infer_file} > ${idx}
    paste ${idx} ${org_ctc_infer_file} > ${ctc_infer}
    sort -n -t $'\t' ${ctc_infer} | cut -f2 > ${ctc_infer_sort}
fi

gen=${ctc_infer_sort}
./cal_bleu.sh ${ref} ${gen} ${tokenizer} ${lang}
-f ${ctc_infer_sort} ]]; then 18 | cut -f1 ${s2s_infer_file} > ${idx} 19 | paste ${idx} ${org_ctc_infer_file} > ${ctc_infer} 20 | sort -n -t $'\t' ${ctc_infer} | cut -f2 > ${ctc_infer_sort} 21 | fi 22 | 23 | gen=${ctc_infer_sort} 24 | ./cal_bleu.sh ${ref} ${gen} ${tokenizer} ${lang} -------------------------------------------------------------------------------- /egs/aishell/asr/local/cal_ctc_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | infer_dir=$1 6 | tag=$2 7 | s2s_infer_file=${infer_dir}/$3 8 | org_ctc_infer_file=${infer_dir}/$4 9 | ref=$5 10 | tokenizer=$6 11 | lang=$7 12 | 13 | idx=${infer_dir}/${tag}_idx 14 | ctc_infer=${infer_dir}/${tag}_ctc_infer 15 | ctc_infer_sort=${infer_dir}/${tag}_ctc_infer_sort 16 | 17 | if [[ ! -f ${ctc_infer_sort} ]]; then 18 | cut -f1 ${s2s_infer_file} > ${idx} 19 | paste ${idx} ${org_ctc_infer_file} > ${ctc_infer} 20 | sort -n -t $'\t' ${ctc_infer} | cut -f2 > ${ctc_infer_sort} 21 | fi 22 | 23 | gen=${ctc_infer_sort} 24 | ./cal_bleu.sh ${ref} ${gen} ${tokenizer} ${lang} -------------------------------------------------------------------------------- /fairseq/modules/same_pad.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
class SamePad(nn.Module):
    """Trim trailing timesteps so a padded 1-D convolution yields
    "same"-length output.

    For an even (non-causal) kernel, symmetric padding produces one extra
    frame, which is removed; odd kernels need no trimming. In the causal
    case all ``kernel_size - 1`` trailing frames are dropped.
    """

    def __init__(self, kernel_size, causal=False):
        super().__init__()
        # Number of trailing frames to cut from the time dimension.
        self.remove = (kernel_size - 1) if causal else (kernel_size + 1) % 2

    def forward(self, x):
        # Expects x of shape (batch, channels, time).
        if self.remove <= 0:
            return x
        return x[:, :, : -self.remove]
class RawLabelDataset(FairseqDataset):
    """Expose a pre-built, in-memory sequence of labels as a dataset."""

    def __init__(self, labels):
        super().__init__()
        # Labels are stored as-is; indexing is a plain lookup.
        self.labels = labels

    def __getitem__(self, index):
        return self.labels[index]

    def __len__(self):
        return len(self.labels)

    def collater(self, samples):
        # Stack the raw labels into a tensor batch.
        return torch.tensor(samples)
import torch
from torch import nn as nn


class Transpose(nn.Module):
    """Module wrapper around ``Tensor.transpose`` for use in ``nn.Sequential``."""

    def __init__(self, dim0, dim1):
        super().__init__()
        # The two dimensions to swap in forward().
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        return x.transpose(self.dim0, self.dim1)


class Permute3D(nn.Module):
    """Module wrapper around a fixed 3-way ``Tensor.permute``."""

    def __init__(self, dim0, dim1, dim2):
        super().__init__()
        # Target dimension order applied in forward().
        self.dim0 = dim0
        self.dim1 = dim1
        self.dim2 = dim2

    def forward(self, x):
        return x.permute(self.dim0, self.dim1, self.dim2)
9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /examples/roberta/commonsense_qa/download_cqa_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | OUTDIR=data/CommonsenseQA 8 | 9 | mkdir -p $OUTDIR 10 | 11 | wget -O $OUTDIR/train.jsonl https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl 12 | wget -O $OUTDIR/valid.jsonl https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl 13 | wget -O $OUTDIR/test.jsonl https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl 14 | wget -O $OUTDIR/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt 15 | -------------------------------------------------------------------------------- /fairseq/modules/unfold.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
def unfold1d(x, kernel_size, padding_l, pad_value=0):
    """unfold T x B x C to T x B x C x K"""
    if kernel_size <= 1:
        # A width-1 window is just the element itself.
        return x.unsqueeze(3)
    T, B, C = x.size()
    # Pad the time dimension so every position has a complete window:
    # padding_l frames on the left, the remainder on the right.
    padded = F.pad(
        x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value
    )
    # Zero-copy sliding window over the (padded) time axis via strides.
    return padded.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C))
-------------------------------------------------------------------------------- /egs/mustc/st/local/extract_txt_from_tsv.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import csv 3 | 4 | tsv_file = sys.argv[1] 5 | out_file = sys.argv[2] 6 | extract_item = sys.argv[3] 7 | 8 | with open(tsv_file) as f: 9 | reader = csv.DictReader( 10 | f, 11 | delimiter="\t", 12 | quotechar=None, 13 | doublequote=False, 14 | lineterminator="\n", 15 | quoting=csv.QUOTE_NONE, 16 | ) 17 | samples = [dict(e) for e in reader] 18 | 19 | fw = open(out_file, "w", encoding="utf-8") 20 | for s in samples: 21 | if extract_item in s: 22 | fw.write("%s\n" % s[extract_item]) 23 | else: 24 | print("Error in sample: ") 25 | print(s) 26 | exit() 27 | 28 | -------------------------------------------------------------------------------- /egs/aishell/asr/local/extract_txt_from_tsv.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import csv 3 | 4 | tsv_file = sys.argv[1] 5 | out_file = sys.argv[2] 6 | extract_item = sys.argv[3] 7 | 8 | with open(tsv_file) as f: 9 | reader = csv.DictReader( 10 | f, 11 | delimiter="\t", 12 | quotechar=None, 13 | doublequote=False, 14 | lineterminator="\n", 15 | quoting=csv.QUOTE_NONE, 16 | ) 17 | samples = [dict(e) for e in reader] 18 | 19 | fw = open(out_file, "w", encoding="utf-8") 20 | for s in samples: 21 | if extract_item in s: 22 | fw.write("%s\n" % s[extract_item]) 23 | else: 24 | print("Error in sample: ") 25 | print(s) 26 | exit() 27 | 28 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_big.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_m_8 2 | #arch: pdss2t_transformer_m_16 3 | #arch: pdss2t_transformer_m_32 4 | 5 | pds-fusion: True 6 | 7 | share-decoder-input-output-embed: True 8 | optimizer: adam 9 | clip-norm: 10.0 10 
| lr-scheduler: inverse_sqrt 11 | warmup-init-lr: 1e-7 12 | warmup-updates: 10000 13 | lr: 2e-3 14 | adam_betas: (0.9,0.98) 15 | 16 | criterion: label_smoothed_cross_entropy_with_ctc 17 | label_smoothing: 0.1 18 | 19 | dropout: 0.15 20 | activation-fn: relu 21 | encoder-ffn-embed-dim: 2048 22 | encoder-layers: 12 23 | decoder-layers: 6 24 | encoder-attention-heads: 4 25 | 26 | decoder-embed-dim: 512 27 | decoder-ffn-embed-dim: 2048 28 | decoder-attention-heads: 8 29 | -------------------------------------------------------------------------------- /fairseq/data/encoders/space_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | from fairseq.data.encoders import register_tokenizer 9 | from fairseq.dataclass import FairseqDataclass 10 | 11 | 12 | @register_tokenizer("space", dataclass=FairseqDataclass) 13 | class SpaceTokenizer(object): 14 | def __init__(self, *unused): 15 | self.space_tok = re.compile(r"\s+") 16 | 17 | def encode(self, x: str) -> str: 18 | return self.space_tok.sub(" ", x) 19 | 20 | def decode(self, x: str) -> str: 21 | return x 22 | -------------------------------------------------------------------------------- /fairseq/modules/lightconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="lightconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | "lightconv_cuda", 16 | [ 17 | "lightconv_cuda.cpp", 18 | "lightconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/purectc.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | optimizer: adam 3 | clip-norm: 10.0 4 | lr-scheduler: inverse_sqrt 5 | warmup-init-lr: 1e-7 6 | warmup-updates: 10000 7 | lr: 2e-3 8 | adam_betas: (0.9,0.98) 9 | 10 | criterion: ctc 11 | zero_infinity: True 12 | ctc-weight: 1.0 13 | 14 | encoder-normalize-before: True 15 | decoder-normalize-before: True 16 | 17 | subsampling-type: conv1d 18 | subsampling-layers: 2 19 | subsampling-filter: 1024 20 | subsampling-kernel: 5 21 | subsampling-stride: 2 22 | subsampling-norm: none 23 | subsampling-activation: glu 24 | 25 | dropout: 0.1 26 | activation-fn: relu 27 | encoder-embed-dim: 256 28 | encoder-ffn-embed-dim: 2048 29 | encoder-layers: 18 30 | encoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_8 2 | #arch: pdss2t_transformer_sd_16 3 | #arch: pdss2t_transformer_sd_32 4 | 5 | pds-fusion: True 6 | 7 | share-decoder-input-output-embed: True 8 | optimizer: adam 9 | clip-norm: 10.0 10 | lr-scheduler: inverse_sqrt 11 | warmup-init-lr: 1e-7 12 | warmup-updates: 10000 13 | lr: 2e-3 14 | adam_betas: (0.9,0.98) 15 | 16 | criterion: label_smoothed_cross_entropy_with_ctc 17 | label_smoothing: 0.1 18 | 19 | dropout: 0.1 20 | activation-fn: relu 21 | encoder-ffn-embed-dim: 2048 22 | 
encoder-layers: 30 23 | decoder-layers: 6 24 | encoder-attention-heads: 4 25 | 26 | decoder-embed-dim: 256 27 | decoder-ffn-embed-dim: 2048 28 | decoder-attention-heads: 4 29 | -------------------------------------------------------------------------------- /egs/librispeech/asr/local/extract_txt_from_tsv.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import csv 3 | 4 | tsv_file = sys.argv[1] 5 | out_file = sys.argv[2] 6 | extract_item = sys.argv[3] 7 | 8 | with open(tsv_file) as f: 9 | reader = csv.DictReader( 10 | f, 11 | delimiter="\t", 12 | quotechar=None, 13 | doublequote=False, 14 | lineterminator="\n", 15 | quoting=csv.QUOTE_NONE, 16 | ) 17 | samples = [dict(e) for e in reader] 18 | 19 | fw = open(out_file, "w", encoding="utf-8") 20 | for s in samples: 21 | if extract_item in s: 22 | fw.write("%s\n" % s[extract_item]) 23 | else: 24 | print("Error in sample: ", s, "when extract ", extract_item) 25 | exit() 26 | 27 | -------------------------------------------------------------------------------- /examples/simultaneous_translation/eval/agents/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | 11 | 12 | build_agent, register_agent, MONOTONIC_AGENT, _ = registry.setup_registry( 13 | "--agent-type" 14 | ) 15 | 16 | 17 | DEFAULT_EOS = "" 18 | GET = 0 19 | SEND = 1 20 | 21 | for file in os.listdir(os.path.dirname(__file__)): 22 | if file.endswith(".py") and not file.startswith("_"): 23 | module = file[: file.find(".py")] 24 | importlib.import_module("agents." 
+ module) 25 | -------------------------------------------------------------------------------- /fairseq/data/sort_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class SortDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, sort_order): 13 | super().__init__(dataset) 14 | if not isinstance(sort_order, (list, tuple)): 15 | sort_order = [sort_order] 16 | self.sort_order = sort_order 17 | 18 | assert all(len(so) == len(dataset) for so in sort_order) 19 | 20 | def ordered_indices(self): 21 | return np.lexsort(self.sort_order) 22 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="dynamicconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | name="dynamicconv_cuda", 16 | sources=[ 17 | "dynamicconv_cuda.cpp", 18 | "dynamicconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/basis.yaml: -------------------------------------------------------------------------------- 1 | train-subset: train 2 | valid-subset: dev 3 | 4 | max-epoch: 100 5 | max-update: 300000 6 | patience: 20 7 | post-process: sentencepiece 8 | 9 | #best_checkpoint_metric: loss 10 | #maximize_best_checkpoint_metric: False 11 | 12 | eval-bleu: True 13 | eval-bleu-args: {"beam": 5, "lenpen": 1.0} 14 | eval-bleu-detok: moses 15 | eval-bleu-remove-bpe: sentencepiece 16 | eval-bleu-print-samples: True 17 | best_checkpoint_metric: bleu 18 | maximize_best_checkpoint_metric: True 19 | 20 | # no-epoch-checkpoints: True 21 | keep-last-epochs: 1 22 | keep-best-checkpoints: 10 23 | 24 | num-workers: 8 25 | no-progress-bar: True 26 | log-interval: 100 27 | seed: 1 28 | report-accuracy: True 29 | skip-invalid-size-inputs-valid-test: True -------------------------------------------------------------------------------- /egs/mustc/mt/conf/basis.yaml: -------------------------------------------------------------------------------- 1 | train-subset: train 2 | valid-subset: valid 3 | 4 | max-epoch: 50 5 | max-update: 100000 6 | patience: 10 7 | post-process: sentencepiece 8 | 9 | #best_checkpoint_metric: loss 10 | #maximize_best_checkpoint_metric: False 11 | 12 | eval-bleu: True 13 | eval-bleu-args: {"beam": 5, "lenpen": 1.0} 14 | eval-bleu-detok: moses 15 | eval-bleu-remove-bpe: sentencepiece 16 | eval-bleu-print-samples: True 17 | best_checkpoint_metric: bleu 18 | maximize_best_checkpoint_metric: True 19 | 20 
| # no-epoch-checkpoints: True 21 | keep-last-epochs: 1 22 | keep-best-checkpoints: 10 23 | 24 | num-workers: 8 25 | no-progress-bar: True 26 | log-interval: 100 27 | seed: 1 28 | report-accuracy: True 29 | skip-invalid-size-inputs-valid-test: True -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/basis.yaml: -------------------------------------------------------------------------------- 1 | train-subset: train 2 | valid-subset: valid 3 | 4 | max-epoch: 30 5 | max-update: 500000 6 | patience: 5 7 | post-process: sentencepiece 8 | 9 | # best_checkpoint_metric: loss 10 | # maximize_best_checkpoint_metric: False 11 | 12 | eval-bleu: True 13 | eval-bleu-args: {"beam": 5, "lenpen": 1.0} 14 | eval-bleu-detok: moses 15 | eval-bleu-remove-bpe: sentencepiece 16 | eval-bleu-print-samples: True 17 | best_checkpoint_metric: bleu 18 | maximize_best_checkpoint_metric: True 19 | 20 | # no-epoch-checkpoints: True 21 | keep-last-epochs: 1 22 | keep-best-checkpoints: 10 23 | 24 | num-workers: 8 25 | no-progress-bar: True 26 | log-interval: 100 27 | seed: 1 28 | report-accuracy: True 29 | skip-invalid-size-inputs-valid-test: True -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: transformer 3 | 4 | optimizer: adam 5 | clip-norm: 10.0 6 | lr-scheduler: inverse_sqrt 7 | warmup-init-lr: 1e-7 8 | warmup-updates: 10000 9 | lr: 2e-3 10 | adam_betas: (0.9,0.98) 11 | 12 | criterion: ctc 13 | zero_infinity: True 14 | ctc-weight: 1.0 15 | 16 | encoder-normalize-before: True 17 | decoder-normalize-before: True 18 | 19 | subsampling-type: conv1d 20 | subsampling-layers: 2 21 | subsampling-filter: 1024 22 | subsampling-kernel: 5 23 | subsampling-stride: 2 24 | subsampling-norm: none 25 | subsampling-activation: glu 26 | 27 | dropout: 0.1 28 | activation-fn: relu 
29 | encoder-embed-dim: 256 30 | encoder-ffn-embed-dim: 2048 31 | encoder-layers: 18 32 | encoder-attention-heads: 4 -------------------------------------------------------------------------------- /fairseq/data/strip_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class StripTokenDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, id_to_strip): 11 | super().__init__(dataset) 12 | self.id_to_strip = id_to_strip 13 | 14 | def __getitem__(self, index): 15 | item = self.dataset[index] 16 | while len(item) > 0 and item[-1] == self.id_to_strip: 17 | item = item[:-1] 18 | while len(item) > 0 and item[0] == self.id_to_strip: 19 | item = item[1:] 20 | return item 21 | -------------------------------------------------------------------------------- /examples/multilingual/data_scripts/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Install dependency 3 | ```bash 4 | pip install -r requirement.txt 5 | ``` 6 | 7 | # Download the data set 8 | ```bash 9 | export WORKDIR_ROOT= 10 | 11 | ``` 12 | The downloaded data will be at $WORKDIR_ROOT/ML50 13 | 14 | # preprocess the data 15 | Install SPM [here](https://github.com/google/sentencepiece) 16 | ```bash 17 | export WORKDIR_ROOT= 18 | export SPM_PATH= 19 | ``` 20 | * $WORKDIR_ROOT/ML50/raw: extracted raw data 21 | * $WORKDIR_ROOT/ML50/dedup: dedup data 22 | * $WORKDIR_ROOT/ML50/clean: data with valid and test sentences removed from the dedup data 23 | 24 | 25 | -------------------------------------------------------------------------------- /examples/adaptive_span/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 
Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | # automatically import any Python files in the current directory 10 | cur_dir = os.path.dirname(__file__) 11 | for file in os.listdir(cur_dir): 12 | path = os.path.join(cur_dir, file) 13 | if ( 14 | not file.startswith("_") 15 | and not file.startswith(".") 16 | and (file.endswith(".py") or os.path.isdir(path)) 17 | ): 18 | mod_name = file[: file.find(".py")] if file.endswith(".py") else file 19 | module = importlib.import_module(__name__ + "." + mod_name) 20 | -------------------------------------------------------------------------------- /examples/speech_recognition/hydra/conf/hydra/sweeper/ax.yaml: -------------------------------------------------------------------------------- 1 | # @package hydra.sweeper 2 | _target_: hydra_plugins.hydra_ax_sweeper.ax_sweeper.AxSweeper 3 | max_batch_size: null 4 | ax_config: 5 | max_trials: 100 6 | early_stop: 7 | minimize: true 8 | max_epochs_without_improvement: 10 9 | epsilon: 1.0e-05 10 | experiment: 11 | name: ${dataset.gen_subset} 12 | objective_name: wer 13 | minimize: true 14 | parameter_constraints: null 15 | outcome_constraints: null 16 | status_quo: null 17 | client: 18 | verbose_logging: false 19 | random_seed: null 20 | params: 21 | decoding.decoder.lmweight: 22 | type: range 23 | bounds: [0.0, 5.0] 24 | decoding.decoder.wordscore: 25 | type: range 26 | bounds: [-5.0, 5.0] 27 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/purectc.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | optimizer: adam 3 | clip-norm: 10.0 4 | lr-scheduler: inverse_sqrt 5 | warmup-init-lr: 1e-7 6 | warmup-updates: 10000 7 | lr: 2e-3 8 | adam_betas: (0.9,0.98) 9 | 10 | criterion: ctc 11 | zero_infinity: True 12 
| 13 | encoder-embed-norm: True 14 | encoder-no-scale-embedding: True 15 | 16 | subsampling-type: conv1d 17 | subsampling-layers: 2 18 | subsampling-filter: 1024 19 | subsampling-kernel: 5 20 | subsampling-stride: 2 21 | subsampling-norm: none 22 | subsampling-activation: glu 23 | 24 | dropout: 0.1 25 | attention-dropout: 0.1 26 | activation-dropout: 0.1 27 | 28 | activation-fn: relu 29 | encoder-embed-dim: 256 30 | encoder-ffn-embed-dim: 2048 31 | encoder-layers: 18 32 | encoder-attention-heads: 4 33 | 34 | #load-pretrained-encoder-from: -------------------------------------------------------------------------------- /egs/aishell/asr/local/cal_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ref=$1 6 | gen=$2 7 | tokenizer=$3 8 | lang=$4 9 | lang_pair=en-${lang} 10 | 11 | record=$(mktemp -t temp.record.XXXXXX) 12 | if [[ ${tokenizer} -eq 1 ]]; then 13 | echo "MultiBLEU" > ${record} 14 | cmd="multi-bleu.perl ${ref} < ${gen}" 15 | eval $cmd | head -n 1 >> ${record} 16 | 17 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${ref} > ${ref}.detok" 18 | eval $cmd 19 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${gen} > ${gen}.detok" 20 | eval $cmd 21 | ref=${ref}.detok 22 | gen=${gen}.detok 23 | fi 24 | 25 | echo "SacreBLEU" >> ${record} 26 | cmd="cat ${gen} | sacrebleu ${ref} -m bleu -w 4 -l ${lang_pair}" 27 | eval $cmd >> ${record} 28 | cat ${record} 29 | rm ${record} -------------------------------------------------------------------------------- /egs/mustc/asr/local/cal_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ref=$1 6 | gen=$2 7 | tokenizer=$3 8 | lang=$4 9 | lang_pair=en-${lang} 10 | 11 | record=$(mktemp -t temp.record.XXXXXX) 12 | if [[ ${tokenizer} -eq 1 ]]; then 13 | echo "MultiBLEU" > ${record} 14 | cmd="multi-bleu.perl ${ref} < ${gen}" 15 | eval $cmd | head -n 1 >> 
${record} 16 | 17 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${ref} > ${ref}.detok" 18 | eval $cmd 19 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${gen} > ${gen}.detok" 20 | eval $cmd 21 | ref=${ref}.detok 22 | gen=${gen}.detok 23 | fi 24 | 25 | echo "SacreBLEU" >> ${record} 26 | cmd="cat ${gen} | sacrebleu ${ref} -m bleu -w 4 -l ${lang_pair}" 27 | eval $cmd >> ${record} 28 | cat ${record} 29 | rm ${record} -------------------------------------------------------------------------------- /egs/mustc/st/local/cal_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ref=$1 6 | gen=$2 7 | tokenizer=$3 8 | lang=$4 9 | lang_pair=en-${lang} 10 | 11 | record=$(mktemp -t temp.record.XXXXXX) 12 | if [[ ${tokenizer} -eq 1 ]]; then 13 | echo "MultiBLEU" > ${record} 14 | cmd="multi-bleu.perl ${ref} < ${gen}" 15 | eval $cmd | head -n 1 >> ${record} 16 | 17 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${ref} > ${ref}.detok" 18 | eval $cmd 19 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${gen} > ${gen}.detok" 20 | eval $cmd 21 | ref=${ref}.detok 22 | gen=${gen}.detok 23 | fi 24 | 25 | echo "SacreBLEU" >> ${record} 26 | cmd="cat ${gen} | sacrebleu ${ref} -m bleu -w 4 -l ${lang_pair}" 27 | eval $cmd >> ${record} 28 | cat ${record} 29 | rm ${record} -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/base.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer 2 | share-all-embeddings: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 8000 8 | lr: 1e-3 9 | adam_betas: (0.9,0.997) 10 | 11 | criterion: label_smoothed_cross_entropy 12 | label_smoothing: 0.1 13 | 14 | dropout: 0.1 15 | attention-dropout: 0.1 16 | activation-dropout: 0.1 17 | 18 | activation-fn: relu 19 | encoder-normalize-before: True 
20 | decoder-normalize-before: True 21 | encoder-embed-dim: 512 22 | encoder-ffn-embed-dim: 2048 23 | encoder-layers: 6 24 | decoder-layers: 6 25 | encoder-attention-heads: 8 26 | 27 | decoder-embed-dim: 512 28 | decoder-ffn-embed-dim: 2048 29 | decoder-attention-heads: 8 30 | 31 | #load-pretrained-encoder-from: 32 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /fairseq/clib/libnat_cuda/edit_dist.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #pragma once 10 | 11 | #include 12 | 13 | torch::Tensor LevenshteinDistanceCuda( 14 | torch::Tensor source, 15 | torch::Tensor target, 16 | torch::Tensor source_length, 17 | torch::Tensor target_length); 18 | 19 | torch::Tensor GenerateDeletionLabelCuda( 20 | torch::Tensor source, 21 | torch::Tensor operations); 22 | 23 | std::pair GenerateInsertionLabelCuda( 24 | torch::Tensor source, 25 | torch::Tensor operations); 26 | -------------------------------------------------------------------------------- /egs/librispeech/asr/local/cal_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ref=$1 6 | gen=$2 7 | tokenizer=$3 8 | lang=$4 9 | lang_pair=en-${lang} 10 | 11 | record=$(mktemp -t temp.record.XXXXXX) 12 | if [[ ${tokenizer} -eq 1 ]]; then 13 | echo "MultiBLEU" > ${record} 14 | cmd="multi-bleu.perl ${ref} < ${gen}" 15 | eval $cmd | head -n 1 >> ${record} 16 | 17 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${ref} > ${ref}.detok" 18 | eval $cmd 19 | cmd="detokenizer.perl -q -l ${lang} --threads 32 < ${gen} > ${gen}.detok" 20 | eval $cmd 21 | ref=${ref}.detok 22 | gen=${gen}.detok 23 | fi 24 | 25 | echo 
"SacreBLEU" >> ${record} 26 | cmd="cat ${gen} | sacrebleu ${ref} -m bleu -w 4 -l ${lang_pair}" 27 | eval $cmd >> ${record} 28 | cat ${record} 29 | rm ${record} -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/deep.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer 2 | share-all-embeddings: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 16000 8 | lr: 2e-3 9 | adam_betas: (0.9,0.997) 10 | 11 | criterion: label_smoothed_cross_entropy 12 | label_smoothing: 0.1 13 | 14 | dropout: 0.1 15 | attention-dropout: 0.1 16 | activation-dropout: 0.1 17 | 18 | activation-fn: relu 19 | encoder-normalize-before: True 20 | decoder-normalize-before: True 21 | encoder-embed-dim: 512 22 | encoder-ffn-embed-dim: 2048 23 | encoder-layers: 30 24 | decoder-layers: 6 25 | encoder-attention-heads: 8 26 | 27 | decoder-embed-dim: 512 28 | decoder-ffn-embed-dim: 2048 29 | decoder-attention-heads: 8 30 | 31 | #load-pretrained-encoder-from: 32 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/base_postnorm.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer 2 | share-all-embeddings: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 4000 8 | lr: 7e-4 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy 12 | label_smoothing: 0.1 13 | 14 | dropout: 0.1 15 | attention-dropout: 0.1 16 | activation-dropout: 0.1 17 | 18 | activation-fn: relu 19 | encoder-normalize-before: False 20 | decoder-normalize-before: False 21 | encoder-embed-dim: 512 22 | encoder-ffn-embed-dim: 2048 23 | encoder-layers: 6 24 | decoder-layers: 6 25 | encoder-attention-heads: 8 26 | 27 | decoder-embed-dim: 512 28 | 
decoder-ffn-embed-dim: 2048 29 | decoder-attention-heads: 8 30 | 31 | #load-pretrained-encoder-from: 32 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /examples/simultaneous_translation/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | 11 | 12 | ( 13 | build_monotonic_attention, 14 | register_monotonic_attention, 15 | MONOTONIC_ATTENTION_REGISTRY, 16 | _, 17 | ) = registry.setup_registry("--simul-type") 18 | 19 | for file in os.listdir(os.path.dirname(__file__)): 20 | if file.endswith(".py") and not file.startswith("_"): 21 | model_name = file[: file.find(".py")] 22 | importlib.import_module( 23 | "examples.simultaneous_translation.modules." 
+ model_name 24 | ) 25 | -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/big.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer_wmt_en_de_big_t2t 2 | share-all-embeddings: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 8000 8 | lr: 7e-4 9 | adam_betas: (0.9,0.997) 10 | 11 | criterion: label_smoothed_cross_entropy 12 | label_smoothing: 0.1 13 | 14 | dropout: 0.3 15 | attention-dropout: 0.1 16 | activation-dropout: 0.1 17 | 18 | activation-fn: relu 19 | encoder-normalize-before: True 20 | decoder-normalize-before: True 21 | encoder-embed-dim: 1024 22 | encoder-ffn-embed-dim: 4096 23 | encoder-layers: 6 24 | decoder-layers: 6 25 | encoder-attention-heads: 16 26 | 27 | decoder-embed-dim: 512 28 | decoder-ffn-embed-dim: 2048 29 | decoder-attention-heads: 8 30 | 31 | #load-pretrained-encoder-from: 32 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /fairseq/model_parallel/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.model_parallel.models." 
+ model_name) 21 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/basis.yaml: -------------------------------------------------------------------------------- 1 | train-subset: train 2 | valid-subset: dev 3 | 4 | max-epoch: 100 5 | max-update: 100000 6 | patience: 20 7 | post-process: sentencepiece 8 | 9 | # best-checkpoint-metric: loss 10 | # maximize-best-checkpoint-metric: False 11 | 12 | eval-wer: True 13 | eval-wer-args: {"beam": 5, "lenpen": 1.0} 14 | eval-wer-tok-args: {"wer_remove_punct": true, "wer_lowercase": true, "wer_char_level": false} 15 | eval-wer-remove-bpe: sentencepiece 16 | eval-wer-print-samples: True 17 | best_checkpoint_metric: dec_wer 18 | maximize_best_checkpoint_metric: False 19 | 20 | no-epoch-checkpoints: True 21 | # keep-last-epochs: 10 22 | keep-best-checkpoints: 10 23 | 24 | num-workers: 8 25 | no-progress-bar: True 26 | log-interval: 100 27 | seed: 1 28 | report-accuracy: True 29 | skip-invalid-size-inputs-valid-test: True -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/deep_ctc.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer_ctc 2 | share-all-embeddings: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 16000 8 | lr: 2e-3 9 | adam_betas: (0.9,0.997) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | dropout: 0.1 15 | attention-dropout: 0.1 16 | activation-dropout: 0.1 17 | 18 | activation-fn: relu 19 | encoder-normalize-before: True 20 | decoder-normalize-before: True 21 | encoder-embed-dim: 512 22 | encoder-ffn-embed-dim: 2048 23 | encoder-layers: 20 24 | decoder-layers: 6 25 | encoder-attention-heads: 8 26 | 27 | decoder-embed-dim: 512 28 | decoder-ffn-embed-dim: 2048 29 | decoder-attention-heads: 8 30 | 31 | #load-pretrained-encoder-from: 32 | 
#load-pretrained-decoder-from: 33 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/basis.yaml: -------------------------------------------------------------------------------- 1 | train-subset: train 2 | valid-subset: dev 3 | 4 | max-epoch: 100 5 | max-update: 100000 6 | patience: 20 7 | post-process: sentencepiece 8 | 9 | # best-checkpoint-metric: loss 10 | # maximize-best-checkpoint-metric: False 11 | 12 | eval-wer: True 13 | eval-wer-args: {"beam": 1, "lenpen": 1.0} 14 | eval-wer-tok-args: {"wer_remove_punct": true, "wer_lowercase": true, "wer_char_level": true} 15 | eval-wer-remove-bpe: sentencepiece 16 | eval-wer-print-samples: True 17 | best_checkpoint_metric: dec_wer 18 | maximize_best_checkpoint_metric: False 19 | 20 | no-epoch-checkpoints: True 21 | # keep-last-epochs: 10 22 | keep-best-checkpoints: 10 23 | 24 | num-workers: 8 25 | no-progress-bar: True 26 | log-interval: 100 27 | seed: 1 28 | report-accuracy: True 29 | skip-invalid-size-inputs-valid-test: True -------------------------------------------------------------------------------- /egs/aishell/asr/conf/big.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_m 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 0.0014 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | subsampling-type: conv1d 15 | subsampling-layers: 2 16 | subsampling-filter: 2048 17 | subsampling-kernel: 5 18 | subsampling-stride: 2 19 | subsampling-norm: none 20 | subsampling-activation: glu 21 | 22 | dropout: 0.15 23 | activation-fn: relu 24 | encoder-embed-dim: 512 25 | encoder-ffn-embed-dim: 2048 26 | encoder-layers: 12 27 | decoder-layers: 6 28 | encoder-attention-heads: 8 29 | 30 | decoder-embed-dim: 512 31 | 
decoder-ffn-embed-dim: 2048 32 | decoder-attention-heads: 8 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/compare_purectc_base.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: transformer 3 | 4 | optimizer: adam 5 | clip-norm: 10.0 6 | lr-scheduler: inverse_sqrt 7 | warmup-init-lr: 1e-7 8 | warmup-updates: 10000 9 | lr: 0.002 10 | adam_betas: (0.9,0.98) 11 | 12 | criterion: ctc 13 | ctc-weight: 1.0 14 | 15 | subsampling-type: conv2d 16 | subsampling-layers: 2 17 | subsampling-filter: 176 18 | subsampling-kernel: 3 19 | subsampling-stride: 2 20 | subsampling-norm: batch2d 21 | subsampling-activation: swish 22 | 23 | dropout: 0.1 24 | activation-fn: relu 25 | encoder-embed-dim: 176 26 | encoder-ffn-embed-dim: 704 27 | encoder-layers: 16 28 | encoder-attention-heads: 4 29 | 30 | macaron-style: True 31 | use-cnn-module: True 32 | cnn-module-kernel: 31 33 | encoder-activation-fn: swish 34 | encoder-attention-type: rel_pos -------------------------------------------------------------------------------- /egs/wmt16/mt/conf/big_postnorm.yaml: -------------------------------------------------------------------------------- 1 | arch: transformer_wmt_en_de_big 2 | share-all-embeddings: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 4000 8 | lr: 5e-4 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy 12 | label_smoothing: 0.1 13 | 14 | dropout: 0.3 15 | attention-dropout: 0.1 16 | activation-dropout: 0.1 17 | 18 | activation-fn: relu 19 | encoder-normalize-before: False 20 | decoder-normalize-before: False 21 | encoder-embed-dim: 1024 22 | encoder-ffn-embed-dim: 4096 23 | encoder-layers: 6 24 | decoder-layers: 6 25 | encoder-attention-heads: 16 26 | 27 | decoder-embed-dim: 1024 28 | decoder-ffn-embed-dim: 4096 29 | decoder-attention-heads: 16 30 | 
31 | #load-pretrained-encoder-from: 32 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /fairseq/models/huggingface/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/huggingface/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.models.huggingface." 
+ model_name) 21 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/big.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_m 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 0.0014 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | subsampling-type: conv1d 15 | subsampling-layers: 2 16 | subsampling-filter: 2048 17 | subsampling-kernel: 5 18 | subsampling-stride: 2 19 | subsampling-norm: none 20 | subsampling-activation: glu 21 | 22 | dropout: 0.15 23 | activation-fn: relu 24 | encoder-embed-dim: 512 25 | encoder-ffn-embed-dim: 2048 26 | encoder-layers: 12 27 | decoder-layers: 6 28 | encoder-attention-heads: 8 29 | 30 | decoder-embed-dim: 512 31 | decoder-ffn-embed-dim: 2048 32 | decoder-attention-heads: 8 -------------------------------------------------------------------------------- /fairseq/modules/gelu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
def gelu_accurate(x):
    """Tanh-based GELU approximation (Hendrycks & Gimpel).

    Computes 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    # Cache sqrt(2/pi) on the function object so it is computed only once.
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    inner = gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))


def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact GELU, evaluated in fp32 and cast back to the input dtype."""
    result = torch.nn.functional.gelu(x.float())
    return result.type_as(x)
Tools needed for all the languages except Arabic can be installed by running `install_dependencies.sh`.
If you want to evaluate Arabic models, please install the Arabic normalizer by following the instructions at http://alt.qcri.org/tools/arabic-normalizer/.
@register_bpe("characters")
class Characters(object):
    """Character-level "BPE": one token per character.

    Real spaces are first replaced with the U+2581 escape character so
    they survive the per-character space-joining, and are restored on
    decode.
    """

    def __init__(self, *unused):
        pass

    @staticmethod
    def add_args(parser):
        # No configurable options for this scheme.
        pass

    @staticmethod
    def encode(x: str) -> str:
        # Protect genuine spaces, then insert a space between every character.
        protected = x.replace(SPACE, SPACE_ESCAPE)
        return SPACE.join(protected)

    @staticmethod
    def decode(x: str) -> str:
        # Drop the separator spaces, then restore the protected ones.
        joined = x.replace(SPACE, "")
        return joined.replace(SPACE_ESCAPE, SPACE)
def main(args):
    """Normalize punctuation of each stdin line using Moses rules.

    Reads lines from stdin, applies :class:`MosesPunctNormalizer` for the
    configured language, and writes the normalized line to stdout
    (flushed per line so the script composes well in pipelines).
    """
    normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
    for raw_line in sys.stdin:
        normalized = normalizer.normalize(raw_line.rstrip())
        print(normalized, flush=True)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", "-l", default="en")
    parser.add_argument("--penn", "-p", action="store_true")
    main(parser.parse_args())
# Poll GPU utilization until $gpu_num GPUs look idle, then launch $cmd once.
gpu_num=4
cmd="sh train.sh"

while :
do
    # Snapshot current GPU status into a temp file.
    record=$(mktemp -t temp.record.XXXXXX)
    gpustat > $record
    # Device ids 0..N derived from the line count after dropping two header
    # lines. NOTE(review): `seq 0 N` yields N+1 ids, and gpustat typically
    # prints one header line, not two — possible off-by-one; verify against
    # the actual gpustat output format.
    all_devices=$(seq 0 "$(sed '1,2d' ${record} | wc -l)");

    count=0
    for dev in ${all_devices[@]}
    do
        # Line of this device in the record (after the header lines).
        line=$((dev + 2))
        # Third '|'-separated field, before the '/', is presumably the
        # utilization or memory-in-use figure — confirm against gpustat.
        use=$(head -n $line ${record} | tail -1 | cut -d '|' -f3 | cut -d '/' -f1)

        # Treat anything under 100 as "free enough" and collect the device.
        if [[ $use -lt 100 ]]; then
            device[$count]=$dev
            count=$((count + 1))
            if [[ $count -eq $gpu_num ]]; then
                break
            fi
        fi
    done
    # Not enough free GPUs yet: wait and re-poll. Otherwise run the command
    # and exit the monitor.
    if [[ ${#device[@]} -lt $gpu_num ]]; then
        sleep 60s
    else
        echo "Run $cmd"
        eval $cmd
        sleep 10s
        exit
    fi
done
class Fp32GroupNorm(nn.GroupNorm):
    """GroupNorm that always normalizes in fp32.

    Useful for fp16 training: the input (and affine parameters, if any)
    are upcast to float for the normalization, and the result is cast
    back to the input's dtype.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        # Upcast affine parameters only when they exist (affine=True).
        weight = None if self.weight is None else self.weight.float()
        bias = None if self.bias is None else self.bias.float()
        normalized = F.group_norm(
            input.float(), self.num_groups, weight, bias, self.eps
        )
        return normalized.type_as(input)
zero_infinity: True 14 | ctc-weight: 1.0 15 | 16 | encoder-normalize-before: True 17 | decoder-normalize-before: True 18 | 19 | subsampling-type: conv1d 20 | subsampling-layers: 2 21 | subsampling-filter: 1024 22 | subsampling-kernel: 5 23 | subsampling-stride: 2 24 | subsampling-norm: none 25 | subsampling-activation: glu 26 | 27 | dropout: 0.1 28 | activation-fn: relu 29 | encoder-embed-dim: 256 30 | encoder-ffn-embed-dim: 2048 31 | encoder-layers: 18 32 | encoder-attention-heads: 4 33 | 34 | # InterCTC 35 | inter-ctc-weight: 1.0 36 | inter-ctc-layers: 6,9,12,15 37 | share-inter-ctc: True -------------------------------------------------------------------------------- /fairseq/distributed/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .distributed_timeout_wrapper import DistributedTimeoutWrapper 7 | from .fully_sharded_data_parallel import fsdp_enable_wrap, fsdp_wrap, FullyShardedDataParallel 8 | from .legacy_distributed_data_parallel import LegacyDistributedDataParallel 9 | from .module_proxy_wrapper import ModuleProxyWrapper 10 | from .tpu_distributed_data_parallel import TPUDistributedDataParallel 11 | 12 | 13 | __all__ = [ 14 | "DistributedTimeoutWrapper", 15 | "fsdp_enable_wrap", 16 | "fsdp_wrap", 17 | "FullyShardedDataParallel", 18 | "LegacyDistributedDataParallel", 19 | "ModuleProxyWrapper", 20 | "TPUDistributedDataParallel", 21 | ] 22 | -------------------------------------------------------------------------------- /egs/mustc/asr/local/monitor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu_num=4 4 | cmd="sh train.sh" 5 | 6 | while : 7 | do 8 | record=$(mktemp -t temp.record.XXXXXX) 9 | gpustat > $record 10 | all_devices=$(seq 0 
"$(sed '1,2d' ${record} | wc -l)"); 11 | 12 | count=0 13 | for dev in ${all_devices[@]} 14 | do 15 | line=$((dev + 2)) 16 | use=$(head -n $line ${record} | tail -1 | cut -d '|' -f3 | cut -d '/' -f1) 17 | 18 | if [[ $use -lt 100 ]]; then 19 | device[$count]=$dev 20 | count=$((count + 1)) 21 | if [[ $count -eq $gpu_num ]]; then 22 | break 23 | fi 24 | fi 25 | done 26 | if [[ ${#device[@]} -lt $gpu_num ]]; then 27 | sleep 60s 28 | else 29 | echo "Run $cmd" 30 | eval $cmd 31 | sleep 10s 32 | exit 33 | fi 34 | done 35 | -------------------------------------------------------------------------------- /egs/mustc/mt/local/monitor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu_num=4 4 | cmd="sh train.sh" 5 | 6 | while : 7 | do 8 | record=$(mktemp -t temp.record.XXXXXX) 9 | gpustat > $record 10 | all_devices=$(seq 0 "$(sed '1,2d' ${record} | wc -l)"); 11 | 12 | count=0 13 | for dev in ${all_devices[@]} 14 | do 15 | line=$((dev + 2)) 16 | use=$(head -n $line ${record} | tail -1 | cut -d '|' -f3 | cut -d '/' -f1) 17 | 18 | if [[ $use -lt 100 ]]; then 19 | device[$count]=$dev 20 | count=$((count + 1)) 21 | if [[ $count -eq $gpu_num ]]; then 22 | break 23 | fi 24 | fi 25 | done 26 | if [[ ${#device[@]} -lt $gpu_num ]]; then 27 | sleep 60s 28 | else 29 | echo "Run $cmd" 30 | eval $cmd 31 | sleep 10s 32 | exit 33 | fi 34 | done 35 | -------------------------------------------------------------------------------- /egs/mustc/st/local/monitor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu_num=4 4 | cmd="sh train.sh" 5 | 6 | while : 7 | do 8 | record=$(mktemp -t temp.record.XXXXXX) 9 | gpustat > $record 10 | all_devices=$(seq 0 "$(sed '1,2d' ${record} | wc -l)"); 11 | 12 | count=0 13 | for dev in ${all_devices[@]} 14 | do 15 | line=$((dev + 2)) 16 | use=$(head -n $line ${record} | tail -1 | cut -d '|' -f3 | cut -d '/' -f1) 17 | 18 | if [[ 
$use -lt 100 ]]; then 19 | device[$count]=$dev 20 | count=$((count + 1)) 21 | if [[ $count -eq $gpu_num ]]; then 22 | break 23 | fi 24 | fi 25 | done 26 | if [[ ${#device[@]} -lt $gpu_num ]]; then 27 | sleep 60s 28 | else 29 | echo "Run $cmd" 30 | eval $cmd 31 | sleep 10s 32 | exit 33 | fi 34 | done 35 | -------------------------------------------------------------------------------- /egs/aishell/asr/local/monitor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu_num=4 4 | cmd="sh train.sh" 5 | 6 | while : 7 | do 8 | record=$(mktemp -t temp.record.XXXXXX) 9 | gpustat > $record 10 | all_devices=$(seq 0 "$(sed '1,2d' ${record} | wc -l)"); 11 | 12 | count=0 13 | for dev in ${all_devices[@]} 14 | do 15 | line=$((dev + 2)) 16 | use=$(head -n $line ${record} | tail -1 | cut -d '|' -f3 | cut -d '/' -f1) 17 | 18 | if [[ $use -lt 100 ]]; then 19 | device[$count]=$dev 20 | count=$((count + 1)) 21 | if [[ $count -eq $gpu_num ]]; then 22 | break 23 | fi 24 | fi 25 | done 26 | if [[ ${#device[@]} -lt $gpu_num ]]; then 27 | sleep 60s 28 | else 29 | echo "Run $cmd" 30 | eval $cmd 31 | sleep 10s 32 | exit 33 | fi 34 | done 35 | -------------------------------------------------------------------------------- /fairseq/data/encoders/nltk_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
@register_tokenizer("nltk", dataclass=FairseqDataclass)
class NLTKTokenizer(object):
    """Tokenizer backed by NLTK's ``word_tokenize``.

    ``encode`` tokenizes and space-joins; ``decode`` is the identity
    (the original spacing is not recoverable).
    """

    def __init__(self, *unused):
        # Import lazily so fairseq works without nltk installed.
        try:
            from nltk.tokenize import word_tokenize
        except ImportError:
            raise ImportError("Please install nltk with: pip install nltk")
        self.word_tokenize = word_tokenize

    def encode(self, x: str) -> str:
        tokens = self.word_tokenize(x)
        return " ".join(tokens)

    def decode(self, x: str) -> str:
        return x
in ${all_devices[@]} 14 | do 15 | line=$((dev + 2)) 16 | use=$(head -n $line ${record} | tail -1 | cut -d '|' -f3 | cut -d '/' -f1) 17 | 18 | if [[ $use -lt 100 ]]; then 19 | device[$count]=$dev 20 | count=$((count + 1)) 21 | if [[ $count -eq $gpu_num ]]; then 22 | break 23 | fi 24 | fi 25 | done 26 | if [[ ${#device[@]} -lt $gpu_num ]]; then 27 | sleep 60s 28 | else 29 | echo "Run $cmd" 30 | eval $cmd 31 | sleep 10s 32 | exit 33 | fi 34 | done 35 | -------------------------------------------------------------------------------- /examples/linformer/README.md: -------------------------------------------------------------------------------- 1 | # Linformer: Self-Attention with Linear Complexity (Wang et al., 2020) 2 | 3 | This example contains code to train Linformer models as described in our paper 4 | [Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768). 5 | 6 | ## Training a new Linformer RoBERTa model 7 | 8 | You can mostly follow the [RoBERTa pretraining README](/examples/roberta/README.pretraining.md), 9 | updating your training command with `--user-dir examples/linformer/linformer_src --arch linformer_roberta_base`. 10 | 11 | ## Citation 12 | 13 | If you use our work, please cite: 14 | 15 | ```bibtex 16 | @article{wang2020linformer, 17 | title={Linformer: Self-Attention with Linear Complexity}, 18 | author={Wang, Sinong and Li, Belinda and Khabsa, Madian and Fang, Han and Ma, Hao}, 19 | journal={arXiv preprint arXiv:2006.04768}, 20 | year={2020} 21 | } 22 | ``` 23 | -------------------------------------------------------------------------------- /docs/criterions.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | .. _Criterions: 5 | 6 | Criterions 7 | ========== 8 | 9 | Criterions compute the loss function given the model and batch, roughly:: 10 | 11 | loss = criterion(model, batch) 12 | 13 | .. 
# Registry for --tokenizer implementations (e.g. moses, nltk, space).
build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry(
    "--tokenizer",
    default=None,
)


# Registry for --bpe implementations (e.g. sentencepiece, characters, bytes).
build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry(
    "--bpe",
    default=None,
)


# Automatically import any Python files in the encoders/ directory so their
# @register_tokenizer / @register_bpe decorators run on package import.
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith(".py") and not file.startswith("_"):
        # Strip the ".py" suffix by slicing from the end. The previous
        # `file[: file.find(".py")]` cut at the *first* ".py" occurrence,
        # which mangles names containing ".py" before the extension.
        module_name = file[: -len(".py")]
        importlib.import_module("fairseq.data.encoders." + module_name)
3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | # 7 | # Please follow the instructions here http://alt.qcri.org/tools/arabic-normalizer/ 8 | # to install tools needed for Arabic 9 | 10 | echo "Please install Arabic tools: http://alt.qcri.org/tools/arabic-normalizer/" 11 | echo "Then update environment variables in tokenizer_ar.sh" 12 | exit 1 13 | 14 | SVMTOOL=... 15 | GOMOSESGO=... 16 | QCRI_ARABIC_NORMALIZER=... 17 | 18 | export PERL5LIB="$SVMTOOL/lib":"$GOMOSESGO/bin/MADA-3.2":$PERL5LIB 19 | 20 | 21 | tempfile=$(mktemp) 22 | cat - > $tempfile 23 | 24 | cd $QCRI_ARABIC_NORMALIZER 25 | 26 | bash qcri_normalizer_mada3.2_aramorph1.2.1.sh $tempfile 27 | cat $tempfile.mada_norm-aramorph.europarl_tok 28 | -------------------------------------------------------------------------------- /fairseq/scoring/chrf.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
@register_scorer("chrf")
class ChrFScorer(BaseScorer):
    """chrF scorer backed by sacrebleu's ``corpus_chrf``."""

    def __init__(self, args):
        super().__init__(args)
        # Import lazily so fairseq works without sacrebleu installed.
        import sacrebleu

        self.sacrebleu = sacrebleu

    def add_string(self, ref, pred):
        """Accumulate one (reference, hypothesis) pair."""
        self.ref.append(ref)
        self.pred.append(pred)

    def score(self, order=4):
        """Return the numeric chrF score over all accumulated pairs.

        Bug fix: the previous implementation did
        ``self.result_string(order).score``, but ``result_string`` returns
        the *formatted string* from ``.format()``, which has no ``.score``
        attribute and raised AttributeError. Read ``.score`` from the
        CHRF result object directly instead.
        """
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).score

    def result_string(self, order=4):
        """Return sacrebleu's human-readable chrF summary string."""
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format()
5 | 6 | # This file defines example configuration arguments for quantizing 7 | # a transformer model with product quantization 8 | 9 | n_centroids: 10 | Linear: 11 | key: in_features 12 | value: {"*": 8} 13 | Embedding: 14 | key: embedding_dim 15 | value: {"*": 8} 16 | 17 | block_sizes: 18 | Linear: 19 | key: fuzzy_name 20 | value: {fc: 8, attn: 4, emb: 4} 21 | Embedding: 22 | key: fuzzy_name 23 | value: {emb: 8} 24 | 25 | layers_to_quantize: 26 | - decoder\\.layers\\.\d+\\.fc[12] 27 | - decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01] 28 | - decoder\\.layers\\.\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj) 29 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=fairseq 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo. 
24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/basis.yaml: -------------------------------------------------------------------------------- 1 | train-subset: train-clean-100,train-clean-360,train-other-500 2 | valid-subset: dev-other,dev-clean 3 | 4 | max-epoch: 300 5 | max-update: 300000 6 | patience: 20 7 | post-process: sentencepiece 8 | weight-decay: 1e-4 9 | 10 | # best-checkpoint-metric: loss 11 | # maximize-best-checkpoint-metric: False 12 | 13 | eval-wer: True 14 | eval-wer-args: {"beam": 1, "lenpen": 1.0} 15 | eval-wer-tok-args: {"wer_remove_punct": true, "wer_lowercase": true, "wer_char_level": false} 16 | eval-wer-remove-bpe: sentencepiece 17 | eval-wer-print-samples: True 18 | best_checkpoint_metric: dec_wer 19 | maximize_best_checkpoint_metric: False 20 | 21 | validate-interval: 5 22 | # no-epoch-checkpoints: True 23 | keep-last-epochs: 10 24 | keep-best-checkpoints: 10 25 | 26 | num-workers: 8 27 | no-progress-bar: True 28 | log-interval: 100 29 | seed: 1 30 | report-accuracy: True 31 | skip-invalid-size-inputs-valid-test: True -------------------------------------------------------------------------------- /examples/multilingual/multilingual_fairseq_gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
class NumelDataset(BaseWrapperDataset):
    """Wraps a dataset, yielding the element count of each item.

    With ``reduce=True`` the collater sums the counts into a single int;
    otherwise it returns them as a 1-D tensor.
    """

    def __init__(self, dataset, reduce=False):
        super().__init__(dataset)
        self.reduce = reduce

    def __getitem__(self, index):
        elem = self.dataset[index]
        # Tensors report numel(); anything else goes through numpy's size.
        return torch.numel(elem) if torch.is_tensor(elem) else np.size(elem)

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        return sum(samples) if self.reduce else torch.tensor(samples)
pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 160_192_224_256 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | ctc-weight: 1.0 31 | post-process: sentencepiece 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 26 37 | encoder-attention-heads: 4 38 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_base_8_growth.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 256 5 | pds-stages: 4 6 | pds-layers: 6_6_6_6 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 192_224_224_256 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | ctc-weight: 1.0 31 | post-process: sentencepiece 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 24 37 | encoder-attention-heads: 4 38 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_base_8_growth360.yaml: 
-------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 360 5 | pds-stages: 4 6 | pds-layers: 4_4_4_4 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 200_256_256_360 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | ctc-weight: 1.0 31 | post-process: sentencepiece 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 16 37 | encoder-attention-heads: 4 38 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_big_16.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 512 5 | pds-stages: 4 6 | pds-layers: 3_3_8_4 7 | pds-ratios: 2_2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 512_512_512_512 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 4_4_4_4 18 | pds-attn-heads: 8_8_8_8 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 0.0014 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | zero_infinity: True 31 | ctc-weight: 1.0 32 | 33 | dropout: 0.1 34 | 
activation-fn: relu 35 | encoder-layers: 18 36 | 37 | #load-pretrained-encoder-from: 38 | #load-pretrained-decoder-from: 39 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_large_16.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 640 5 | pds-stages: 4 6 | pds-layers: 4_4_9_5 7 | pds-ratios: 2_2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 640_640_640_640 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 4_4_4_4 18 | pds-attn-heads: 8_8_8_8 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 0.001 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | zero_infinity: True 31 | ctc-weight: 1.0 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-layers: 22 36 | 37 | #load-pretrained-encoder-from: 38 | #load-pretrained-decoder-from: 39 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/purectc_pds_base_8.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 256 5 | pds-stages: 4 6 | pds-layers: 3_3_3_3 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 256_256_256_256 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: 
adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | post-process: sentencepiece 31 | 32 | dropout: 0.1 33 | activation-fn: relu 34 | encoder-ffn-embed-dim: 2048 35 | encoder-layers: 12 36 | encoder-attention-heads: 4 37 | 38 | #load-pretrained-encoder-from: 39 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/base.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_s 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 2e-3 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | subsampling-type: conv1d 15 | subsampling-layers: 2 16 | subsampling-filter: 1024 17 | subsampling-kernel: 5 18 | subsampling-stride: 2 19 | subsampling-norm: none 20 | subsampling-activation: glu 21 | 22 | dropout: 0.1 23 | activation-fn: relu 24 | encoder-embed-dim: 256 25 | encoder-ffn-embed-dim: 2048 26 | encoder-layers: 12 27 | decoder-layers: 6 28 | encoder-attention-heads: 4 29 | 30 | decoder-embed-dim: 256 31 | decoder-ffn-embed-dim: 2048 32 | decoder-attention-heads: 4 33 | attention-dropout: 0.1 34 | activation-dropout: 0.1 35 | 36 | #load-pretrained-encoder-from: 37 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/base.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_s 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 2e-3 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: 
label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | subsampling-type: conv1d 15 | subsampling-layers: 2 16 | subsampling-filter: 1024 17 | subsampling-kernel: 5 18 | subsampling-stride: 2 19 | subsampling-norm: none 20 | subsampling-activation: glu 21 | 22 | dropout: 0.1 23 | activation-fn: relu 24 | encoder-embed-dim: 256 25 | encoder-ffn-embed-dim: 2048 26 | encoder-layers: 12 27 | decoder-layers: 6 28 | encoder-attention-heads: 4 29 | 30 | decoder-embed-dim: 256 31 | decoder-ffn-embed-dim: 2048 32 | decoder-attention-heads: 4 33 | attention-dropout: 0.1 34 | activation-dropout: 0.1 35 | 36 | #load-pretrained-encoder-from: 37 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_base_16_growth_fusion256.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 256 5 | pds-stages: 4 6 | pds-layers: 3_3_6_3 7 | pds-ratios: 2_2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 192_224_256_320 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | ctc-weight: 1.0 31 | post-process: sentencepiece 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 20 37 | encoder-attention-heads: 4 38 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_base_16_growth_fusion320.yaml: 
-------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 320 5 | pds-stages: 4 6 | pds-layers: 4_4_6_4 7 | pds-ratios: 2_2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 192_224_256_320 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | ctc-weight: 1.0 31 | post-process: sentencepiece 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 18 37 | encoder-attention-heads: 4 38 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_base_16_growth_fusion360.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 360 5 | pds-stages: 4 6 | pds-layers: 4_4_4_4 7 | pds-ratios: 2_2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 200_256_256_360 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | ctc-weight: 1.0 31 | post-process: sentencepiece 32 | 33 | 
dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 16 37 | encoder-attention-heads: 4 38 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_base_8_growth_fusion256.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 256 5 | pds-stages: 4 6 | pds-layers: 3_4_4_3 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 192_256_256_320 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | ctc-weight: 1.0 31 | post-process: sentencepiece 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 18 37 | encoder-attention-heads: 4 38 | -------------------------------------------------------------------------------- /scripts/convert_dictionary.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (c) Facebook, Inc. and its affiliates. 2 | -- 3 | -- This source code is licensed under the MIT license found in the 4 | -- LICENSE file in the root directory of this source tree. 5 | -- 6 | -- Usage: convert_dictionary.lua 7 | require 'fairseq' 8 | require 'torch' 9 | require 'paths' 10 | 11 | if #arg < 1 then 12 | print('usage: convert_dictionary.lua ') 13 | os.exit(1) 14 | end 15 | if not paths.filep(arg[1]) then 16 | print('error: file does not exit: ' .. 
arg[1]) 17 | os.exit(1) 18 | end 19 | 20 | dict = torch.load(arg[1]) 21 | dst = paths.basename(arg[1]):gsub('.th7', '.txt') 22 | assert(dst:match('.txt$')) 23 | 24 | f = io.open(dst, 'w') 25 | for idx, symbol in ipairs(dict.index_to_symbol) do 26 | if idx > dict.cutoff then 27 | break 28 | end 29 | f:write(symbol) 30 | f:write(' ') 31 | f:write(dict.index_to_freq[idx]) 32 | f:write('\n') 33 | end 34 | f:close() 35 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/base_nonorm.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_s 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 2e-3 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | subsampling-type: conv1d 15 | subsampling-layers: 2 16 | subsampling-filter: 1024 17 | subsampling-kernel: 5 18 | subsampling-stride: 2 19 | subsampling-norm: none 20 | subsampling-activation: glu 21 | 22 | dropout: 0.1 23 | activation-fn: relu 24 | encoder-embed-dim: 256 25 | encoder-ffn-embed-dim: 2048 26 | encoder-layers: 12 27 | decoder-layers: 6 28 | encoder-attention-heads: 4 29 | 30 | decoder-embed-dim: 256 31 | decoder-ffn-embed-dim: 2048 32 | decoder-attention-heads: 4 33 | attention-dropout: 0.1 34 | activation-dropout: 0.1 35 | 36 | #load-pretrained-encoder-from: 37 | #load-pretrained-decoder-from: -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_big_8.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 512 5 | pds-stages: 4 6 | pds-layers: 4_5_5_4 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 
0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 512_512_512_512 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 4_4_4_4 18 | pds-attn-heads: 8_8_8_8 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 0.0014 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | zero_infinity: True 31 | ctc-weight: 1.0 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 18 37 | encoder-attention-heads: 8 38 | 39 | #load-pretrained-encoder-from: -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/purectc_pds_large_8.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 640 5 | pds-stages: 4 6 | pds-layers: 4_6_6_6 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 640_640_640_640 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 4_4_4_4 18 | pds-attn-heads: 8_8_8_8 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 0.001 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | zero_infinity: True 31 | ctc-weight: 1.0 32 | 33 | dropout: 0.1 34 | activation-fn: relu 35 | encoder-ffn-embed-dim: 2048 36 | encoder-layers: 22 37 | encoder-attention-heads: 8 38 | 39 | #load-pretrained-encoder-from: -------------------------------------------------------------------------------- 
/egs/mustc/asr/conf/purectc_pds_base_8_grow512.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 512 5 | pds-stages: 4 6 | encoder-layers: 10 7 | pds-layers: 3_2_2_3 8 | pds-ratios: 2_2_1_2 9 | pds-fusion: False 10 | pds-fusion-method: all_conv2 11 | pds-fusion-layers: 0_1_1_1 12 | pds-fusion-weight: 0.2_0.3_0.5 13 | pds-embed-dims: 256_384_384_512 14 | pds-ds-method: conv 15 | pds-embed-norm: True 16 | pds-position-embed: 1_1_1_1 17 | pds-kernel-sizes: 5_5_5_5 18 | pds-ffn-ratios: 8_4_4_4 19 | pds-attn-heads: 4_6_6_8 20 | 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 0.002 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: ctc 30 | post-process: sentencepiece 31 | 32 | dropout: 0.1 33 | activation-fn: relu 34 | 35 | macaron-style: True 36 | use-cnn-module: True 37 | cnn-module-kernel: 31 38 | encoder-activation-fn: swish 39 | 40 | #load-pretrained-encoder-from: 41 | -------------------------------------------------------------------------------- /fairseq/data/colorize_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class ColorizeDataset(BaseWrapperDataset): 12 | """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ 13 | 14 | def __init__(self, dataset, color_getter): 15 | super().__init__(dataset) 16 | self.color_getter = color_getter 17 | 18 | def collater(self, samples): 19 | base_collate = super().collater(samples) 20 | if len(base_collate) > 0: 21 | base_collate["net_input"]["colors"] = torch.tensor( 22 | list(self.color_getter(self.dataset, s["id"]) for s in samples), 23 | dtype=torch.long, 24 | ) 25 | return base_collate 26 | -------------------------------------------------------------------------------- /fairseq/data/pad_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import data_utils 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class PadDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, pad_idx, left_pad): 13 | super().__init__(dataset) 14 | self.pad_idx = pad_idx 15 | self.left_pad = left_pad 16 | 17 | def collater(self, samples): 18 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) 19 | 20 | 21 | class LeftPadDataset(PadDataset): 22 | def __init__(self, dataset, pad_idx): 23 | super().__init__(dataset, pad_idx, left_pad=True) 24 | 25 | 26 | class RightPadDataset(PadDataset): 27 | def __init__(self, dataset, pad_idx): 28 | super().__init__(dataset, pad_idx, left_pad=False) 29 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/big.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_m 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 0.0014 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | encoder-embed-norm: True 15 | encoder-no-scale-embedding: True 16 | 17 | subsampling-type: conv1d 18 | subsampling-layers: 2 19 | subsampling-filter: 2048 20 | subsampling-kernel: 5 21 | subsampling-stride: 2 22 | subsampling-norm: none 23 | subsampling-activation: glu 24 | 25 | dropout: 0.15 26 | activation-fn: relu 27 | encoder-embed-dim: 512 28 | encoder-ffn-embed-dim: 2048 29 | encoder-layers: 12 30 | decoder-layers: 6 31 | encoder-attention-heads: 8 32 | 33 | decoder-embed-dim: 512 34 | decoder-ffn-embed-dim: 2048 35 | decoder-attention-heads: 8 36 | 37 | #load-pretrained-encoder-from: 38 | #load-pretrained-decoder-from: 39 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/big.yaml: -------------------------------------------------------------------------------- 1 
| arch: s2t_transformer_m 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 1e-3 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | encoder-embed-norm: True 15 | encoder-no-scale-embedding: True 16 | 17 | subsampling-type: conv1d 18 | subsampling-layers: 2 19 | subsampling-filter: 2048 20 | subsampling-kernel: 5 21 | subsampling-stride: 2 22 | subsampling-norm: none 23 | subsampling-activation: glu 24 | 25 | dropout: 0.15 26 | activation-fn: relu 27 | encoder-embed-dim: 512 28 | encoder-ffn-embed-dim: 2048 29 | encoder-layers: 12 30 | decoder-layers: 6 31 | encoder-attention-heads: 8 32 | 33 | decoder-embed-dim: 512 34 | decoder-ffn-embed-dim: 2048 35 | decoder-attention-heads: 8 36 | 37 | #load-pretrained-encoder-from: 38 | #load-pretrained-decoder-from: 39 | -------------------------------------------------------------------------------- /egs/wmt16/mt/local/replace-unicode-punctuation.perl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | # 3 | # This file is part of moses. Its use is licensed under the GNU Lesser General 4 | # Public License version 2.1 or, at your option, any later version. 5 | 6 | use warnings; 7 | use strict; 8 | 9 | #binmode(STDIN, ":utf8"); 10 | #binmode(STDOUT, ":utf8"); 11 | 12 | while() { 13 | s/,/,/g; 14 | s/。 */. /g; 15 | s/、/,/g; 16 | s/”/"/g; 17 | s/“/"/g; 18 | s/∶/:/g; 19 | s/:/:/g; 20 | s/?/\?/g; 21 | s/《/"/g; 22 | s/》/"/g; 23 | s/)/\)/g; 24 | s/!/\!/g; 25 | s/(/\(/g; 26 | s/;/;/g; 27 | s/1/"/g; 28 | s/」/"/g; 29 | s/「/"/g; 30 | s/0/0/g; 31 | s/3/3/g; 32 | s/2/2/g; 33 | s/5/5/g; 34 | s/6/6/g; 35 | s/9/9/g; 36 | s/7/7/g; 37 | s/8/8/g; 38 | s/4/4/g; 39 | s/. */. 
/g; 40 | s/~/\~/g; 41 | s/’/\'/g; 42 | s/…/\.\.\./g; 43 | s/━/\-/g; 44 | s/〈/\/g; 46 | s/【/\[/g; 47 | s/】/\]/g; 48 | s/%/\%/g; 49 | print $_; 50 | } 51 | -------------------------------------------------------------------------------- /fairseq/clib/libbleu/module.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include 10 | 11 | 12 | static PyMethodDef method_def[] = { 13 | {NULL, NULL, 0, NULL} 14 | }; 15 | 16 | static struct PyModuleDef module_def = { 17 | PyModuleDef_HEAD_INIT, 18 | "libbleu", /* name of module */ 19 | NULL, /* module documentation, may be NULL */ 20 | -1, /* size of per-interpreter state of the module, 21 | or -1 if the module keeps state in global variables. */ 22 | method_def 23 | }; 24 | 25 | 26 | #if PY_MAJOR_VERSION == 2 27 | PyMODINIT_FUNC init_libbleu() 28 | #else 29 | PyMODINIT_FUNC PyInit_libbleu() 30 | #endif 31 | { 32 | PyObject *m = PyModule_Create(&module_def); 33 | if (!m) { 34 | return NULL; 35 | } 36 | return m; 37 | } 38 | -------------------------------------------------------------------------------- /examples/megatron_11b/detok.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | import argparse 8 | import fileinput 9 | 10 | import sacremoses 11 | 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser(description="") 15 | parser.add_argument("files", nargs="*", help="input files") 16 | args = parser.parse_args() 17 | 18 | detok = sacremoses.MosesDetokenizer() 19 | 20 | for line in fileinput.input(args.files, openhook=fileinput.hook_compressed): 21 | print( 22 | detok.detokenize(line.strip().split(" ")) 23 | .replace(" @", "") 24 | .replace("@ ", "") 25 | .replace(" =", "=") 26 | .replace("= ", "=") 27 | .replace(" – ", "–") 28 | ) 29 | 30 | 31 | if __name__ == "__main__": 32 | main() 33 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/purectc_pds_base_8_compare.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 200 5 | pds-stages: 3 6 | pds-layers: 4_4_4 7 | pds-ratios: 2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 200_200_200 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1 16 | pds-kernel-sizes: 5_5_5 17 | pds-ffn-ratios: 4_4_4 18 | pds-attn-heads: 4_4_4 19 | 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 0.0015 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: ctc 29 | post-process: sentencepiece 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-layers: 12 34 | 35 | #macaron-style: True 36 | #use-cnn-module: True 37 | #cnn-module-kernel: 15 38 | #encoder-activation-fn: swish 39 | #encoder-attention-type: rel_pos 40 | 41 | #load-pretrained-encoder-from: 42 | -------------------------------------------------------------------------------- /examples/unsupervised_quality_estimation/repeat_lines.py: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import argparse 7 | import sys 8 | 9 | 10 | def _normalize_spaces(line): 11 | return " ".join(line.split()) 12 | 13 | 14 | def main(): 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("-i", "--input_file", required=True, type=str) 17 | parser.add_argument("-n", "--repeat_times", required=True, type=int) 18 | parser.add_argument("-o", "--output_file", required=False, type=str) 19 | args = parser.parse_args() 20 | stream = open(args.output_file, "w") if args.output_file else sys.stdout 21 | 22 | for line in open(args.input_file): 23 | for _ in range(args.repeat_times): 24 | stream.write(_normalize_spaces(line) + "\n") 25 | 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/purectc_pds_base_8_grow_compare.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_ctc 2 | encoder-type: pds 3 | 4 | encoder-embed-dim: 240 5 | pds-stages: 3 6 | pds-layers: 4_4_4 7 | pds-ratios: 2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 120_168_240 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1 16 | pds-kernel-sizes: 5_5_5 17 | pds-ffn-ratios: 4_4_4 18 | pds-attn-heads: 4_4_4 19 | 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 0.0015 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: ctc 29 | post-process: sentencepiece 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-layers: 12 34 | 35 | #macaron-style: True 36 | 
#use-cnn-module: True 37 | #cnn-module-kernel: 15 38 | #encoder-activation-fn: swish 39 | #encoder-attention-type: rel_pos 40 | 41 | #load-pretrained-encoder-from: 42 | -------------------------------------------------------------------------------- /examples/multilingual/data_scripts/preprocess_ML50_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | if [ -z $WORKDIR_ROOT ] ; 9 | then 10 | echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." 11 | exit 12 | fi 13 | 14 | if [ -z $SPM_PATH ] ; 15 | then 16 | echo "Please install sentence piecence from https://github.com/google/sentencepiece and set SPM_PATH pointing to the installed spm_encode.py. Exitting..." 17 | exit 18 | fi 19 | 20 | ML50=${WORKDIR_ROOT}/ML50 21 | 22 | mkdir -p $ML50/dedup 23 | mkdir -p $ML50/cleaned_dedup 24 | 25 | python ./dedup_all.py --from-folder $ML50/raw --to-folder $ML50/dedup 26 | python ./remove_valid_test_in_train.py --from-folder $ML50/dedup --to-folder $ML50/clean 27 | python ./binarize.py --raw-folder $ML50/clean -------------------------------------------------------------------------------- /examples/translation_moe/translation_moe_src/logsumexp_moe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class LogSumExpMoE(torch.autograd.Function): 10 | """Standard LogSumExp forward pass, but use *posterior* for the backward. 
11 | 12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" 13 | (Shen et al., 2019) `_. 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, logp, posterior, dim=-1): 18 | ctx.save_for_backward(posterior) 19 | ctx.dim = dim 20 | return torch.logsumexp(logp, dim=dim) 21 | 22 | @staticmethod 23 | def backward(ctx, grad_output): 24 | (posterior,) = ctx.saved_tensors 25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior 26 | return grad_logp, None, None 27 | -------------------------------------------------------------------------------- /fairseq/data/encoders/bytes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | from fairseq.data.encoders import register_bpe 8 | from fairseq.data.encoders.byte_utils import ( 9 | SPACE, 10 | SPACE_ESCAPE, 11 | byte_encode, 12 | smart_byte_decode, 13 | ) 14 | 15 | 16 | @register_bpe("bytes") 17 | class Bytes(object): 18 | def __init__(self, *unused): 19 | pass 20 | 21 | @staticmethod 22 | def add_args(parser): 23 | pass 24 | 25 | @staticmethod 26 | def encode(x: str) -> str: 27 | encoded = byte_encode(x) 28 | escaped = encoded.replace(SPACE, SPACE_ESCAPE) 29 | return SPACE.join(list(escaped)) 30 | 31 | @staticmethod 32 | def decode(x: str) -> str: 33 | unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) 34 | return smart_byte_decode(unescaped) 35 | -------------------------------------------------------------------------------- /examples/language_model/prepare-wikitext-103.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh 3 | 4 | URLS=( 5 | "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip" 6 | 
) 7 | FILES=( 8 | "wikitext-103-v1.zip" 9 | ) 10 | 11 | for ((i=0;i<${#URLS[@]};++i)); do 12 | file=${FILES[i]} 13 | if [ -f $file ]; then 14 | echo "$file already exists, skipping download" 15 | else 16 | url=${URLS[i]} 17 | wget "$url" 18 | if [ -f $file ]; then 19 | echo "$url successfully downloaded." 20 | else 21 | echo "$url not successfully downloaded." 22 | exit -1 23 | fi 24 | if [ ${file: -4} == ".tgz" ]; then 25 | tar zxvf $file 26 | elif [ ${file: -4} == ".tar" ]; then 27 | tar xvf $file 28 | elif [ ${file: -4} == ".zip" ]; then 29 | unzip $file 30 | fi 31 | fi 32 | done 33 | cd .. 34 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/reproduction_aipa_kd.yaml: -------------------------------------------------------------------------------- 1 | # Append-based Interpolation Augmentation 2 | inter-mixup: True 3 | 4 | inter-mixup-layer: -1 5 | inter-mixup-decoder-layer: 0 6 | inter-mixup-prob: 1.0 7 | inter-mixup-ratio: 1.0 8 | inter-mixup-beta: 0.2 9 | 10 | inter-mixup-keep-org: True 11 | inter-mixup-decoder-emb: True 12 | 13 | cal-mixup-loss: True 14 | no-specaugment: False 15 | layer-out-norm: False 16 | 17 | inter-mixup-ratio-decay: False 18 | inter-mixup-ratio-decay-params: 20000,40000,0 19 | 20 | # MTL 21 | ctc-weight: 0.3 22 | inter-ctc-weight: 0.2 23 | inter-ctc-layers: 6,9 24 | share-inter-ctc: True 25 | share-ctc-and-embed: True 26 | 27 | ctc-pae: inter_league 28 | pae-unnorm-input: True 29 | 30 | ctc-mixup-consistent-weight: 0.15 31 | inter-ctc-mixup-consistent-weight: 0.1 32 | mixup-consistent-weight: 0.5 33 | 34 | # Conformer 35 | macaron-style: True 36 | use-cnn-module: True 37 | cnn-module-kernel: 15 38 | encoder-attention-type: rel_pos 39 | encoder-activation-fn: swish 40 | layer-padding-mask: True -------------------------------------------------------------------------------- /docs/optim.rst: -------------------------------------------------------------------------------- 1 | .. 
role:: hidden 2 | :class: hidden-section 3 | 4 | .. _optimizers: 5 | 6 | Optimizers 7 | ========== 8 | 9 | Optimizers update the Model parameters based on the gradients. 10 | 11 | .. automodule:: fairseq.optim 12 | :members: 13 | 14 | .. autoclass:: fairseq.optim.FairseqOptimizer 15 | :members: 16 | :undoc-members: 17 | 18 | .. autoclass:: fairseq.optim.adadelta.Adadelta 19 | :members: 20 | :undoc-members: 21 | .. autoclass:: fairseq.optim.adagrad.Adagrad 22 | :members: 23 | :undoc-members: 24 | .. autoclass:: fairseq.optim.adafactor.FairseqAdafactor 25 | :members: 26 | :undoc-members: 27 | .. autoclass:: fairseq.optim.adam.FairseqAdam 28 | :members: 29 | :undoc-members: 30 | .. autoclass:: fairseq.optim.fp16_optimizer.FP16Optimizer 31 | :members: 32 | :undoc-members: 33 | .. autoclass:: fairseq.optim.nag.FairseqNAG 34 | :members: 35 | :undoc-members: 36 | .. autoclass:: fairseq.optim.sgd.SGD 37 | :members: 38 | :undoc-members: 39 | -------------------------------------------------------------------------------- /examples/constrained_decoding/tok.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree. 
7 | 8 | import sys 9 | 10 | import sacremoses 11 | 12 | 13 | def main(args): 14 | """Tokenizes, preserving tabs""" 15 | mt = sacremoses.MosesTokenizer(lang=args.lang) 16 | 17 | def tok(s): 18 | return mt.tokenize(s, return_str=True) 19 | 20 | for line in sys.stdin: 21 | parts = list(map(tok, line.split("\t"))) 22 | print(*parts, sep="\t", flush=True) 23 | 24 | 25 | if __name__ == "__main__": 26 | import argparse 27 | 28 | parser = argparse.ArgumentParser() 29 | parser.add_argument("--lang", "-l", default="en") 30 | parser.add_argument("--penn", "-p", action="store_true") 31 | parser.add_argument("--fields", "-f", help="fields to tokenize") 32 | args = parser.parse_args() 33 | 34 | main(args) 35 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/big_wenet.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_m 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 1e-3 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | subsampling-type: conv2d 15 | subsampling-layers: 2 16 | subsampling-filter: 512 17 | subsampling-kernel: 3 18 | subsampling-stride: 2 19 | subsampling-norm: none 20 | subsampling-activation: relu 21 | 22 | dropout: 0.15 23 | activation-fn: relu 24 | encoder-embed-dim: 512 25 | encoder-ffn-embed-dim: 2048 26 | encoder-layers: 12 27 | decoder-layers: 6 28 | encoder-attention-heads: 8 29 | 30 | decoder-embed-dim: 512 31 | decoder-ffn-embed-dim: 2048 32 | decoder-attention-heads: 8 33 | 34 | cnn-module-norm: layer_norm 35 | 36 | load-pretrained-encoder-from: /home/xuchen/after.pt 37 | load-pretrained-decoder-from: /home/xuchen/after.pt 38 | #load-pretrained-decoder-from: 39 | -------------------------------------------------------------------------------- 
/egs/mustc/st/conf/reproduction_aipa_kd_womixuploss.yaml: -------------------------------------------------------------------------------- 1 | # Append-based Interpolation Augmentation 2 | inter-mixup: True 3 | 4 | inter-mixup-layer: -1 5 | inter-mixup-decoder-layer: 0 6 | inter-mixup-prob: 1.0 7 | inter-mixup-ratio: 1.0 8 | inter-mixup-beta: 0.2 9 | 10 | inter-mixup-keep-org: True 11 | inter-mixup-decoder-emb: True 12 | 13 | cal-mixup-loss: False 14 | no-specaugment: False 15 | layer-out-norm: False 16 | 17 | inter-mixup-ratio-decay: False 18 | inter-mixup-ratio-decay-params: 20000,40000,0 19 | 20 | # MTL 21 | ctc-weight: 0.3 22 | share-ctc-and-embed: True 23 | inter-ctc-weight: 0.2 24 | inter-ctc-layers: 6,9 25 | share-inter-ctc: True 26 | 27 | ctc-pae: inter_league 28 | pae-unnorm-input: True 29 | 30 | ctc-mixup-consistent-weight: 0.15 31 | inter-ctc-mixup-consistent-weight: 0.1 32 | mixup-consistent-weight: 0.5 33 | 34 | # Conformer 35 | macaron-style: True 36 | use-cnn-module: True 37 | cnn-module-kernel: 15 38 | encoder-attention-type: rel_pos 39 | encoder-activation-fn: swish 40 | layer-padding-mask: True -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_8_444.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 3 5 | pds-layers: 4_4_4 6 | pds-ratios: 2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1 15 | pds-kernel-sizes: 5_5_5 16 | pds-ffn-ratios: 8_8_8 17 | pds-attn-heads: 4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: 
(0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/mustc/asr/conf/base.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_s 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 2e-3 9 | adam_betas: (0.9,0.98) 10 | 11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | encoder-embed-norm: True 15 | encoder-no-scale-embedding: True 16 | 17 | subsampling-type: conv1d 18 | subsampling-layers: 2 19 | subsampling-filter: 1024 20 | subsampling-kernel: 5 21 | subsampling-stride: 2 22 | subsampling-norm: none 23 | subsampling-activation: glu 24 | 25 | dropout: 0.1 26 | activation-fn: relu 27 | encoder-embed-dim: 256 28 | encoder-ffn-embed-dim: 2048 29 | encoder-layers: 12 30 | decoder-layers: 6 31 | encoder-attention-heads: 4 32 | 33 | decoder-embed-dim: 256 34 | decoder-ffn-embed-dim: 2048 35 | decoder-attention-heads: 4 36 | attention-dropout: 0.1 37 | activation-dropout: 0.1 38 | 39 | #load-pretrained-encoder-from: 40 | #load-pretrained-decoder-from: 41 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/base.yaml: -------------------------------------------------------------------------------- 1 | arch: s2t_transformer_s 2 | share-decoder-input-output-embed: True 3 | optimizer: adam 4 | clip-norm: 10.0 5 | lr-scheduler: inverse_sqrt 6 | warmup-init-lr: 1e-7 7 | warmup-updates: 10000 8 | lr: 2e-3 9 | adam_betas: (0.9,0.98) 10 | 
11 | criterion: label_smoothed_cross_entropy_with_ctc 12 | label_smoothing: 0.1 13 | 14 | encoder-embed-norm: True 15 | encoder-no-scale-embedding: True 16 | 17 | subsampling-type: conv1d 18 | subsampling-layers: 2 19 | subsampling-filter: 1024 20 | subsampling-kernel: 5 21 | subsampling-stride: 2 22 | subsampling-norm: none 23 | subsampling-activation: glu 24 | 25 | dropout: 0.1 26 | activation-fn: relu 27 | encoder-embed-dim: 256 28 | encoder-ffn-embed-dim: 2048 29 | encoder-layers: 12 30 | decoder-layers: 6 31 | encoder-attention-heads: 4 32 | 33 | decoder-embed-dim: 256 34 | decoder-ffn-embed-dim: 2048 35 | decoder-attention-heads: 4 36 | attention-dropout: 0.1 37 | activation-dropout: 0.1 38 | 39 | #load-pretrained-encoder-from: 40 | #load-pretrained-decoder-from: 41 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/xinter.yaml: -------------------------------------------------------------------------------- 1 | inter-xctc-weight: 0.2 2 | inter-xctc-layers: 6,9 3 | 4 | xctc-pae: none 5 | # xctc-pae: inter_league 6 | 7 | xctc-cross-attn: False 8 | cross-attn-start-layer: 7 9 | cross-attn-layer: 6 10 | cross-attn-collaboration-mode: parallel 11 | cross-attn-league-s1-ratio: 0.5 12 | cross-attn-league-s2-ratio: 0.5 13 | cross-attn-league-out-norm: False 14 | cross-attn-league-gated: False 15 | cross-attn-league-drop-net: False 16 | cross-attn-league-drop-net-prob: 0.2 17 | cross-attn-league-drop-net-mix: False 18 | 19 | # xctc-pae-ground-truth-ratio: 0.1 20 | # xctc-pae-ground-truth-ratio-adaptive: True 21 | # xctc-pae-ground-truth-only-mistake: True 22 | # pae-oracle-smooth: True 23 | # pae-gumbel: True 24 | # pae-distribution-hard: True 25 | # pae-drop-prob: 0.0 26 | # pae-distribution-cutoff: 10 27 | # share-pae-and-xctc: True 28 | # pae-embed-norm: True 29 | # pae-out-norm: True 30 | 31 | # ctc-self-distill-weight: 1 32 | # target-ctc-self-distill-weight: 1 33 | # ctc-self-distill-prob: 0.1 34 | # 
cal-all-ctc: True -------------------------------------------------------------------------------- /egs/mustc/mt/train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # training the model 4 | 5 | gpu_num=1 6 | update_freq=1 7 | max_tokens=8192 8 | 9 | extra_tag= 10 | extra_parameter= 11 | #extra_tag="${extra_tag}" 12 | #extra_parameter="${extra_parameter} " 13 | 14 | exp_tag=baseline 15 | 16 | config_list=(small) 17 | 18 | # exp full name 19 | exp_name= 20 | 21 | train_config=$(echo ${config_list[*]} | sed 's/ /,/g') 22 | 23 | cmd="./run.sh 24 | --stage 1 25 | --stop_stage 2 26 | --gpu_num ${gpu_num} 27 | --update_freq ${update_freq} 28 | --train_config ${train_config} 29 | --max_tokens ${max_tokens} 30 | " 31 | 32 | if [[ -n ${exp_name} ]]; then 33 | cmd="$cmd --exp_name ${exp_name}" 34 | fi 35 | if [[ -n ${exp_tag} ]]; then 36 | cmd="$cmd --exp_tag ${exp_tag}" 37 | fi 38 | if [[ -n ${extra_tag} ]]; then 39 | cmd="$cmd --extra_tag ${extra_tag}" 40 | fi 41 | if [[ -n ${extra_parameter} ]]; then 42 | cmd="$cmd --extra_parameter \"${extra_parameter}\"" 43 | fi 44 | 45 | echo ${cmd} 46 | eval ${cmd} 47 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/xinter.yaml: -------------------------------------------------------------------------------- 1 | inter-xctc-weight: 0.2 2 | inter-xctc-layers: 6,9 3 | 4 | xctc-pae: none 5 | # xctc-pae: inter_league 6 | 7 | xctc-cross-attn: False 8 | cross-attn-start-layer: 7 9 | cross-attn-layer: 6 10 | cross-attn-collaboration-mode: parallel 11 | cross-attn-league-s1-ratio: 0.5 12 | cross-attn-league-s2-ratio: 0.5 13 | cross-attn-league-out-norm: False 14 | cross-attn-league-gated: False 15 | cross-attn-league-drop-net: False 16 | cross-attn-league-drop-net-prob: 0.2 17 | cross-attn-league-drop-net-mix: False 18 | 19 | # xctc-pae-ground-truth-ratio: 0.1 20 | # xctc-pae-ground-truth-ratio-adaptive: True 21 | # 
xctc-pae-ground-truth-only-mistake: True 22 | # pae-oracle-smooth: True 23 | # pae-gumbel: True 24 | # pae-distribution-hard: True 25 | # pae-drop-prob: 0.0 26 | # pae-distribution-cutoff: 10 27 | # share-pae-and-xctc: True 28 | # pae-embed-norm: True 29 | # pae-out-norm: True 30 | 31 | # ctc-self-distill-weight: 1 32 | # target-ctc-self-distill-weight: 1 33 | # ctc-self-distill-prob: 0.1 34 | # cal-all-ctc: True -------------------------------------------------------------------------------- /egs/aishell/asr/conf/pds_base_16.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_16 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 2_2_6_2 6 | pds-ratios: 2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/aishell/asr/conf/pds_base_8.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 3_3_3_3 6 | pds-ratios: 2_2_1_2 7 | pds-fusion: False 8 | 
pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/aishell/asr/conf/xinter.yaml: -------------------------------------------------------------------------------- 1 | inter-xctc-weight: 0.2 2 | inter-xctc-layers: 6,9 3 | 4 | xctc-pae: none 5 | # xctc-pae: inter_league 6 | 7 | xctc-cross-attn: False 8 | cross-attn-start-layer: 7 9 | cross-attn-layer: 6 10 | cross-attn-collaboration-mode: parallel 11 | cross-attn-league-s1-ratio: 0.5 12 | cross-attn-league-s2-ratio: 0.5 13 | cross-attn-league-out-norm: False 14 | cross-attn-league-gated: False 15 | cross-attn-league-drop-net: False 16 | cross-attn-league-drop-net-prob: 0.2 17 | cross-attn-league-drop-net-mix: False 18 | 19 | # xctc-pae-ground-truth-ratio: 0.1 20 | # xctc-pae-ground-truth-ratio-adaptive: True 21 | # xctc-pae-ground-truth-only-mistake: True 22 | # pae-oracle-smooth: True 23 | # pae-gumbel: True 24 | # pae-distribution-hard: True 25 | # pae-drop-prob: 0.0 26 | # pae-distribution-cutoff: 10 27 | # share-pae-and-xctc: True 28 | # pae-embed-norm: True 29 | # pae-out-norm: True 30 | 31 | # ctc-self-distill-weight: 1 32 | # 
target-ctc-self-distill-weight: 1 33 | # ctc-self-distill-prob: 0.1 34 | # cal-all-ctc: True -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_8.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 3_3_3_3 6 | pds-ratios: 2_2_1_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/mustc/st/conf/pds_deep_8.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 4_5_5_4 6 | pds-ratios: 2_2_1_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | 
share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 18 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/wmt16/mt/decode.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu_num=1 4 | 5 | data_dir= 6 | test_subset=(test) 7 | 8 | exp_name= 9 | if [ "$#" -eq 1 ]; then 10 | exp_name=$1 11 | fi 12 | 13 | sacrebleu=0 14 | n_average=5 15 | beam_size=4 16 | len_penalty=0.6 17 | max_tokens=4000 18 | batch_size=1 19 | infer_debug=0 20 | dec_model=checkpoint_best.pt 21 | 22 | cmd="./run.sh 23 | --stage 2 24 | --stop_stage 2 25 | --gpu_num ${gpu_num} 26 | --exp_name ${exp_name} 27 | --sacrebleu ${sacrebleu} 28 | --n_average ${n_average} 29 | --beam_size ${beam_size} 30 | --len_penalty ${len_penalty} 31 | --batch_size ${batch_size} 32 | --max_tokens ${max_tokens} 33 | --dec_model ${dec_model} 34 | --infer_debug ${infer_debug} 35 | " 36 | 37 | if [[ -n ${data_dir} ]]; then 38 | cmd="$cmd --data_dir ${data_dir}" 39 | fi 40 | if [[ ${#test_subset[@]} -ne 0 ]]; then 41 | subsets=$(echo ${test_subset[*]} | sed 's/ /,/g') 42 | cmd="$cmd --test_subset ${subsets}" 43 | fi 44 | 45 | echo $cmd 46 | eval $cmd 47 | -------------------------------------------------------------------------------- /fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | std::vector 
dynamicconv_cpu_forward( 5 | float* input, 6 | float* filters, 7 | int padding_l); 8 | 9 | std::vector dynamicconv_cpu_backward( 10 | float* gradOutput, 11 | int padding_l, 12 | float* input, 13 | float* filters); 14 | 15 | std::vector dynamicconv_forward( 16 | float* input, 17 | float* filters, 18 | int padding_l) { 19 | 20 | return dynamicconv_cpu_forward(input, filters, padding_l); 21 | } 22 | 23 | std::vector dynamicconv_backward( 24 | float* gradOutput, 25 | int padding_l, 26 | float* input, 27 | float* filters) { 28 | 29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); 30 | } 31 | 32 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); 34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); 35 | } 36 | -------------------------------------------------------------------------------- /egs/aishell/asr/conf/pds_big_8.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_m_8 2 | 3 | encoder-embed-dim: 512 4 | pds-stages: 4 5 | pds-layers: 3_3_3_3 6 | pds-ratios: 2_2_1_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 512_512_512_512 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 4_4_4_4 17 | pds-attn-heads: 8_8_8_8 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 0.0014 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.15 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 8 37 | 38 | decoder-embed-dim: 512 39 | 
decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 8 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_16.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_16 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 2_2_6_2 6 | pds-ratios: 2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/xinter.yaml: -------------------------------------------------------------------------------- 1 | inter-xctc-weight: 0.2 2 | inter-xctc-layers: 6,9 3 | 4 | xctc-pae: none 5 | # xctc-pae: inter_league 6 | 7 | xctc-cross-attn: False 8 | cross-attn-start-layer: 7 9 | cross-attn-layer: 6 10 | cross-attn-collaboration-mode: parallel 11 | cross-attn-league-s1-ratio: 0.5 12 | cross-attn-league-s2-ratio: 0.5 13 | cross-attn-league-out-norm: False 14 | cross-attn-league-gated: False 15 | cross-attn-league-drop-net: False 16 | cross-attn-league-drop-net-prob: 0.2 17 | cross-attn-league-drop-net-mix: 
False 18 | 19 | # xctc-pae-ground-truth-ratio: 0.1 20 | # xctc-pae-ground-truth-ratio-adaptive: True 21 | # xctc-pae-ground-truth-only-mistake: True 22 | # pae-oracle-smooth: True 23 | # pae-gumbel: True 24 | # pae-distribution-hard: True 25 | # pae-drop-prob: 0.0 26 | # pae-distribution-cutoff: 10 27 | # share-pae-and-xctc: True 28 | # pae-embed-norm: True 29 | # pae-out-norm: True 30 | 31 | # ctc-self-distill-weight: 1 32 | # target-ctc-self-distill-weight: 1 33 | # ctc-self-distill-prob: 0.1 34 | # cal-all-ctc: True -------------------------------------------------------------------------------- /fairseq/modules/scalar_bias.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | # 6 | 7 | import torch 8 | 9 | 10 | class ScalarBias(torch.autograd.Function): 11 | """ 12 | Adds a vector of scalars, used in self-attention mechanism to allow 13 | the model to optionally attend to this vector instead of the past 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, input, dim, bias_init): 18 | size = list(input.size()) 19 | size[dim] += 1 20 | output = input.new(*size).fill_(bias_init) 21 | output.narrow(dim, 1, size[dim] - 1).copy_(input) 22 | ctx.dim = dim 23 | return output 24 | 25 | @staticmethod 26 | def backward(ctx, grad): 27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None 28 | 29 | 30 | def scalar_bias(input, dim, bias_init=0): 31 | return ScalarBias.apply(input, dim, bias_init) 32 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_4.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 3_3_3_3 6 | pds-ratios: 2_2_1_1 7 | 
pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_big_16.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_m_16 2 | 3 | encoder-embed-dim: 512 4 | pds-stages: 4 5 | pds-layers: 2_2_6_2 6 | pds-ratios: 2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 512_512_512_512 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 4_4_4_4 17 | pds-attn-heads: 8_8_8_8 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 0.0014 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.15 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 
| encoder-attention-heads: 8 37 | 38 | decoder-embed-dim: 512 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 8 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_big_8.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_m_8 2 | 3 | encoder-embed-dim: 512 4 | pds-stages: 4 5 | pds-layers: 3_3_3_3 6 | pds-ratios: 2_2_1_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 512_512_512_512 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 4_4_4_4 17 | pds-attn-heads: 8_8_8_8 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 0.0014 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.15 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 8 37 | 38 | decoder-embed-dim: 512 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 8 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep30_8.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 7_7_7_9 6 | pds-ratios: 2_2_1_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 
8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 30 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep_16.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_16 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 3_3_8_4 6 | pds-ratios: 2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 18 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep_8.yaml: 
-------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 4_5_5_4 6 | pds-ratios: 2_2_1_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 18 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/wmt16/mt/train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # training the model 4 | 5 | gpu_num=8 6 | update_freq=1 7 | max_tokens=8192 8 | 9 | extra_tag= 10 | extra_parameter= 11 | #extra_tag="${extra_tag}" 12 | #extra_parameter="${extra_parameter} " 13 | 14 | exp_tag=baseline 15 | config_list=(base) 16 | #config_list=(deep) 17 | 18 | # exp full name 19 | exp_name= 20 | 21 | train_config=$(echo ${config_list[*]} | sed 's/ /,/g') 22 | 23 | cmd="./run.sh 24 | --stage 1 25 | --stop_stage 2 26 | --gpu_num ${gpu_num} 27 | --update_freq ${update_freq} 28 | --train_config ${train_config} 29 | --max_tokens ${max_tokens} 30 | " 31 | 32 | if [[ -n ${exp_name} ]]; then 33 | cmd="$cmd --exp_name ${exp_name}" 34 | fi 35 | if [[ -n 
${exp_tag} ]]; then 36 | cmd="$cmd --exp_tag ${exp_tag}" 37 | fi 38 | if [[ -n ${extra_tag} ]]; then 39 | cmd="$cmd --extra_tag ${extra_tag}" 40 | fi 41 | if [[ -n ${extra_parameter} ]]; then 42 | cmd="$cmd --extra_parameter \"${extra_parameter}\"" 43 | fi 44 | 45 | echo ${cmd} 46 | eval ${cmd} 47 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_16_growth.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_16 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 3_3_9_3 6 | pds-ratios: 2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 160_192_224_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 16 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep18_16.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_16 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 3_3_8_4 6 | pds-ratios: 2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 
0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 18 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep30_16.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_16 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | pds-layers: 5_5_12_8 6 | pds-ratios: 2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8 17 | pds-attn-heads: 4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 30 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | 
decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/mustc/st/conf/pds_big_32.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_m_32 2 | 3 | encoder-embed-dim: 512 4 | pds-stages: 5 5 | pds-layers: 2_2_3_3_2 6 | pds-ratios: 2_2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 512_512_512_512_512 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5_5 16 | pds-ffn-ratios: 4_4_4_4_4 17 | pds-attn-heads: 8_8_8_8_8 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 1e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.15 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 8 37 | 38 | decoder-embed-dim: 512 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 8 -------------------------------------------------------------------------------- /egs/aishell/asr/conf/pds_base_32.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_32 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 5 5 | pds-layers: 2_2_3_3_2 6 | pds-ratios: 2_2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8_8 17 | pds-attn-heads: 4_4_4_4_4 18 | 19 | share-decoder-input-output-embed: 
True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_32.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_32 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 5 5 | pds-layers: 2_2_3_3_2 6 | pds-ratios: 2_2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8_8 17 | pds-attn-heads: 4_4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_8_growth.yaml: -------------------------------------------------------------------------------- 1 | arch: 
pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | 6 | pds-layers: 5_3_3_5 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 192_224_224_256 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: label_smoothed_cross_entropy_with_ctc 30 | label_smoothing: 0.1 31 | 32 | dropout: 0.1 33 | activation-fn: relu 34 | encoder-ffn-embed-dim: 2048 35 | encoder-layers: 16 36 | decoder-layers: 6 37 | encoder-attention-heads: 4 38 | 39 | decoder-embed-dim: 256 40 | decoder-ffn-embed-dim: 2048 41 | decoder-attention-heads: 4 42 | -------------------------------------------------------------------------------- /egs/mustc/asr/conf/pds_base_8_grow.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 384 4 | pds-stages: 4 5 | encoder-layers: 12 6 | pds-layers: 4_3_3_2 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 192_256_256_384 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_4 18 | pds-attn-heads: 4_4_4_6 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: label_smoothed_cross_entropy_with_ctc 30 | 
label_smoothing: 0.1 31 | 32 | dropout: 0.1 33 | activation-fn: relu 34 | 35 | decoder-layers: 6 36 | decoder-embed-dim: 256 37 | decoder-ffn-embed-dim: 2048 38 | decoder-attention-heads: 4 39 | 40 | #load-pretrained-encoder-from: 41 | #load-pretrained-decoder-from: 42 | -------------------------------------------------------------------------------- /fairseq/data/encoders/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from fairseq.data import encoders 8 | 9 | 10 | def get_whole_word_mask(args, dictionary): 11 | bpe = encoders.build_bpe(args) 12 | if bpe is not None: 13 | 14 | def is_beginning_of_word(i): 15 | if i < dictionary.nspecial: 16 | # special elements are always considered beginnings 17 | return True 18 | tok = dictionary[i] 19 | if tok.startswith("madeupword"): 20 | return True 21 | try: 22 | return bpe.is_beginning_of_word(tok) 23 | except ValueError: 24 | return True 25 | 26 | mask_whole_words = torch.ByteTensor( 27 | list(map(is_beginning_of_word, range(len(dictionary)))) 28 | ) 29 | return mask_whole_words 30 | return None 31 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_big_32.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_m_32 2 | 3 | encoder-embed-dim: 512 4 | pds-stages: 5 5 | pds-layers: 2_2_3_3_2 6 | pds-ratios: 2_2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 512_512_512_512_512 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5_5 16 | pds-ffn-ratios: 4_4_4_4_4 17 | pds-attn-heads: 
8_8_8_8_8 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 0.0014 26 | adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.15 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 12 35 | decoder-layers: 6 36 | encoder-attention-heads: 8 37 | 38 | decoder-embed-dim: 512 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 8 -------------------------------------------------------------------------------- /egs/mustc/asr/conf/pds_base_8_grow512.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 512 4 | pds-stages: 4 5 | encoder-layers: 18 6 | pds-layers: 6_3_3_6 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 256_384_384_512 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_4_4_4 18 | pds-attn-heads: 4_6_6_8 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: label_smoothed_cross_entropy_with_ctc 30 | label_smoothing: 0.1 31 | 32 | dropout: 0.1 33 | activation-fn: relu 34 | 35 | decoder-layers: 6 36 | decoder-embed-dim: 512 37 | decoder-ffn-embed-dim: 2048 38 | decoder-attention-heads: 8 39 | 40 | #load-pretrained-encoder-from: 41 | #load-pretrained-decoder-from: 42 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_base_8_growth_fusion256.yaml: 
-------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_s_8 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 4 5 | 6 | pds-layers: 3_3_3_3 7 | pds-ratios: 2_2_1_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 192_256_256_320 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8 18 | pds-attn-heads: 4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: label_smoothed_cross_entropy_with_ctc 30 | label_smoothing: 0.1 31 | 32 | dropout: 0.1 33 | activation-fn: relu 34 | encoder-ffn-embed-dim: 2048 35 | encoder-layers: 12 36 | decoder-layers: 6 37 | encoder-attention-heads: 4 38 | 39 | decoder-embed-dim: 256 40 | decoder-ffn-embed-dim: 2048 41 | decoder-attention-heads: 4 42 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep_32.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_32 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 5 5 | pds-layers: 3_3_4_4_4 6 | pds-ratios: 2_2_2_2_2 7 | pds-fusion: False 8 | pds-fusion-method: all_conv2 9 | pds-fusion-layers: 0_0_1_1_1 10 | pds-fusion-weight: 0.2_0.3_0.5 11 | pds-embed-dims: 256_256_256_256_256 12 | pds-ds-method: conv 13 | pds-embed-norm: True 14 | pds-position-embed: 1_1_1_1_1 15 | pds-kernel-sizes: 5_5_5_5_5 16 | pds-ffn-ratios: 8_8_8_8_8 17 | pds-attn-heads: 4_4_4_4_4 18 | 19 | share-decoder-input-output-embed: True 20 | optimizer: adam 21 | clip-norm: 10.0 22 | lr-scheduler: inverse_sqrt 23 | warmup-init-lr: 1e-7 24 | warmup-updates: 10000 25 | lr: 2e-3 26 | 
adam_betas: (0.9,0.98) 27 | 28 | criterion: label_smoothed_cross_entropy_with_ctc 29 | label_smoothing: 0.1 30 | 31 | dropout: 0.1 32 | activation-fn: relu 33 | encoder-ffn-embed-dim: 2048 34 | encoder-layers: 18 35 | decoder-layers: 6 36 | encoder-attention-heads: 4 37 | 38 | decoder-embed-dim: 256 39 | decoder-ffn-embed-dim: 2048 40 | decoder-attention-heads: 4 41 | -------------------------------------------------------------------------------- /egs/librispeech/asr/conf/pds_deep18_32.yaml: -------------------------------------------------------------------------------- 1 | arch: pdss2t_transformer_sd_32 2 | 3 | encoder-embed-dim: 256 4 | pds-stages: 5 5 | # ctc-layer: 12 6 | pds-layers: 3_3_4_4_4 7 | pds-ratios: 2_2_2_2_2 8 | pds-fusion: False 9 | pds-fusion-method: all_conv2 10 | pds-fusion-layers: 0_0_1_1_1 11 | pds-fusion-weight: 0.2_0.3_0.5 12 | pds-embed-dims: 256_256_256_256_256 13 | pds-ds-method: conv 14 | pds-embed-norm: True 15 | pds-position-embed: 1_1_1_1_1 16 | pds-kernel-sizes: 5_5_5_5_5 17 | pds-ffn-ratios: 8_8_8_8_8 18 | pds-attn-heads: 4_4_4_4_4 19 | 20 | share-decoder-input-output-embed: True 21 | optimizer: adam 22 | clip-norm: 10.0 23 | lr-scheduler: inverse_sqrt 24 | warmup-init-lr: 1e-7 25 | warmup-updates: 10000 26 | lr: 2e-3 27 | adam_betas: (0.9,0.98) 28 | 29 | criterion: label_smoothed_cross_entropy_with_ctc 30 | label_smoothing: 0.1 31 | 32 | dropout: 0.1 33 | activation-fn: relu 34 | encoder-ffn-embed-dim: 2048 35 | encoder-layers: 18 36 | decoder-layers: 6 37 | encoder-attention-heads: 4 38 | 39 | decoder-embed-dim: 256 40 | decoder-ffn-embed-dim: 2048 41 | decoder-attention-heads: 4 42 | --------------------------------------------------------------------------------