├── Audio_Captioning.ipynb
├── Captioning.ipynb
├── Image_gen.ipynb
├── LICENSE
├── README.md
├── VG.ipynb
├── VQA.ipynb
├── Video_Captioning.ipynb
├── checkpoints.md
├── colab.md
├── criterions
├── __init__.py
├── clip_scst_loss.py
├── label_smoothed_cross_entropy.py
├── label_smoothed_cross_entropy_scst.py
├── label_smoothed_encouraging_loss.py
└── refcoco_scst_loss.py
├── data
├── __init__.py
├── audio_utils.py
├── data_utils.py
├── file_dataset.py
├── mm_data
│ ├── __init__.py
│ ├── audio_caption_dataset.py
│ ├── caption_dataset.py
│ ├── image_gen_dataset.py
│ ├── refcoco_dataset.py
│ ├── snli_ve_dataset.py
│ ├── video_caption_dataset.py
│ ├── video_vqa_gen_dataset.py
│ └── vqa_gen_dataset.py
├── ofa_dataset.py
├── pretrain_data
│ └── unify_dataset.py
└── video_utils.py
├── datasets.md
├── evaluate.py
├── examples
├── demo.gif
├── demo.png
├── logo.png
├── output.gif
├── results
│ ├── caption.jpg
│ ├── caption.pdf
│ ├── figures.pdf
│ ├── vg.jpg
│ ├── vg.pdf
│ ├── vqa.jpg
│ └── vqa.pdf
├── teaser.gif
├── teaser.mp4
├── teaser_2.gif
└── unival.gif
├── fairseq
├── .github
│ ├── ISSUE_TEMPLATE.md
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── documentation.md
│ │ ├── feature_request.md
│ │ └── how-to-question.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── stale.yml
│ └── workflows
│ │ ├── build.yml
│ │ └── build_wheels.yml
├── .gitignore
├── .gitmodules
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── docs
│ ├── Makefile
│ ├── _static
│ │ └── theme_overrides.css
│ ├── command_line_tools.rst
│ ├── conf.py
│ ├── criterions.rst
│ ├── data.rst
│ ├── docutils.conf
│ ├── fairseq.gif
│ ├── fairseq_logo.png
│ ├── getting_started.rst
│ ├── hydra_integration.md
│ ├── index.rst
│ ├── lr_scheduler.rst
│ ├── make.bat
│ ├── models.rst
│ ├── modules.rst
│ ├── optim.rst
│ ├── overview.rst
│ ├── requirements.txt
│ ├── tasks.rst
│ ├── tutorial_classifying_names.rst
│ └── tutorial_simple_lstm.rst
├── examples
│ ├── .gitignore
│ ├── __init__.py
│ ├── adaptive_span
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── adagrad_with_grad_clip.py
│ │ ├── adaptive_span_attention.py
│ │ ├── adaptive_span_loss.py
│ │ ├── adaptive_span_model.py
│ │ ├── adaptive_span_model_wrapper.py
│ │ └── truncated_bptt_lm_task.py
│ ├── backtranslation
│ │ ├── README.md
│ │ ├── deduplicate_lines.py
│ │ ├── extract_bt_data.py
│ │ ├── prepare-de-monolingual.sh
│ │ ├── prepare-wmt18en2de.sh
│ │ ├── sacrebleu.sh
│ │ └── tokenized_bleu.sh
│ ├── bart
│ │ ├── README.glue.md
│ │ ├── README.md
│ │ ├── README.summarization.md
│ │ └── summarize.py
│ ├── byte_level_bpe
│ │ ├── README.md
│ │ ├── get_bitext.py
│ │ ├── get_data.sh
│ │ └── gru_transformer.py
│ ├── camembert
│ │ └── README.md
│ ├── constrained_decoding
│ │ ├── README.md
│ │ ├── normalize.py
│ │ └── tok.py
│ ├── conv_seq2seq
│ │ └── README.md
│ ├── criss
│ │ ├── README.md
│ │ ├── download_and_preprocess_flores_test.sh
│ │ ├── download_and_preprocess_tatoeba.sh
│ │ ├── mining
│ │ │ ├── mine.py
│ │ │ └── mine_example.sh
│ │ ├── save_encoder.py
│ │ ├── sentence_retrieval
│ │ │ ├── encoder_analysis.py
│ │ │ └── sentence_retrieval_tatoeba.sh
│ │ └── unsupervised_mt
│ │ │ └── eval.sh
│ ├── cross_lingual_language_model
│ │ └── README.md
│ ├── discriminative_reranking_nmt
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── deen.yaml
│ │ ├── criterions
│ │ │ ├── __init__.py
│ │ │ └── discriminative_reranking_criterion.py
│ │ ├── drnmt_rerank.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ └── discriminative_reranking_model.py
│ │ ├── scripts
│ │ │ └── prep_data.py
│ │ └── tasks
│ │ │ ├── __init__.py
│ │ │ └── discriminative_reranking_task.py
│ ├── fast_noisy_channel
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── noisy_channel_beam_search.py
│ │ ├── noisy_channel_sequence_generator.py
│ │ └── noisy_channel_translation.py
│ ├── flores101
│ │ ├── README.md
│ │ └── flores_logo.png
│ ├── fully_sharded_data_parallel
│ │ └── README.md
│ ├── gottbert
│ │ └── README.md
│ ├── hubert
│ │ ├── README.md
│ │ ├── config
│ │ │ ├── decode
│ │ │ │ ├── ax_sweep
│ │ │ │ │ ├── ngram.yaml
│ │ │ │ │ └── transformer.yaml
│ │ │ │ ├── infer_fsqlm.yaml
│ │ │ │ ├── infer_kenlm.yaml
│ │ │ │ ├── infer_viterbi.yaml
│ │ │ │ └── run
│ │ │ │ │ ├── submitit_slurm.yaml
│ │ │ │ │ └── submitit_slurm_8gpu.yaml
│ │ │ ├── finetune
│ │ │ │ ├── base_10h.yaml
│ │ │ │ ├── ckpt
│ │ │ │ │ └── it1.yaml
│ │ │ │ ├── lm
│ │ │ │ │ └── ls_4gram.yaml
│ │ │ │ └── run
│ │ │ │ │ └── submitit_reg.yaml
│ │ │ └── pretrain
│ │ │ │ ├── data
│ │ │ │ ├── iter1.yaml
│ │ │ │ └── iter2.yaml
│ │ │ │ ├── hubert_base_librispeech.yaml
│ │ │ │ ├── hubert_large_librivox.yaml
│ │ │ │ ├── hubert_xlarge_librivox.yaml
│ │ │ │ └── run
│ │ │ │ └── submitit_reg.yaml
│ │ ├── measure_teacher_quality.py
│ │ ├── simple_kmeans
│ │ │ ├── README.md
│ │ │ ├── dump_hubert_feature.py
│ │ │ ├── dump_hubert_feature_s2t.py
│ │ │ ├── dump_km_label.py
│ │ │ ├── dump_mfcc_feature.py
│ │ │ ├── dump_w2v2_feature.py
│ │ │ ├── feature_utils.py
│ │ │ └── learn_kmeans.py
│ │ └── update_ckpt.py
│ ├── joint_alignment_translation
│ │ ├── README.md
│ │ └── prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh
│ ├── language_model
│ │ ├── README.adaptive_inputs.md
│ │ ├── README.conv.md
│ │ ├── README.md
│ │ └── prepare-wikitext-103.sh
│ ├── laser
│ │ ├── README.md
│ │ └── laser_src
│ │ │ ├── __init__.py
│ │ │ ├── laser_lstm.py
│ │ │ ├── laser_task.py
│ │ │ ├── laser_transformer.py
│ │ │ └── multitask_data_utils.py
│ ├── latent_depth
│ │ ├── README.md
│ │ └── latent_depth_src
│ │ │ ├── __init__.py
│ │ │ ├── loss
│ │ │ ├── __init__.py
│ │ │ └── latent_depth.py
│ │ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── latent_multilingual_transformer.py
│ │ │ └── latent_transformer.py
│ │ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ └── latent_layers.py
│ │ │ └── multilingual_translation_latent_depth.py
│ ├── layerdrop
│ │ └── README.md
│ ├── linformer
│ │ ├── README.md
│ │ └── linformer_src
│ │ │ ├── __init__.py
│ │ │ ├── models
│ │ │ ├── __init__.py
│ │ │ └── linformer_roberta.py
│ │ │ └── modules
│ │ │ ├── __init__.py
│ │ │ ├── linformer_sentence_encoder.py
│ │ │ ├── linformer_sentence_encoder_layer.py
│ │ │ └── multihead_linear_attention.py
│ ├── m2m_100
│ │ ├── README.md
│ │ ├── install_dependecies.sh
│ │ ├── process_data
│ │ │ ├── clean_histogram.py
│ │ │ ├── dedup_data.py
│ │ │ └── remove_too_much_punc.py
│ │ ├── tok.sh
│ │ └── tokenizers
│ │ │ ├── README.md
│ │ │ ├── seg_ja.sh
│ │ │ ├── seg_ko.sh
│ │ │ ├── thirdparty
│ │ │ └── .gitignore
│ │ │ ├── tokenize_indic.py
│ │ │ ├── tokenize_thai.py
│ │ │ ├── tokenize_zh.py
│ │ │ └── tokenizer_ar.sh
│ ├── mbart
│ │ └── README.md
│ ├── megatron_11b
│ │ ├── README.md
│ │ └── detok.py
│ ├── multilingual
│ │ ├── ML50_langs.txt
│ │ ├── README.md
│ │ ├── data_scripts
│ │ │ ├── README.md
│ │ │ ├── binarize.py
│ │ │ ├── check_iswlt_test_data.py
│ │ │ ├── check_self_overlaps.py
│ │ │ ├── check_valid_test_overlaps.py
│ │ │ ├── dedup_all.py
│ │ │ ├── download_ML50_v1.sh
│ │ │ ├── download_af_xh.sh
│ │ │ ├── download_flores_data.sh
│ │ │ ├── download_iitb.sh
│ │ │ ├── download_iwslt_and_extract.sh
│ │ │ ├── download_lotus.sh
│ │ │ ├── download_ted_and_extract.py
│ │ │ ├── download_wat19_my.sh
│ │ │ ├── download_wmt19_and_before.py
│ │ │ ├── download_wmt20.sh
│ │ │ ├── preprocess_ML50_v1.sh
│ │ │ ├── remove_valid_test_in_train.py
│ │ │ ├── requirement.txt
│ │ │ └── utils
│ │ │ │ ├── dedup.py
│ │ │ │ ├── fasttext_multi_filter.py
│ │ │ │ └── strip_sgm.sh
│ │ ├── finetune_multilingual_model.sh
│ │ ├── multilingual_fairseq_gen.sh
│ │ └── train_multilingual_model.sh
│ ├── noisychannel
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── rerank.py
│ │ ├── rerank_generate.py
│ │ ├── rerank_options.py
│ │ ├── rerank_score_bw.py
│ │ ├── rerank_score_lm.py
│ │ ├── rerank_tune.py
│ │ └── rerank_utils.py
│ ├── nonautoregressive_translation
│ │ ├── README.md
│ │ └── scripts.md
│ ├── paraphraser
│ │ ├── README.md
│ │ └── paraphrase.py
│ ├── pay_less_attention_paper
│ │ └── README.md
│ ├── pointer_generator
│ │ ├── README.md
│ │ ├── README.xsum.md
│ │ ├── pointer_generator_src
│ │ │ ├── __init__.py
│ │ │ └── transformer_pg.py
│ │ ├── postprocess.py
│ │ └── preprocess.py
│ ├── quant_noise
│ │ ├── README.md
│ │ └── transformer_quantization_config.yaml
│ ├── roberta
│ │ ├── README.custom_classification.md
│ │ ├── README.glue.md
│ │ ├── README.md
│ │ ├── README.pretraining.md
│ │ ├── README.race.md
│ │ ├── commonsense_qa
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── commonsense_qa_task.py
│ │ │ └── download_cqa_data.sh
│ │ ├── config
│ │ │ ├── finetuning
│ │ │ │ ├── cola.yaml
│ │ │ │ ├── mnli.yaml
│ │ │ │ ├── mrpc.yaml
│ │ │ │ ├── qnli.yaml
│ │ │ │ ├── qqp.yaml
│ │ │ │ ├── rte.yaml
│ │ │ │ ├── sst_2.yaml
│ │ │ │ └── sts_b.yaml
│ │ │ └── pretraining
│ │ │ │ └── base.yaml
│ │ ├── multiprocessing_bpe_encoder.py
│ │ ├── preprocess_GLUE_tasks.sh
│ │ ├── preprocess_RACE.py
│ │ ├── preprocess_RACE.sh
│ │ └── wsc
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── wsc_criterion.py
│ │ │ ├── wsc_task.py
│ │ │ └── wsc_utils.py
│ ├── rxf
│ │ ├── README.md
│ │ ├── __init__.py
│ │ └── rxf_src
│ │ │ ├── __init__.py
│ │ │ ├── label_smoothed_cross_entropy_r3f.py
│ │ │ └── sentence_prediction_r3f.py
│ ├── scaling_nmt
│ │ └── README.md
│ ├── shuffled_word_order
│ │ ├── README.finetuning.md
│ │ └── README.md
│ ├── simultaneous_translation
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── docs
│ │ │ ├── ende-mma.md
│ │ │ └── enja-waitk.md
│ │ ├── eval
│ │ │ └── agents
│ │ │ │ └── simul_t2t_enja.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── convtransformer_simul_trans.py
│ │ │ └── transformer_monotonic_attention.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── fixed_pre_decision.py
│ │ │ ├── monotonic_multihead_attention.py
│ │ │ └── monotonic_transformer_layer.py
│ │ ├── tests
│ │ │ └── test_text_models.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── functions.py
│ │ │ ├── monotonic_attention.py
│ │ │ └── p_choose_strategy.py
│ ├── speech_recognition
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── criterions
│ │ │ ├── ASG_loss.py
│ │ │ ├── __init__.py
│ │ │ └── cross_entropy_acc.py
│ │ ├── data
│ │ │ ├── __init__.py
│ │ │ ├── asr_dataset.py
│ │ │ ├── collaters.py
│ │ │ ├── data_utils.py
│ │ │ └── replabels.py
│ │ ├── datasets
│ │ │ ├── asr_prep_json.py
│ │ │ └── prepare-librispeech.sh
│ │ ├── infer.py
│ │ ├── kaldi
│ │ │ ├── __init__.py
│ │ │ ├── add-self-loop-simple.cc
│ │ │ ├── config
│ │ │ │ └── kaldi_initializer.yaml
│ │ │ ├── kaldi_decoder.py
│ │ │ └── kaldi_initializer.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── vggtransformer.py
│ │ │ └── w2l_conv_glu_enc.py
│ │ ├── new
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── conf
│ │ │ │ ├── hydra
│ │ │ │ │ └── sweeper
│ │ │ │ │ │ └── ax.yaml
│ │ │ │ └── infer.yaml
│ │ │ ├── decoders
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_decoder.py
│ │ │ │ ├── decoder.py
│ │ │ │ ├── decoder_config.py
│ │ │ │ ├── flashlight_decoder.py
│ │ │ │ └── viterbi_decoder.py
│ │ │ └── infer.py
│ │ ├── tasks
│ │ │ ├── __init__.py
│ │ │ └── speech_recognition.py
│ │ ├── utils
│ │ │ └── wer_utils.py
│ │ └── w2l_decoder.py
│ ├── speech_synthesis
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── data_utils.py
│ │ ├── docs
│ │ │ ├── common_voice_example.md
│ │ │ ├── ljspeech_example.md
│ │ │ └── vctk_example.md
│ │ ├── evaluation
│ │ │ ├── __init__.py
│ │ │ ├── eval_asr.py
│ │ │ ├── eval_f0.py
│ │ │ ├── eval_sp.py
│ │ │ └── get_eval_manifest.py
│ │ ├── generate_waveform.py
│ │ ├── preprocessing
│ │ │ ├── __init__.py
│ │ │ ├── denoise_and_vad_audio.py
│ │ │ ├── denoiser
│ │ │ │ ├── __init__.py
│ │ │ │ ├── demucs.py
│ │ │ │ ├── pretrained.py
│ │ │ │ ├── resample.py
│ │ │ │ └── utils.py
│ │ │ ├── get_common_voice_audio_manifest.py
│ │ │ ├── get_feature_manifest.py
│ │ │ ├── get_ljspeech_audio_manifest.py
│ │ │ ├── get_speaker_embedding.py
│ │ │ ├── get_vctk_audio_manifest.py
│ │ │ ├── speaker_embedder
│ │ │ │ └── __init__.py
│ │ │ └── vad
│ │ │ │ └── __init__.py
│ │ └── utils.py
│ ├── speech_text_joint_to_text
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── configs
│ │ │ └── mustc_noise.list
│ │ ├── criterions
│ │ │ ├── __init__.py
│ │ │ └── text_guide_cross_entropy_acc.py
│ │ ├── docs
│ │ │ ├── ende-mustc.md
│ │ │ └── iwslt2021.md
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── s2t_dualinputtransformer.py
│ │ │ └── s2t_dualinputxmtransformer.py
│ │ ├── scripts
│ │ │ └── g2p_encode.py
│ │ └── tasks
│ │ │ ├── __init__.py
│ │ │ └── speech_text_joint.py
│ ├── speech_to_text
│ │ ├── README.md
│ │ ├── data_utils.py
│ │ ├── docs
│ │ │ ├── covost_example.md
│ │ │ ├── librispeech_example.md
│ │ │ ├── mtedx_example.md
│ │ │ ├── mustc_example.md
│ │ │ └── simulst_mustc_example.md
│ │ ├── prep_covost_data.py
│ │ ├── prep_librispeech_data.py
│ │ ├── prep_mtedx_data.py
│ │ ├── prep_mustc_data.py
│ │ ├── seg_mustc_data.py
│ │ └── simultaneous_translation
│ │ │ └── agents
│ │ │ └── fairseq_simul_st_agent.py
│ ├── stories
│ │ └── README.md
│ ├── textless_nlp
│ │ └── gslm
│ │ │ ├── README.md
│ │ │ ├── metrics
│ │ │ ├── README.md
│ │ │ ├── abx_metrics
│ │ │ │ ├── README.md
│ │ │ │ └── dump_abx_feats.py
│ │ │ └── asr_metrics
│ │ │ │ ├── README.md
│ │ │ │ ├── continuation_eval.py
│ │ │ │ ├── misc
│ │ │ │ ├── bleu_utils.py
│ │ │ │ ├── cut_as.py
│ │ │ │ └── dict.ltr.txt
│ │ │ │ ├── ppx.py
│ │ │ │ └── self_auto_bleu.py
│ │ │ ├── speech2unit
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── clustering
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cluster_kmeans.py
│ │ │ │ ├── dump_feats.py
│ │ │ │ ├── quantize_with_kmeans.py
│ │ │ │ └── utils.py
│ │ │ └── pretrained
│ │ │ │ ├── cpc_feature_reader.py
│ │ │ │ ├── hubert_feature_reader.py
│ │ │ │ ├── logmel_feature_reader.py
│ │ │ │ ├── utils.py
│ │ │ │ └── w2v2_feature_reader.py
│ │ │ ├── tools
│ │ │ ├── README.md
│ │ │ └── resynthesize_speech.py
│ │ │ ├── ulm
│ │ │ ├── README.md
│ │ │ └── sample.py
│ │ │ └── unit2speech
│ │ │ ├── README.md
│ │ │ ├── convert_to_16k.py
│ │ │ ├── glow.py
│ │ │ ├── multiproc.py
│ │ │ ├── synthesize_audio_from_units.py
│ │ │ ├── tacotron2
│ │ │ ├── __init__.py
│ │ │ ├── audio_processing.py
│ │ │ ├── cleaners.py
│ │ │ ├── cmudict.py
│ │ │ ├── layers.py
│ │ │ ├── model.py
│ │ │ ├── numbers.py
│ │ │ ├── stft.py
│ │ │ ├── symbols.py
│ │ │ ├── text.py
│ │ │ ├── utils.py
│ │ │ └── waveglow_denoiser.py
│ │ │ ├── tts_data.py
│ │ │ └── utils.py
│ ├── translation
│ │ ├── README.md
│ │ ├── prepare-iwslt14.sh
│ │ ├── prepare-iwslt17-multilingual.sh
│ │ ├── prepare-wmt14en2de.sh
│ │ └── prepare-wmt14en2fr.sh
│ ├── translation_moe
│ │ ├── README.md
│ │ ├── score.py
│ │ └── translation_moe_src
│ │ │ ├── __init__.py
│ │ │ ├── logsumexp_moe.py
│ │ │ ├── mean_pool_gating_network.py
│ │ │ └── translation_moe.py
│ ├── truncated_bptt
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── transformer_xl_model.py
│ │ └── truncated_bptt_lm_task.py
│ ├── unsupervised_quality_estimation
│ │ ├── README.md
│ │ ├── aggregate_scores.py
│ │ ├── meteor.py
│ │ └── repeat_lines.py
│ ├── wav2vec
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── config
│ │ │ ├── finetuning
│ │ │ │ ├── base_100h.yaml
│ │ │ │ ├── base_10h.yaml
│ │ │ │ ├── base_10m.yaml
│ │ │ │ ├── base_1h.yaml
│ │ │ │ ├── base_960h.yaml
│ │ │ │ ├── vox_100h.yaml
│ │ │ │ ├── vox_10h.yaml
│ │ │ │ ├── vox_10m.yaml
│ │ │ │ ├── vox_1h.yaml
│ │ │ │ └── vox_960h.yaml
│ │ │ └── pretraining
│ │ │ │ ├── wav2vec2_base_librispeech.yaml
│ │ │ │ ├── wav2vec2_large_librivox.yaml
│ │ │ │ ├── wav2vec2_large_librivox_tpu-pod.yaml
│ │ │ │ └── wav2vec2_large_librivox_tpu.yaml
│ │ ├── libri_labels.py
│ │ ├── scripts
│ │ │ └── binarize_manifest.sh
│ │ ├── unsupervised
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── config
│ │ │ │ ├── finetuning
│ │ │ │ │ └── w2v_finetune.yaml
│ │ │ │ ├── gan
│ │ │ │ │ └── w2vu.yaml
│ │ │ │ ├── generate
│ │ │ │ │ └── viterbi.yaml
│ │ │ │ ├── timit_matched
│ │ │ │ │ ├── test.uid
│ │ │ │ │ ├── train.uid
│ │ │ │ │ ├── train_text.uid
│ │ │ │ │ └── valid.uid
│ │ │ │ └── timit_unmatched
│ │ │ │ │ ├── test.uid
│ │ │ │ │ ├── train.uid
│ │ │ │ │ ├── train_text.uid
│ │ │ │ │ └── valid.uid
│ │ │ ├── data
│ │ │ │ ├── __init__.py
│ │ │ │ ├── extracted_features_dataset.py
│ │ │ │ └── random_input_dataset.py
│ │ │ ├── kaldi_self_train
│ │ │ │ ├── README.md
│ │ │ │ └── st
│ │ │ │ │ ├── cmd.sh
│ │ │ │ │ ├── decode_phone.sh
│ │ │ │ │ ├── decode_word_step1.sh
│ │ │ │ │ ├── decode_word_step2.sh
│ │ │ │ │ ├── local
│ │ │ │ │ ├── copy_aligned_text.py
│ │ │ │ │ ├── decode.sh
│ │ │ │ │ ├── prepare_data_from_w2v.py
│ │ │ │ │ ├── prepare_lang.sh
│ │ │ │ │ ├── prepare_lang_word.sh
│ │ │ │ │ ├── prepare_lm.sh
│ │ │ │ │ ├── score.sh
│ │ │ │ │ ├── show_wer.sh
│ │ │ │ │ ├── train_subset_lgbeam.sh
│ │ │ │ │ ├── unsup_select.py
│ │ │ │ │ ├── unsup_select_decode.sh
│ │ │ │ │ └── unsup_select_decode_word.sh
│ │ │ │ │ ├── path.sh
│ │ │ │ │ ├── steps_gan
│ │ │ │ │ ├── train_deltas.sh
│ │ │ │ │ ├── train_lda_mllt.sh
│ │ │ │ │ └── train_sat.sh
│ │ │ │ │ └── train.sh
│ │ │ ├── models
│ │ │ │ ├── __init__.py
│ │ │ │ └── wav2vec_u.py
│ │ │ ├── scripts
│ │ │ │ ├── apply_pca.py
│ │ │ │ ├── copy_labels.py
│ │ │ │ ├── filter_lexicon.py
│ │ │ │ ├── filter_tsv.py
│ │ │ │ ├── g2p_wrd_to_phn.py
│ │ │ │ ├── ltr_to_wrd.py
│ │ │ │ ├── mean_pool.py
│ │ │ │ ├── merge_clusters.py
│ │ │ │ ├── normalize_and_filter_text.py
│ │ │ │ ├── normalize_text.py
│ │ │ │ ├── pca.py
│ │ │ │ ├── phonemize_with_sil.py
│ │ │ │ ├── prepare_audio.sh
│ │ │ │ ├── prepare_text.sh
│ │ │ │ ├── prepare_timit.sh
│ │ │ │ ├── remove_silence.py
│ │ │ │ ├── vads.py
│ │ │ │ ├── wav2vec_apply_cluster_faiss.py
│ │ │ │ ├── wav2vec_cluster_faiss.py
│ │ │ │ ├── wav2vec_extract_features.py
│ │ │ │ ├── wer.py
│ │ │ │ └── wrd_to_ltr.py
│ │ │ ├── tasks
│ │ │ │ ├── __init__.py
│ │ │ │ └── unpaired_audio_text.py
│ │ │ └── w2vu_generate.py
│ │ ├── vq-wav2vec_featurize.py
│ │ ├── wav2vec_featurize.py
│ │ └── wav2vec_manifest.py
│ ├── wmt19
│ │ └── README.md
│ ├── wmt20
│ │ └── README.md
│ └── xlmr
│ │ └── README.md
├── fairseq
│ ├── __init__.py
│ ├── benchmark
│ │ ├── __init__.py
│ │ ├── dummy_dataset.py
│ │ ├── dummy_lm.py
│ │ ├── dummy_masked_lm.py
│ │ ├── dummy_model.py
│ │ └── dummy_mt.py
│ ├── binarizer.py
│ ├── checkpoint_utils.py
│ ├── clib
│ │ ├── cuda
│ │ │ ├── ngram_repeat_block_cuda.cpp
│ │ │ └── ngram_repeat_block_cuda_kernel.cu
│ │ ├── libbase
│ │ │ └── balanced_assignment.cpp
│ │ ├── libbleu
│ │ │ ├── libbleu.cpp
│ │ │ └── module.cpp
│ │ ├── libnat
│ │ │ └── edit_dist.cpp
│ │ └── libnat_cuda
│ │ │ ├── binding.cpp
│ │ │ ├── edit_dist.cu
│ │ │ └── edit_dist.h
│ ├── config
│ │ ├── __init__.py
│ │ ├── config.yaml
│ │ └── model
│ │ │ ├── transformer_lm
│ │ │ ├── transformer_lm_baevski_gbw.yaml
│ │ │ ├── transformer_lm_baevski_wiki103.yaml
│ │ │ ├── transformer_lm_big.yaml
│ │ │ ├── transformer_lm_gbw.yaml
│ │ │ ├── transformer_lm_gpt.yaml
│ │ │ ├── transformer_lm_gpt2_big.yaml
│ │ │ ├── transformer_lm_gpt2_medium.yaml
│ │ │ ├── transformer_lm_gpt2_small.yaml
│ │ │ └── transformer_lm_wiki103.yaml
│ │ │ ├── wav2vec
│ │ │ └── vq_wav2vec_gumbel.yaml
│ │ │ └── wav2vec2
│ │ │ ├── wav2vec2_base.yaml
│ │ │ └── wav2vec2_large.yaml
│ ├── criterions
│ │ ├── __init__.py
│ │ ├── adaptive_loss.py
│ │ ├── composite_loss.py
│ │ ├── cross_entropy.py
│ │ ├── ctc.py
│ │ ├── fairseq_criterion.py
│ │ ├── fastspeech2_loss.py
│ │ ├── hubert_criterion.py
│ │ ├── label_smoothed_cross_entropy.py
│ │ ├── label_smoothed_cross_entropy_latency_augmented.py
│ │ ├── label_smoothed_cross_entropy_with_alignment.py
│ │ ├── legacy_masked_lm.py
│ │ ├── masked_lm.py
│ │ ├── model_criterion.py
│ │ ├── nat_loss.py
│ │ ├── sentence_prediction.py
│ │ ├── sentence_ranking.py
│ │ ├── tacotron2_loss.py
│ │ └── wav2vec_criterion.py
│ ├── data
│ │ ├── __init__.py
│ │ ├── add_target_dataset.py
│ │ ├── append_token_dataset.py
│ │ ├── audio
│ │ │ ├── __init__.py
│ │ │ ├── audio_utils.py
│ │ │ ├── data_cfg.py
│ │ │ ├── feature_transforms
│ │ │ │ ├── __init__.py
│ │ │ │ ├── global_cmvn.py
│ │ │ │ ├── specaugment.py
│ │ │ │ └── utterance_cmvn.py
│ │ │ ├── frm_text_to_speech_dataset.py
│ │ │ ├── hubert_dataset.py
│ │ │ ├── multi_modality_dataset.py
│ │ │ ├── raw_audio_dataset.py
│ │ │ ├── speech_to_text_dataset.py
│ │ │ ├── speech_to_text_joint_dataset.py
│ │ │ └── text_to_speech_dataset.py
│ │ ├── backtranslation_dataset.py
│ │ ├── base_wrapper_dataset.py
│ │ ├── bucket_pad_length_dataset.py
│ │ ├── colorize_dataset.py
│ │ ├── concat_dataset.py
│ │ ├── concat_sentences_dataset.py
│ │ ├── data_utils.py
│ │ ├── data_utils_fast.pyx
│ │ ├── denoising_dataset.py
│ │ ├── dictionary.py
│ │ ├── encoders
│ │ │ ├── __init__.py
│ │ │ ├── byte_bpe.py
│ │ │ ├── byte_utils.py
│ │ │ ├── bytes.py
│ │ │ ├── characters.py
│ │ │ ├── fastbpe.py
│ │ │ ├── gpt2_bpe.py
│ │ │ ├── gpt2_bpe_utils.py
│ │ │ ├── hf_bert_bpe.py
│ │ │ ├── hf_byte_bpe.py
│ │ │ ├── moses_tokenizer.py
│ │ │ ├── nltk_tokenizer.py
│ │ │ ├── sentencepiece_bpe.py
│ │ │ ├── space_tokenizer.py
│ │ │ ├── subword_nmt_bpe.py
│ │ │ └── utils.py
│ │ ├── fairseq_dataset.py
│ │ ├── fasta_dataset.py
│ │ ├── huffman
│ │ │ ├── __init__.py
│ │ │ ├── huffman_coder.py
│ │ │ └── huffman_mmap_indexed_dataset.py
│ │ ├── id_dataset.py
│ │ ├── indexed_dataset.py
│ │ ├── iterators.py
│ │ ├── language_pair_dataset.py
│ │ ├── legacy
│ │ │ ├── __init__.py
│ │ │ ├── block_pair_dataset.py
│ │ │ ├── masked_lm_dataset.py
│ │ │ └── masked_lm_dictionary.py
│ │ ├── list_dataset.py
│ │ ├── lm_context_window_dataset.py
│ │ ├── lru_cache_dataset.py
│ │ ├── mask_tokens_dataset.py
│ │ ├── monolingual_dataset.py
│ │ ├── multi_corpus_dataset.py
│ │ ├── multi_corpus_sampled_dataset.py
│ │ ├── multilingual
│ │ │ ├── __init__.py
│ │ │ ├── multilingual_data_manager.py
│ │ │ ├── multilingual_utils.py
│ │ │ ├── sampled_multi_dataset.py
│ │ │ ├── sampled_multi_epoch_dataset.py
│ │ │ └── sampling_method.py
│ │ ├── nested_dictionary_dataset.py
│ │ ├── noising.py
│ │ ├── num_samples_dataset.py
│ │ ├── numel_dataset.py
│ │ ├── offset_tokens_dataset.py
│ │ ├── pad_dataset.py
│ │ ├── plasma_utils.py
│ │ ├── prepend_dataset.py
│ │ ├── prepend_token_dataset.py
│ │ ├── raw_label_dataset.py
│ │ ├── replace_dataset.py
│ │ ├── resampling_dataset.py
│ │ ├── roll_dataset.py
│ │ ├── round_robin_zip_datasets.py
│ │ ├── shorten_dataset.py
│ │ ├── sort_dataset.py
│ │ ├── strip_token_dataset.py
│ │ ├── subsample_dataset.py
│ │ ├── text_compressor.py
│ │ ├── token_block_dataset.py
│ │ ├── token_block_utils_fast.pyx
│ │ ├── transform_eos_dataset.py
│ │ └── transform_eos_lang_pair_dataset.py
│ ├── dataclass
│ │ ├── __init__.py
│ │ ├── configs.py
│ │ ├── constants.py
│ │ ├── initialize.py
│ │ └── utils.py
│ ├── distributed
│ │ ├── __init__.py
│ │ ├── distributed_timeout_wrapper.py
│ │ ├── fully_sharded_data_parallel.py
│ │ ├── legacy_distributed_data_parallel.py
│ │ ├── module_proxy_wrapper.py
│ │ ├── tpu_distributed_data_parallel.py
│ │ └── utils.py
│ ├── file_chunker_utils.py
│ ├── file_io.py
│ ├── file_utils.py
│ ├── hub_utils.py
│ ├── incremental_decoding_utils.py
│ ├── iterative_refinement_generator.py
│ ├── logging
│ │ ├── __init__.py
│ │ ├── meters.py
│ │ ├── metrics.py
│ │ └── progress_bar.py
│ ├── model_parallel
│ │ ├── __init__.py
│ │ ├── criterions
│ │ │ ├── __init__.py
│ │ │ └── vocab_parallel_cross_entropy.py
│ │ ├── megatron_trainer.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── pipeline_parallel_transformer
│ │ │ │ ├── __init__.py
│ │ │ │ ├── layers.py
│ │ │ │ └── model.py
│ │ │ ├── roberta
│ │ │ │ ├── __init__.py
│ │ │ │ └── model.py
│ │ │ ├── transformer.py
│ │ │ └── transformer_lm.py
│ │ └── modules
│ │ │ ├── __init__.py
│ │ │ ├── multihead_attention.py
│ │ │ └── transformer_layer.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── bart
│ │ │ ├── __init__.py
│ │ │ ├── hub_interface.py
│ │ │ └── model.py
│ │ ├── composite_encoder.py
│ │ ├── distributed_fairseq_model.py
│ │ ├── ema
│ │ │ ├── __init__.py
│ │ │ └── ema.py
│ │ ├── fairseq_decoder.py
│ │ ├── fairseq_encoder.py
│ │ ├── fairseq_incremental_decoder.py
│ │ ├── fairseq_model.py
│ │ ├── fconv.py
│ │ ├── fconv_lm.py
│ │ ├── fconv_self_att.py
│ │ ├── hubert
│ │ │ ├── __init__.py
│ │ │ ├── hubert.py
│ │ │ └── hubert_asr.py
│ │ ├── huggingface
│ │ │ ├── __init__.py
│ │ │ └── hf_gpt2.py
│ │ ├── lightconv.py
│ │ ├── lightconv_lm.py
│ │ ├── lstm.py
│ │ ├── lstm_lm.py
│ │ ├── masked_lm.py
│ │ ├── model_utils.py
│ │ ├── multilingual_transformer.py
│ │ ├── nat
│ │ │ ├── __init__.py
│ │ │ ├── cmlm_transformer.py
│ │ │ ├── fairseq_nat_model.py
│ │ │ ├── insertion_transformer.py
│ │ │ ├── iterative_nonautoregressive_transformer.py
│ │ │ ├── levenshtein_transformer.py
│ │ │ ├── levenshtein_utils.py
│ │ │ ├── nat_crf_transformer.py
│ │ │ ├── nonautoregressive_ensembles.py
│ │ │ └── nonautoregressive_transformer.py
│ │ ├── roberta
│ │ │ ├── __init__.py
│ │ │ ├── alignment_utils.py
│ │ │ ├── enc_dec.py
│ │ │ ├── hub_interface.py
│ │ │ ├── model.py
│ │ │ ├── model_camembert.py
│ │ │ ├── model_gottbert.py
│ │ │ └── model_xlmr.py
│ │ ├── speech_to_text
│ │ │ ├── __init__.py
│ │ │ ├── berard.py
│ │ │ ├── convtransformer.py
│ │ │ ├── modules
│ │ │ │ ├── augmented_memory_attention.py
│ │ │ │ └── emformer.py
│ │ │ ├── s2t_transformer.py
│ │ │ ├── utils.py
│ │ │ └── xm_transformer.py
│ │ ├── text_to_speech
│ │ │ ├── __init__.py
│ │ │ ├── fastspeech2.py
│ │ │ ├── hifigan.py
│ │ │ ├── tacotron2.py
│ │ │ ├── tts_transformer.py
│ │ │ └── vocoder.py
│ │ ├── transformer
│ │ │ ├── __init__.py
│ │ │ ├── transformer_base.py
│ │ │ ├── transformer_config.py
│ │ │ ├── transformer_decoder.py
│ │ │ ├── transformer_encoder.py
│ │ │ └── transformer_legacy.py
│ │ ├── transformer_align.py
│ │ ├── transformer_from_pretrained_xlm.py
│ │ ├── transformer_lm.py
│ │ └── wav2vec
│ │ │ ├── __init__.py
│ │ │ ├── wav2vec.py
│ │ │ ├── wav2vec2.py
│ │ │ └── wav2vec2_asr.py
│ ├── modules
│ │ ├── __init__.py
│ │ ├── adaptive_input.py
│ │ ├── adaptive_softmax.py
│ │ ├── base_layer.py
│ │ ├── beamable_mm.py
│ │ ├── character_token_embedder.py
│ │ ├── checkpoint_activations.py
│ │ ├── conv_tbc.py
│ │ ├── cross_entropy.py
│ │ ├── cuda_utils.cu
│ │ ├── downsampled_multihead_attention.py
│ │ ├── dynamic_convolution.py
│ │ ├── dynamic_crf_layer.py
│ │ ├── dynamicconv_layer
│ │ │ ├── __init__.py
│ │ │ ├── cuda_function_gen.py
│ │ │ ├── dynamicconv_cuda.cpp
│ │ │ ├── dynamicconv_cuda.cuh
│ │ │ ├── dynamicconv_cuda_kernel.cu
│ │ │ ├── dynamicconv_layer.py
│ │ │ ├── dynamiconv_cpu.cpp
│ │ │ └── setup.py
│ │ ├── fairseq_dropout.py
│ │ ├── fp32_group_norm.py
│ │ ├── gelu.py
│ │ ├── grad_multiply.py
│ │ ├── gumbel_vector_quantizer.py
│ │ ├── kmeans_attention.py
│ │ ├── kmeans_vector_quantizer.py
│ │ ├── layer_drop.py
│ │ ├── layer_norm.py
│ │ ├── learned_positional_embedding.py
│ │ ├── lightconv_layer
│ │ │ ├── __init__.py
│ │ │ ├── cuda_function_gen.py
│ │ │ ├── lightconv_cuda.cpp
│ │ │ ├── lightconv_cuda.cuh
│ │ │ ├── lightconv_cuda_kernel.cu
│ │ │ ├── lightconv_layer.py
│ │ │ └── setup.py
│ │ ├── lightweight_convolution.py
│ │ ├── linearized_convolution.py
│ │ ├── location_attention.py
│ │ ├── lstm_cell_with_zoneout.py
│ │ ├── multihead_attention.py
│ │ ├── positional_embedding.py
│ │ ├── quant_noise.py
│ │ ├── quantization
│ │ │ ├── __init__.py
│ │ │ ├── pq
│ │ │ │ ├── __init__.py
│ │ │ │ ├── em.py
│ │ │ │ ├── modules
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── qconv.py
│ │ │ │ │ ├── qemb.py
│ │ │ │ │ └── qlinear.py
│ │ │ │ ├── pq.py
│ │ │ │ └── utils.py
│ │ │ ├── quantization_options.py
│ │ │ └── scalar
│ │ │ │ ├── __init__.py
│ │ │ │ ├── modules
│ │ │ │ ├── __init__.py
│ │ │ │ ├── qact.py
│ │ │ │ ├── qconv.py
│ │ │ │ ├── qemb.py
│ │ │ │ └── qlinear.py
│ │ │ │ ├── ops.py
│ │ │ │ └── utils.py
│ │ ├── same_pad.py
│ │ ├── scalar_bias.py
│ │ ├── sinusoidal_positional_embedding.py
│ │ ├── sparse_multihead_attention.py
│ │ ├── sparse_transformer_sentence_encoder.py
│ │ ├── sparse_transformer_sentence_encoder_layer.py
│ │ ├── transformer_layer.py
│ │ ├── transformer_sentence_encoder.py
│ │ ├── transformer_sentence_encoder_layer.py
│ │ ├── transpose_last.py
│ │ ├── unfold.py
│ │ └── vggblock.py
│ ├── nan_detector.py
│ ├── ngram_repeat_block.py
│ ├── optim
│ │ ├── __init__.py
│ │ ├── adadelta.py
│ │ ├── adafactor.py
│ │ ├── adagrad.py
│ │ ├── adam.py
│ │ ├── adamax.py
│ │ ├── amp_optimizer.py
│ │ ├── bmuf.py
│ │ ├── composite.py
│ │ ├── cpu_adam.py
│ │ ├── dynamic_loss_scaler.py
│ │ ├── fairseq_optimizer.py
│ │ ├── fp16_optimizer.py
│ │ ├── fused_adam.py
│ │ ├── fused_lamb.py
│ │ ├── lr_scheduler
│ │ │ ├── __init__.py
│ │ │ ├── cosine_lr_scheduler.py
│ │ │ ├── fairseq_lr_scheduler.py
│ │ │ ├── fixed_schedule.py
│ │ │ ├── inverse_square_root_schedule.py
│ │ │ ├── manual_lr_scheduler.py
│ │ │ ├── pass_through.py
│ │ │ ├── polynomial_decay_schedule.py
│ │ │ ├── reduce_lr_on_plateau.py
│ │ │ ├── step_lr_scheduler.py
│ │ │ ├── tri_stage_lr_scheduler.py
│ │ │ └── triangular_lr_scheduler.py
│ │ ├── nag.py
│ │ ├── sgd.py
│ │ └── shard.py
│ ├── options.py
│ ├── pdb.py
│ ├── quantization_utils.py
│ ├── registry.py
│ ├── scoring
│ │ ├── __init__.py
│ │ ├── bleu.py
│ │ ├── chrf.py
│ │ ├── tokenizer.py
│ │ └── wer.py
│ ├── search.py
│ ├── sequence_generator.py
│ ├── sequence_scorer.py
│ ├── speech_generator.py
│ ├── tasks
│ │ ├── __init__.py
│ │ ├── audio_finetuning.py
│ │ ├── audio_pretraining.py
│ │ ├── cross_lingual_lm.py
│ │ ├── denoising.py
│ │ ├── fairseq_task.py
│ │ ├── frm_text_to_speech.py
│ │ ├── hubert_pretraining.py
│ │ ├── language_modeling.py
│ │ ├── legacy_masked_lm.py
│ │ ├── masked_lm.py
│ │ ├── multilingual_denoising.py
│ │ ├── multilingual_masked_lm.py
│ │ ├── multilingual_translation.py
│ │ ├── online_backtranslation.py
│ │ ├── semisupervised_translation.py
│ │ ├── sentence_prediction.py
│ │ ├── sentence_ranking.py
│ │ ├── simultaneous_translation.py
│ │ ├── speech_to_text.py
│ │ ├── text_to_speech.py
│ │ ├── translation.py
│ │ ├── translation_from_pretrained_bart.py
│ │ ├── translation_from_pretrained_xlm.py
│ │ ├── translation_lev.py
│ │ └── translation_multi_simple_epoch.py
│ ├── token_generation_constraints.py
│ ├── tokenizer.py
│ ├── trainer.py
│ ├── utils.py
│ └── version.txt
├── fairseq_cli
│ ├── __init__.py
│ ├── eval_lm.py
│ ├── generate.py
│ ├── hydra_train.py
│ ├── interactive.py
│ ├── preprocess.py
│ ├── score.py
│ ├── train.py
│ └── validate.py
├── hubconf.py
├── pyproject.toml
├── scripts
│ ├── __init__.py
│ ├── average_checkpoints.py
│ ├── build_sym_alignment.py
│ ├── compare_namespaces.py
│ ├── compound_split_bleu.sh
│ ├── constraints
│ │ ├── extract.py
│ │ └── validate.py
│ ├── convert_dictionary.lua
│ ├── convert_model.lua
│ ├── count_docs.py
│ ├── read_binarized.py
│ ├── rm_pt.py
│ ├── sacrebleu.sh
│ ├── shard_docs.py
│ ├── split_train_valid_docs.py
│ ├── spm_decode.py
│ ├── spm_encode.py
│ ├── spm_train.py
│ └── test_fsdp.sh
├── setup.py
├── tests
│ ├── __init__.py
│ ├── distributed
│ │ ├── __init__.py
│ │ ├── test_bmuf.py
│ │ ├── test_distributed_timeout_wrapper.py
│ │ ├── test_module_proxy_wrapper.py
│ │ ├── test_utils.py
│ │ └── utils.py
│ ├── gpu
│ │ ├── __init__.py
│ │ ├── test_binaries_gpu.py
│ │ ├── test_ema_gpu.py
│ │ └── transformer_quantization_config.yaml
│ ├── speech_recognition
│ │ ├── __init__.py
│ │ ├── asr_test_base.py
│ │ ├── test_collaters.py
│ │ ├── test_cross_entropy.py
│ │ ├── test_data_utils.py
│ │ └── test_vggtransformer.py
│ ├── test_activation_checkpointing.py
│ ├── test_amp_optimizer.py
│ ├── test_average_checkpoints.py
│ ├── test_backtranslation_dataset.py
│ ├── test_binaries.py
│ ├── test_character_token_embedder.py
│ ├── test_checkpoint_utils.py
│ ├── test_concat_dataset.py
│ ├── test_constraints.py
│ ├── test_convtbc.py
│ ├── test_data_utils.py
│ ├── test_dataclass_utils.py
│ ├── test_dataset.py
│ ├── test_dictionary.py
│ ├── test_ema.py
│ ├── test_export.py
│ ├── test_file_chunker_utils.py
│ ├── test_file_io.py
│ ├── test_fp16_optimizer.py
│ ├── test_huffman.py
│ ├── test_inference_dropout.py
│ ├── test_iopath.py
│ ├── test_iterators.py
│ ├── test_label_smoothing.py
│ ├── test_lm_context_window.py
│ ├── test_lstm_jitable.py
│ ├── test_memory_efficient_fp16.py
│ ├── test_metrics.py
│ ├── test_multi_corpus_dataset.py
│ ├── test_multi_corpus_sampled_dataset.py
│ ├── test_multihead_attention.py
│ ├── test_noising.py
│ ├── test_online_backtranslation.py
│ ├── test_plasma_utils.py
│ ├── test_reproducibility.py
│ ├── test_resampling_dataset.py
│ ├── test_roberta.py
│ ├── test_sequence_generator.py
│ ├── test_sequence_scorer.py
│ ├── test_sparse_multihead_attention.py
│ ├── test_token_block_dataset.py
│ ├── test_train.py
│ ├── test_transformer.py
│ ├── test_utils.py
│ ├── test_valid_subset_checks.py
│ └── utils.py
└── train.py
├── models
├── __init__.py
├── clip
│ ├── __init__.py
│ ├── bpe_simple_vocab_16e6.txt.gz
│ ├── clip.py
│ ├── model.py
│ └── simple_tokenizer.py
├── search.py
├── sequence_generator.py
├── taming
│ ├── lr_scheduler.py
│ ├── models
│ │ └── vqgan.py
│ ├── modules
│ │ ├── diffusionmodules
│ │ │ └── model.py
│ │ ├── discriminator
│ │ │ └── model.py
│ │ ├── losses
│ │ │ ├── __init__.py
│ │ │ ├── lpips.py
│ │ │ ├── segmentation.py
│ │ │ └── vqperceptual.py
│ │ ├── misc
│ │ │ └── coord.py
│ │ ├── util.py
│ │ └── vqvae
│ │ │ └── quantize.py
│ └── util.py
└── unival
│ ├── __init__.py
│ ├── encoders
│ ├── audio_utils.py
│ ├── feature_fusion.py
│ ├── pann.py
│ ├── resnext3d.py
│ ├── simple_tokenizer.py
│ └── timm_resnet.py
│ ├── frozen_bn.py
│ ├── resnet.py
│ ├── unify_multihead_attention.py
│ ├── unify_transformer.py
│ ├── unify_transformer_layer.py
│ └── unival.py
├── ofa_module
└── __init__.py
├── ofa_test.ipynb
├── preprocess
├── __init__.py
├── average_save_models.py
├── change_model_name.py
├── compress.py
└── utils.py
├── requirements.txt
├── rewarded_soups.md
├── run_scripts
├── averaging
│ ├── fusing
│ │ └── scaling_best
│ │ │ ├── unival_caption_stage_1_initavg_caprefsnlivqa.sh
│ │ │ ├── unival_refcocoplus_initavg_caprefsnlivqa.sh
│ │ │ ├── unival_snli_ve_initavg_caprefsnlivqa.sh
│ │ │ ├── unival_vqa_initavg_caprefsnlivqa.sh
│ │ │ └── video
│ │ │ ├── video_caption_stage_1_ofaplus_base_pretrain_s2_hsep1_shuf_el_db_da_initavgvideocaptionvqa.sh
│ │ │ └── video_vqa_ofaplus_base_pretrain_s2_hsep1_shuf_el_db_initavgvideocaptionvqa.sh
│ └── ratatouille
│ │ ├── eval
│ │ ├── eval_caption.sh
│ │ ├── eval_refcoco.sh
│ │ └── eval_vqa.sh
│ │ └── scaling_best
│ │ ├── caption
│ │ ├── unival_caption_stage_1_initrefcocoplus.sh
│ │ ├── unival_caption_stage_1_initsnlive.sh
│ │ ├── unival_caption_stage_1_initvqa.sh
│ │ └── video
│ │ │ └── unival_video_caption_stage_1_initvideoqa.sh
│ │ ├── refcocoplus
│ │ ├── unival_refcocoplus_initcaption.sh
│ │ ├── unival_refcocoplus_initsnlive.sh
│ │ └── unival_refcocoplus_initvqa.sh
│ │ ├── snli_ve
│ │ ├── unival_snli_ve_initcaption.sh
│ │ ├── unival_snli_ve_initrefcocoplus.sh
│ │ └── unival_snli_ve_initvqa.sh
│ │ └── vqa
│ │ ├── unival_vqa_initcaption.sh
│ │ ├── unival_vqa_initsnlive.sh
│ │ ├── unival_vqa_ofaplus_initrefcocoplus.sh
│ │ └── video
│ │ └── unival_video_vqa_initvideocaption.sh
├── caption
│ ├── audio
│ │ ├── clotho
│ │ │ └── unival_audio_caption_clotho.sh
│ │ └── unival_audio_caption.sh
│ ├── coco_eval.py
│ ├── eval
│ │ ├── eval_caption.sh
│ │ ├── eval_caption_avg.sh
│ │ └── eval_nocaps.sh
│ ├── onlylinear
│ │ ├── unival_audio_caption_s1_onlylinear.sh
│ │ ├── unival_audio_caption_s2_onlylinear.sh
│ │ ├── unival_caption_stage_s1_onlylinear.sh
│ │ ├── unival_caption_stage_s2_onlylinear.sh
│ │ ├── unival_video_caption_s1_onlylinear.sh
│ │ └── unival_video_caption_s2_onlylinear.sh
│ ├── unival_caption_stage_1.sh
│ └── video
│ │ ├── activitynet
│ │ └── unival_video_caption_activitynet_stage_1.sh
│ │ └── unival_video_caption_stage_1.sh
├── image_gen
│ ├── eval
│ │ └── eval_image_gen.sh
│ ├── eval_utils
│ │ ├── dataset.py
│ │ └── inceptionV3.py
│ ├── fid_score.py
│ ├── generate_code.py
│ ├── image_gen_example.py
│ ├── inception_score.py
│ ├── unival_image_gen_stage_1.sh
│ └── unival_image_gen_stage_2.sh
├── pretraining
│ ├── unival_s1.sh
│ ├── unival_s2.sh
│ └── unival_s2_hs.sh
├── refcoco
│ ├── eval
│ │ ├── eval_refcoco.sh
│ │ ├── eval_refcocog.sh
│ │ ├── eval_refcocoplus.sh
│ │ └── eval_refcocoplus_avg.sh
│ ├── scst
│ │ ├── unival_refcocoplus_acc0_5large_lreinf5.sh
│ │ ├── unival_refcocoplus_acc0_5medium_lreinf5.sh
│ │ ├── unival_refcocoplus_acc0_5mediumlarge_lreinf5.sh
│ │ ├── unival_refcocoplus_acc0_5mediumsmall_lreinf5.sh
│ │ ├── unival_refcocoplus_acc0_5small_lreinf5.sh
│ │ └── unival_refcocoplus_acc0_5smalllarge_lreinf5.sh
│ ├── unival_refcoco.sh
│ ├── unival_refcocog.sh
│ └── unival_refcocoplus.sh
├── snli_ve
│ ├── eval
│ │ ├── eval_snli_ve.sh
│ │ └── eval_snli_ve_avg.sh
│ └── unival_snli_ve.sh
└── vqa
│ ├── eval
│ ├── eval_okvqa.sh
│ ├── eval_vizwiz.sh
│ ├── eval_vqa.sh
│ ├── eval_vqa_avg.sh
│ └── video
│ │ ├── eval_video_qa.sh
│ │ ├── eval_video_qa_avg.sh
│ │ └── eval_video_qa_msvd.sh
│ ├── onlylinear
│ ├── unival_video_vqa_s1_onlylinear.sh
│ ├── unival_video_vqa_s2_onlylinear.sh
│ ├── unival_vqa_s1_onlylinear.sh
│ └── unival_vqa_s2_onlylinear.sh
│ ├── unival_vqa.sh
│ └── video
│ ├── msvd
│ └── unival_video_vqa_msvd.sh
│ └── unival_video_vqa.sh
├── slurm_adastra
├── averaging
│ ├── branching
│ │ ├── caption
│ │ │ ├── ofa_mini_caption_stage_1_pretrain_branchcap.sh
│ │ │ ├── ofa_mini_caption_stage_1_pretrain_branchcapgroundvqa.sh
│ │ │ ├── ofa_mini_caption_stage_1_pretrain_branchimvid.sh
│ │ │ ├── ofa_mini_caption_stage_1_pretrain_qa_ground_10epmore.sh
│ │ │ └── video
│ │ │ │ ├── ofa_mini_video_caption_stage_1_bart_pretrain_branchimvid.sh
│ │ │ │ └── ofa_mini_video_caption_stage_1_bart_pretrain_branchvid.sh
│ │ ├── refcoco
│ │ │ ├── ofa_ratarefcocoplus_branchcapgroundvqa.sh
│ │ │ ├── ofa_ratarefcocoplus_branchground.sh
│ │ │ └── ofa_ratarefcocoplus_pretrain_qa_ground_10epmore.sh
│ │ └── vqa
│ │ │ ├── ofa_mini_vqa_pretrain_branchcapgroundvqa.sh
│ │ │ ├── ofa_mini_vqa_pretrain_branchimvid.sh
│ │ │ ├── ofa_mini_vqa_pretrain_branvqa.sh
│ │ │ ├── ofa_mini_vqa_pretrain_qa_ground_10epmore.sh
│ │ │ └── video
│ │ │ ├── ofa_mini_video_vqa_pretrain_branchimvid.sh
│ │ │ └── ofa_mini_video_vqa_pretrain_branchvid.sh
│ ├── caption
│ │ ├── ofa_caption_stage_1_long1e5.sh
│ │ ├── ofa_caption_stage_1_soup1same.sh
│ │ ├── ofa_caption_stage_1_soup2same.sh
│ │ ├── ofa_caption_stage_1_soup_bs32lr5e6.sh
│ │ ├── ofa_caption_stage_1_soup_lr2e5dropout02.sh
│ │ ├── ofa_wacaption_capground_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_snlicap_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqacapground_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqacapgroundofapt_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqacapofa_caption_stage_1.sh
│ │ ├── ofa_wacaption_vqacapofa_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqacapofa_caption_stage_1_lr1e6.sh
│ │ ├── ofa_wacaption_vqacapofapt_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqacapsnli_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqacapsnliground_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqacapsnligroundofapt_caption_stage_1_lr1e5.sh
│ │ ├── ofa_wacaption_vqasnliground_caption_stage_1_lr1e5.sh
│ │ └── ofa_wacaption_vqasnligroundofapt_caption_stage_1_lr1e5.sh
│ ├── eval
│ │ ├── eval_refcocoplus.sh
│ │ └── eval_refcocoplus_wa.sh
│ ├── fusing
│ │ ├── scaling_best
│ │ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh
│ │ │ ├── refcocoplus_ofaplus_base_pretrain_s2_hsep1_fix_lr5e5_bs8_4_shuf_initavg_caprefsnlivqa.sh
│ │ │ ├── snli_ve_ofaplus_base_pretrain_s2_hsep1_initavg_caprefsnlivqa.sh
│ │ │ ├── video
│ │ │ │ ├── video_caption_stage_1_ofaplus_base_pretrain_s2_hsep1_shuf_el_db_da_initavgvideocaptionvqa.sh
│ │ │ │ └── video_vqa_ofaplus_base_pretrain_s2_hsep1_shuf_el_db_initavgvideocaptionvqa.sh
│ │ │ └── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf_hsep1_initavg_caprefsnlivqa.sh
│ │ └── t.sh
│ ├── ratatouille
│ │ ├── caption
│ │ │ ├── ofa_ratacaption_ground_caption_stage_1_lr1e5.sh
│ │ │ ├── ofa_ratacaption_snli_caption_stage_1_lr1e5.sh
│ │ │ ├── ofa_ratacaption_vqa_caption_stage_1_lr1e5.sh
│ │ │ └── video
│ │ │ │ └── t.sh
│ │ ├── eval
│ │ │ ├── eval_caption.sh
│ │ │ ├── eval_refcoco.sh
│ │ │ ├── eval_vqa.sh
│ │ │ └── eval_vqa_lambdas.sh
│ │ ├── refcoco
│ │ │ ├── ofa_ratarefcocoplus_cap_refcocoplus.sh
│ │ │ ├── ofa_ratarefcocoplus_snli_refcocoplus.sh
│ │ │ └── ofa_ratarefcocoplus_vqa_refcocoplus.sh
│ │ ├── scaling_best
│ │ │ ├── caption
│ │ │ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initrefcocoplus.sh
│ │ │ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initsnlive.sh
│ │ │ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initvqa.sh
│ │ │ │ └── video
│ │ │ │ │ └── video_caption_stage_1_ofaplus_base_pretrain_s2_hsep1_shuf_el_db_da_initvideoqa.sh
│ │ │ ├── refcocoplus
│ │ │ │ ├── refcocoplus_ofaplus_base_pretrain_s2_hsep1_fix_lr5e5_bs8_4_shuf_initcaption.sh
│ │ │ │ ├── refcocoplus_ofaplus_base_pretrain_s2_hsep1_fix_lr5e5_bs8_4_shuf_initsnlive.sh
│ │ │ │ └── refcocoplus_ofaplus_base_pretrain_s2_hsep1_fix_lr5e5_bs8_4_shuf_initvqa.sh
│ │ │ ├── snli_ve
│ │ │ │ ├── snli_ve_ofaplus_base_pretrain_s2_hsep1_initcaption.sh
│ │ │ │ ├── snli_ve_ofaplus_base_pretrain_s2_hsep1_initrefcocoplus.sh
│ │ │ │ └── snli_ve_ofaplus_base_pretrain_s2_hsep1_initvqa.sh
│ │ │ └── vqa
│ │ │ │ ├── video
│ │ │ │ └── video_vqa_ofaplus_base_pretrain_s2_hsep1_shuf_el_db_initvideocaption.sh
│ │ │ │ ├── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf_hsep1_initcaption.sh
│ │ │ │ ├── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf_hsep1_initrefcocoplus.sh
│ │ │ │ └── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf_hsep1_initsnlive.sh
│ │ └── vqa
│ │ │ ├── ofa_ratavqa_cap_vqa_bart_noema_lr1e5.sh
│ │ │ ├── ofa_ratavqa_cap_vqa_bart_noema_lr1e6.sh
│ │ │ ├── ofa_ratavqa_cap_vqa_bart_noema_lr5e5.sh
│ │ │ ├── ofa_ratavqa_ground_bart_noema_lr1e5.sh
│ │ │ ├── ofa_ratavqa_ground_bart_noema_lr1e6.sh
│ │ │ ├── ofa_ratavqa_ground_bart_noema_lr5e5.sh
│ │ │ ├── ofa_ratavqa_snli_bart_noema_lr1e5.sh
│ │ │ ├── ofa_ratavqa_snli_bart_noema_lr1e6.sh
│ │ │ └── ofa_ratavqa_snli_bart_noema_lr5e5.sh
│ ├── refcoco
│ │ ├── ofa_long_refcocoplus.sh
│ │ ├── ofa_warefcocoplus_ground_refcocoplus.sh
│ │ ├── ofa_warefcocoplus_ground_refcocoplus_lr1e5.sh
│ │ ├── ofa_warefcocoplus_ground_refcocoplus_lr5e6.sh
│ │ ├── ofa_warefcocoplus_vqacapsnliground_refcocoplus.sh
│ │ ├── ofa_warefcocoplus_vqacapsnligroundofapt_refcocoplus.sh
│ │ └── ofa_warefcocoplus_vqacapsnliofapt_refcocoplus.sh
│ └── vqa
│ │ ├── ofa_vqa_bart_noema_long_lr1e6.sh
│ │ ├── ofa_vqa_bart_noema_long_lr5e5.sh
│ │ ├── ofa_wavqa_capsnligroundofapt_vqa_bart_noema_lr1e6.sh
│ │ ├── ofa_wavqa_vqacapofa_vqa_bart_noema.sh
│ │ ├── ofa_wavqa_vqacapofa_vqa_bart_noema_lr1e6.sh
│ │ ├── ofa_wavqa_vqacapofa_vqa_bart_noema_lr5e5.sh
│ │ ├── ofa_wavqa_vqacapofa_vqa_bart_noema_lr5e6.sh
│ │ ├── ofa_wavqa_vqacapofa_vqa_bart_noema_lr5e7.sh
│ │ ├── ofa_wavqa_vqacapsnliground_vqa_bart_noema_lr1e6.sh
│ │ └── ofa_wavqa_vqacapsnligroundofapt_vqa_bart_noema_lr1e6.sh
├── caption
│ ├── audio
│ │ ├── clotho
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_audiosetcls_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_audiosetclsdesc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_onestage_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN_lr1e4.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_bs32.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_bs8.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_lr1e4.sh
│ │ │ ├── ofa_mini_clotho_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_lr5e5.sh
│ │ │ └── ofa_mini_clotho_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4.sh
│ │ ├── ofa_mini_audio_caption_stage_1_bart_allpannc14_nosample_bart_mel128.sh
│ │ ├── ofa_mini_audio_caption_stage_1_bart_allpannc14_nosample_bart_mel64.sh
│ │ ├── ofa_mini_audio_caption_stage_1_bart_allpannc14_nosample_scratch.sh
│ │ ├── ofa_mini_audio_caption_stage_1_bart_allpannc14_nosample_scratch_mel128.sh
│ │ ├── ofa_mini_audio_caption_stage_1_bart_allpannc14_nosample_scratch_mel64.sh
│ │ ├── ofa_mini_audio_caption_stage_1_bart_pannc14_nosample_scratch_multinodes.sh
│ │ ├── ofa_mini_audio_caption_stage_1_bart_pannc14melb128_nosample_scratch_multinodes.sh
│ │ ├── ofa_mini_audio_caption_stage_1_ofa_cc3m_cc12m_pretrain_bart_allresnet_ep10.sh
│ │ ├── ofa_mini_audio_caption_stage_1_ofa_mini_initbart.sh
│ │ ├── ofa_mini_audio_caption_stage_1_ofa_mini_pretrain_bart_allresnet_ep10.sh
│ │ ├── ofa_mini_audio_caption_stage_1_ofa_mini_pretrain_bart_allresnet_inittext.sh
│ │ ├── ofa_mini_audio_caption_stage_1_ofa_mini_pretrain_bart_allresnet_inittext_onlylinear.sh
│ │ ├── ofa_mini_audio_caption_stage_1_onlyaudio_audiovcc_onestage_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ ├── ofa_mini_audio_caption_stage_1_onlyaudio_audiovcc_onestage_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_onlylinear.sh
│ │ ├── ofa_mini_audio_caption_stage_1_onlyvideo_viddatacapqa_pretrain_bart_allresnxtvid_init_8f_lr4_20ep.sh
│ │ ├── ofa_mini_audio_caption_stage_1_pretrain_bart_allresnet_ep20.sh
│ │ ├── ofa_mini_audio_caption_stage_1_pretrain_bart_allresnet_ep20_onlylinear.sh
│ │ ├── ofa_mini_audio_caption_stage_1_pretrain_bart_allresnet_pretraintext.sh
│ │ ├── ofa_mini_audio_caption_stage_1_pretrain_bart_allresnet_pretraintext_long_onlylinear.sh
│ │ ├── ofa_mini_audio_caption_stage_1_pretrain_bart_allresnet_pretraintext_onlylinear.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiosetcls_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_audiosetcls_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_audiosetcls_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_audiosetcls_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN_lr1e4_freezeaudio.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_audiosetclsdesc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_audiosetclsdesc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_onestage_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_onestage_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_ep20.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allhtsat_init_8f_lr4_wav_audioembLN_lr1e4.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN_freezeaudio.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN_lr1e4.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN_lr1e4_freezeaudio.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_bartinit_ep20.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_bartinit_ep20_freezeencdec.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_bartinit_ep20_onlylinear.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4_ast.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4_freezeencdec.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4_htsat.sh
│ │ ├── ofa_mini_audio_caption_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4_onlylinear.sh
│ │ └── scaling_best
│ │ │ └── audio_caption_ofaplus_base_pretrain_s2_bs8_4.sh
│ ├── eval
│ │ ├── audio
│ │ │ ├── eval_audiocaps_audio_caption.sh
│ │ │ └── eval_clotho_audio_caption.sh
│ │ ├── eval_caption_base_best.sh
│ │ ├── eval_caption_base_best_avg.sh
│ │ ├── eval_nocaps_base.sh
│ │ └── video
│ │ │ ├── eval_activitynet_video_caption.sh
│ │ │ ├── eval_msrvtt_video_caption.sh
│ │ │ ├── eval_msrvtt_video_caption_avg.sh
│ │ │ ├── eval_msvd_video_caption.sh
│ │ │ └── eval_youcookii_video_caption.sh
│ ├── scaling
│ │ ├── caption_stage_1_ofa_base_pretrain_s2.sh
│ │ ├── caption_stage_1_ofa_huge_pretrain_s2.sh
│ │ ├── caption_stage_1_ofa_large_pretrain_s2.sh
│ │ └── video
│ │ │ ├── video_caption_stage_1_ofa_base_pretrain_s2.sh
│ │ │ └── video_caption_stage_1_ofa_large_pretrain_s2.sh
│ ├── scaling_best
│ │ ├── audio
│ │ │ ├── audio_caption_ofaplus_base_pretrain_s2.sh
│ │ │ ├── audio_caption_ofaplus_base_pretrain_s2_bs8_4.sh
│ │ │ ├── audio_caption_ofaplus_base_pretrain_s2_lr1e3.sh
│ │ │ ├── audio_caption_ofaplus_base_pretrain_s2_lr1e4.sh
│ │ │ ├── audio_caption_ofaplus_base_pretrain_s2_lr1e4_nosr_shuf_el_db.sh
│ │ │ ├── audio_caption_ofaplus_base_pretrain_s2_lr5e5.sh
│ │ │ ├── audio_caption_ofaplus_huge_pretrain_s2_lr1e4_nosr_shuf_el_db.sh
│ │ │ └── clotho
│ │ │ │ ├── audio_caption_clotho_ofaplus_base_pretrain_s2_lr1e4_nosr_shuf_el_db.sh
│ │ │ │ ├── audio_caption_clotho_ofaplus_base_pretrain_s2_lr1e5_nosr_shuf_el_db.sh
│ │ │ │ ├── audio_caption_clotho_ofaplus_base_pretrain_s2_lr5e5_nosr_shuf_el_db.sh
│ │ │ │ └── audio_caption_clotho_ofaplus_base_pretrain_s2_lr5e5_nosr_shuf_el_db_long.sh
│ │ ├── caption_stage_1_ofa_base_pretrain_s2_bs16_shuf_el.sh
│ │ ├── caption_stage_1_ofaplus_base_pretrain_s2.sh
│ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_bs16.sh
│ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_bs16_shuf.sh
│ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_bs16_shuf_el.sh
│ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_bs16_shuf_el_da.sh
│ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_fix.sh
│ │ ├── caption_stage_1_ofaplus_base_pretrain_s2_lr5e5.sh
│ │ ├── onlylinear
│ │ │ ├── audio_caption_ofaplus_s0_onlylinear.sh
│ │ │ ├── audio_caption_ofaplus_s1_onlylinear.sh
│ │ │ ├── audio_caption_ofaplus_s2_onlylinear.sh
│ │ │ ├── caption_stage_1_ofaplus_s0_onlylinear.sh
│ │ │ ├── caption_stage_1_ofaplus_s1_onlylinear.sh
│ │ │ ├── caption_stage_1_ofaplus_s2_onlylinear.sh
│ │ │ ├── caption_stage_2_ofaplus_s1_onlylinear.sh
│ │ │ ├── video_caption_ofaplus_s0_onlylinear.sh
│ │ │ ├── video_caption_ofaplus_s1_onlylinear.sh
│ │ │ └── video_caption_ofaplus_s2_onlylinear.sh
│ │ ├── scst
│ │ │ └── caption_stage_2_ofaplus_base_pretrain_s2_hsep1_bs16_shuf.sh
│ │ └── video
│ │ │ ├── activitynet
│ │ │ ├── ._video_caption_activitynet_stage_1_ofaplus_base_pretrain_s2_shuf_el_db_da.sh
│ │ │ ├── video_caption_activitynet_stage_1_ofaplus_base_pretrain_s2_hs_shuf_el_db_da_long.sh
│ │ │ └── video_caption_activitynet_stage_1_ofaplus_base_pretrain_s2_shuf_el_db_da.sh
│ │ │ ├── msvd
│ │ │ ├── video_caption_msvd_stage_1_ofaplus_base_pretrain_s2.sh
│ │ │ ├── video_caption_msvd_stage_1_ofaplus_base_pretrain_s2_bs16.sh
│ │ │ ├── video_caption_msvd_stage_1_ofaplus_base_pretrain_s2_shuf_el_db.sh
│ │ │ └── video_caption_msvd_stage_1_ofaplus_base_pretrain_s2_shuf_el_db_da.sh
│ │ │ ├── vatex
│ │ │ ├── video_caption_vatex_stage_1_ofaplus_base_pretrain_s2_hs_shuf_el_db_da_long.sh
│ │ │ └── video_caption_vatex_stage_1_ofaplus_base_pretrain_s2_shuf_el_db_da.sh
│ │ │ ├── video_caption_stage_1_ofaplus_base_pretrain_s2.sh
│ │ │ ├── video_caption_stage_1_ofaplus_base_pretrain_s2_bs16.sh
│ │ │ ├── video_caption_stage_1_ofaplus_base_pretrain_s2_lr5e5.sh
│ │ │ ├── video_caption_stage_1_ofaplus_base_pretrain_s2_shuf_el_db.sh
│ │ │ ├── video_caption_stage_1_ofaplus_base_pretrain_s2_shuf_el_db_da.sh
│ │ │ └── youcookii
│ │ │ └── video_caption_youcookii_ofaplus_base_pretrain_s2_shuf_el_db.sh
│ ├── scst
│ │ └── ofa_mini_caption_stage_2_video_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_test.sh
│ └── video
│ │ ├── msvd
│ │ └── ofa_mini_msvd_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_det_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_ground_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_itdata_cc12m7m_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_qa_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_text_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_ep20_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_4f_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_4f_vids2_lr4_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_4f_vids2_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_4f_8f_vids2_lr4_nosample_hres_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_4f_vids2_lr4_nosample_hres_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_4f_vids2_lr4_nosample_hres_scratch_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_4f_vids2_lr4_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_4f_vids2_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_8f_vids2_lr4_negcapvid_nosample_hres_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_8f_vids2_lr4_nosample_hres_longt_bs16_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_8f_vids2_lr4_nosample_hres_longt_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_8f_vids2_lr4_nosample_hres_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_8f_vids2_lr4_prog1_nosample_hres_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_res_resnxtvid_init_8f_vids2_lr4_prog2_nosample_hres_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_shared_tformer_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddata_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddata_allres_alls3d_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_videp20_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_fps1.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_lr1e4.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_onlylinear.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_viddatacapqa_allres_bs2k_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_scratch_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_vitb16_hres_enceval_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_vitb16_hres_enceval_withcls_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_bart_vitb16_hres_unfreeze_nosample_multinodes.sh
│ │ ├── ofa_mini_video_caption_stage_1_initlmscratch.sh
│ │ ├── ofa_mini_video_caption_stage_1_ofa_mini_pretrain_bart_allresne_ep10.sh
│ │ ├── ofa_mini_video_caption_stage_1_ofa_mini_pretrain_bart_allresnet_inittext.sh
│ │ ├── ofa_mini_video_caption_stage_1_ofa_mini_pretrain_bart_allresnet_inittext_onlylinear.sh
│ │ ├── ofa_mini_video_caption_stage_1_onlyvideo_viddatacapqa_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_caption_stage_1_pretrain_bart_allresnet_ep20_onlylinear.sh
│ │ ├── ofa_mini_video_caption_stage_1_pretrain_bart_allresnet_pretraintext_onlylinear.sh
│ │ ├── ofa_mini_video_caption_stage_1_video_audiovcc_onestage_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ ├── ofa_mini_video_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav.sh
│ │ ├── ofa_mini_video_caption_stage_1_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ │ ├── ofa_mini_video_caption_stage_1_webvid10m_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_prog2_nosample_hres_multinodes.sh
│ │ ├── scst
│ │ ├── ofa_mini_video_caption_stage_2BLUE_4_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_lr1e5.sh
│ │ ├── ofa_mini_video_caption_stage_2ROUGEL_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_lr1e5.sh
│ │ └── ofa_mini_video_caption_stage_2_4_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_lr1e5.sh
│ │ └── youcookii
│ │ ├── ofa_mini_youcookii_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt.sh
│ │ ├── ofa_mini_youcookii_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_32f.sh
│ │ ├── ofa_mini_youcookii_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_32f_224res.sh
│ │ ├── ofa_mini_youcookii_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_480res.sh
│ │ └── ofa_mini_youcookii_video_caption_stage_1_bart_viddatacapqa_allres_allresnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_lr1e4.sh
├── image_gen
│ ├── eval
│ │ ├── ._eval_image_gen.sh
│ │ └── eval_image_gen.sh
│ ├── ofa_mini_image_gen_stage_1_initbart.sh
│ ├── ofa_mini_image_gen_stage_1_ofa_mini_pretrain_bart_allresnet_ep20.sh
│ ├── ofa_mini_image_gen_stage_1_pretrain_bart_allresnet_pretraintext.sh
│ ├── ofa_mini_image_gen_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4.sh
│ ├── ofa_mini_image_gen_stage_1_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4_onlylinear.sh
│ ├── ofa_mini_image_gen_stage_2_video_s2_viddatacapqa_pretrain_bart_allres_allresnxtvid_init_8f_lr4.sh
│ └── scaling_best
│ │ ├── image_gen_ofa_stage_1_base.sh
│ │ ├── image_gen_ofa_stage_2_base.sh
│ │ ├── image_gen_ofaplus_stage_1_base_s2.sh
│ │ ├── image_gen_ofaplus_stage_1_base_s2_bs8_4.sh
│ │ ├── image_gen_ofaplus_stage_1_base_s2_hsep1_long.sh
│ │ ├── image_gen_ofaplus_stage_1_base_s2_lr5e3.sh
│ │ └── image_gen_ofaplus_stage_2_base_s2_hsep1_long.sh
├── refcoco
│ ├── eval
│ │ ├── eval_refcoco.sh
│ │ ├── eval_refcocog.sh
│ │ ├── eval_refcocoplus.sh
│ │ └── eval_refcocoplus_avg.sh
│ ├── scaling_best
│ │ ├── refcoco_ofa_base_pretrain_s2_hs_fix_lr5e5_bs8_4_shuf.sh
│ │ ├── refcoco_ofaplus_base_pretrain_s2_hsep1_fix_lr5e5_bs8_4_shuf.sh
│ │ ├── refcocog_ofa_base_pretrain_s2_hs_fix_lr5e5_bs8_4_shuf.sh
│ │ ├── refcocog_ofaplus_base_pretrain_s2_hsep1_fix_lr5e5_bs8_4_shuf.sh
│ │ ├── refcocoplus_ofa_base_pretrain_s2_hs_fix_lr5e5_bs8_4_shuf.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2_bs8_2_lr5e5.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2_bs8_4.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2_fix.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2_fix_5e5_bs8_4.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2_fix_lr5e5_bs8_4_shuf.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2_lr5e5.sh
│ │ ├── refcocoplus_ofaplus_base_pretrain_s2_lr5e5_el_db.sh
│ │ └── refcocoplus_ofaplus_base_pretrain_s2_lr5e5_shuf.sh
│ └── scst
│ │ ├── ._ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1norm_lreinf10.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_acc0_5_lreinf5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_acc0_5medium_lreinf1.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_acc0_5medium_lreinf5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_acc0_5mediumlarge_lreinf5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_acc0_5mediumsmall_lreinf5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_acc0_5smalllarge_lreinf5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1_lreinf0_5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1_lreinf1.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1_lreinf5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1norm_lreinf1.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1norm_lreinf10.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1norm_lreinf5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_mse_lreinf0_5.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_scst_lprob_supervised_l1_large.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_scst_lprob_supervised_l1_medium.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_scst_lprob_supervised_l1_small.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_scst_supervised_l1_large.sh
│ │ ├── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_scst_supervised_l1_medium.sh
│ │ └── ofa_mini_qa_ground_cc12m_balanced_refcocoplus_scst_supervised_l1_small.sh
├── scaling
│ ├── ._ofa_base_pretrain_s1_ret_startonlylinear.sh
│ ├── ._ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata.sh
│ ├── ._ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata_2.sh
│ ├── ._ofa_base_pretrain_s2_long_lr1e4_50ep_startonlylinear.sh
│ ├── ofa_base_pretrain_baseline.sh
│ ├── ofa_base_pretrain_baseline_hs.sh
│ ├── ofa_base_pretrain_s1.sh
│ ├── ofa_base_pretrain_s1_long.sh
│ ├── ofa_base_pretrain_s1_long_init_ofa.sh
│ ├── ofa_base_pretrain_s1_long_initcc.sh
│ ├── ofa_base_pretrain_s1_ret.sh
│ ├── ofa_base_pretrain_s1_ret_startonlylinear.sh
│ ├── ofa_base_pretrain_s2.sh
│ ├── ofa_base_pretrain_s2_long.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_init_ofa.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_nolsdata.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata_2.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata_fimgbn.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata_fimgbn_fvidenc.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata_fimgenc_fvidenc.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_nolsdata.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_nolsdata_hs.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_nolsdata_startonlylinear.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_nolsdata_vidhs.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_onlyenc.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_startonlyenc.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_startonlyenc_freezeenc.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_startonlylinear.sh
│ ├── ofa_base_pretrain_s2_long_lr1e4_50ep_startonlylinear_lr5e5.sh
│ ├── ofa_base_pretrain_s3.sh
│ ├── ofa_huge_pretrain_s1.sh
│ ├── ofa_huge_pretrain_s2.sh
│ ├── ofa_huge_pretrain_s2_fixedlr.sh
│ ├── ofa_huge_pretrain_s2_fixedlr1e4.sh
│ ├── ofa_huge_pretrain_s2_fixedlr5e5.sh
│ ├── ofa_huge_pretrain_s3.sh
│ ├── ofa_large_pretrain_s1.sh
│ ├── ofa_large_pretrain_s1_long.sh
│ ├── ofa_large_pretrain_s1_long_inittext.sh
│ ├── ofa_large_pretrain_s1_ret.sh
│ ├── ofa_large_pretrain_s2.sh
│ ├── ofa_large_pretrain_s2_long.sh
│ ├── ofa_large_pretrain_s2_long_lr1e4.sh
│ ├── ofa_large_pretrain_s2_long_lr1e4_ep50.sh
│ └── ofa_large_pretrain_s3.sh
├── snli_ve
│ ├── eval
│ │ ├── eval_snli_ve_base_best.sh
│ │ └── eval_snli_ve_base_best_avg.sh
│ └── scaling_best
│ │ ├── snli_ve_ofa_base_pretrain_s2.sh
│ │ ├── snli_ve_ofaplus_base_pretrain_s2.sh
│ │ ├── snli_ve_ofaplus_base_pretrain_s2_bs16_4.sh
│ │ ├── snli_ve_ofaplus_base_pretrain_s2_el_db.sh
│ │ ├── snli_ve_ofaplus_base_pretrain_s2_lr1e4.sh
│ │ └── snli_ve_ofaplus_base_pretrain_s2_shuf.sh
└── vqa
│ ├── eval
│ ├── ._eval_okvqa_base_best.sh
│ ├── ._eval_vizwiz_base_best.sh
│ ├── eval_okvqa_base_best.sh
│ ├── eval_vizwiz_base_best.sh
│ ├── eval_vqa_base_best.sh
│ ├── eval_vqa_base_best_avg.sh
│ ├── t.sh
│ └── video
│ │ ├── eval_video_qa.sh
│ │ ├── eval_video_qa_avg.sh
│ │ └── eval_video_qa_msvd.sh
│ ├── scaling
│ ├── video
│ │ └── video_vqa_ofa_base_pretrain_s2.sh
│ └── vqa_ofa_base_pretrain_s2.sh
│ ├── scaling_best
│ ├── onlylinear
│ │ ├── video_vqa_ofaplus_s1_onlylinear.sh
│ │ ├── video_vqa_ofaplus_s2_onlylinear.sh
│ │ ├── vqa_ofaplus_s1_onlylinear.sh
│ │ └── vqa_ofaplus_s2_onlylinear.sh
│ ├── video
│ │ ├── msvd
│ │ │ ├── t.sh
│ │ │ ├── video_vqa_msvd_ofaplus_base_pretrain_s2.sh
│ │ │ ├── video_vqa_msvd_ofaplus_base_pretrain_s2_el.sh
│ │ │ ├── video_vqa_msvd_ofaplus_base_pretrain_s2_el_db.sh
│ │ │ ├── video_vqa_msvd_ofaplus_base_pretrain_s2_el_db_da.sh
│ │ │ └── video_vqa_msvd_ofaplus_base_pretrain_s2_el_nodb.sh
│ │ ├── video_vqa_ofaplus_base_pretrain_s2.sh
│ │ ├── video_vqa_ofaplus_base_pretrain_s2_f16.sh
│ │ ├── video_vqa_ofaplus_base_pretrain_s2_lr5e5.sh
│ │ ├── video_vqa_ofaplus_base_pretrain_s2_shuf_el_db.sh
│ │ ├── video_vqa_ofaplus_base_pretrain_s2_shuf_el_db_da.sh
│ │ └── video_vqa_ofaplus_base_pretrain_s2_shuf_el_db_noema.sh
│ ├── vqa_ofa_base_pretrain_s2_bs16_lr1e4_shuf.sh
│ ├── vqa_ofaplus_base_pretrain_s2.sh
│ ├── vqa_ofaplus_base_pretrain_s2_bs16.sh
│ ├── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf.sh
│ ├── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf_el_db.sh
│ ├── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf_el_db_nodw.sh
│ ├── vqa_ofaplus_base_pretrain_s2_bs16_lr1e4_shuf_hs_lr3e4.sh
│ └── vqa_ofaplus_base_pretrain_s2_lr1e4.sh
│ └── video
│ ├── msvd
│ ├── ofa_mini_msvd_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_16f_vids2_lr4_nosample_hres_longt2_bs8_1kans.sh
│ ├── ofa_mini_msvd_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_msvd_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_1kans_lr1e4.sh
│ ├── ofa_mini_msvd_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_1kans_lr1e4_freezeenc.sh
│ ├── ofa_mini_msvd_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_1kans_lr1e5.sh
│ └── ofa_mini_msvd_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_1kans_lr5e4.sh
│ ├── ofa_mini_det_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_ground_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_itdata_cc12m7m_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_qa_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_text_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_caption_stage_1_pretrain_bart_allresnet_pretraintext_onlylinear.sh
│ ├── ofa_mini_video_vqa_bart_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_res_4f_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_4f_vids2_lr4_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_4f_vids2_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_8f_vids2_lr4_nosample_hres_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_lr4_nosample_hres_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_lr4_nosample_hres_scratch_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_lr4_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f216f_vids2_lr4_nosample_hres_longt_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_negcapvid_nosample_hres_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_nosample_hres_longt_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_nosample_hres_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_prog1_nosample_hres_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_res_resnxtvid_init_8f_vids2_lr4_prog2_nosample_hres_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_shared_tformer_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_viddata_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_viddata_allres_alls3d_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_videp20_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_fps1.sh
│ ├── ofa_mini_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_lr1e4.sh
│ ├── ofa_mini_video_vqa_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8_onlylinear.sh
│ ├── ofa_mini_video_vqa_bart_viddatacapqa_allres_bs2k_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_bart_vitb16_hres_enceval_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_vitb16_hres_enceval_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_vitb16_hres_enceval_scratch_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_vitb16_hres_enceval_withcls_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_bart_vitb16_hres_unfreeze_nosample_multinodes.sh
│ ├── ofa_mini_video_vqa_ep20_lr4_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_initlmscratch.sh
│ ├── ofa_mini_video_vqa_ofa_mini_pretrain_bart_allresnet_ep20_onlylinear.sh
│ ├── ofa_mini_video_vqa_ofa_mini_pretrain_bart_allresnet_inittext.sh
│ ├── ofa_mini_video_vqa_ofa_mini_pretrain_bart_allresnet_inittext_onlylinear_lr1e4.sh
│ ├── ofa_mini_video_vqa_ofa_mini_pretrain_bart_allresnt_ep20.sh
│ ├── ofa_mini_video_vqa_onlyvideo_viddatacapqa_ep20_nosample_hres_longt2_bs8.sh
│ ├── ofa_mini_video_vqa_pretrain_bart_allresnet_pretraintext_onlylinear.sh
│ ├── ofa_mini_video_vqa_video_audiovcc_s3_viddatacapqa_pretrain_bart_allres_allresnxtvid_allpannc14mel64h200_init_8f_lr4_wav_audioembLN.sh
│ └── ofa_mini_video_vqa_webvid10m_bart_viddatacapqa_allres_allresnxtvid_init_8f_vids2_lr4_nosample_hres_longt2_bs8.sh
├── tasks
├── .ipynb_checkpoints
│ ├── __init__-checkpoint.py
│ └── ofa_task-checkpoint.py
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-37.pyc
│ ├── __init__.cpython-38.pyc
│ ├── __init__.cpython-39.pyc
│ ├── ofa_task.cpython-37.pyc
│ ├── ofa_task.cpython-38.pyc
│ └── ofa_task.cpython-39.pyc
├── mm_tasks
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ ├── __init__.cpython-38.pyc
│ │ ├── __init__.cpython-39.pyc
│ │ ├── audio_caption.cpython-37.pyc
│ │ ├── audio_caption.cpython-39.pyc
│ │ ├── caption.cpython-37.pyc
│ │ ├── caption.cpython-38.pyc
│ │ ├── caption.cpython-39.pyc
│ │ ├── image_gen.cpython-37.pyc
│ │ ├── image_gen.cpython-38.pyc
│ │ ├── image_gen.cpython-39.pyc
│ │ ├── refcoco.cpython-37.pyc
│ │ ├── refcoco.cpython-38.pyc
│ │ ├── refcoco.cpython-39.pyc
│ │ ├── snli_ve.cpython-37.pyc
│ │ ├── snli_ve.cpython-38.pyc
│ │ ├── snli_ve.cpython-39.pyc
│ │ ├── video_caption.cpython-37.pyc
│ │ ├── video_caption.cpython-39.pyc
│ │ ├── video_vqa_gen.cpython-37.pyc
│ │ ├── video_vqa_gen.cpython-39.pyc
│ │ ├── vqa_gen.cpython-37.pyc
│ │ ├── vqa_gen.cpython-38.pyc
│ │ └── vqa_gen.cpython-39.pyc
│ ├── audio_caption.py
│ ├── caption.py
│ ├── image_gen.py
│ ├── refcoco.py
│ ├── snli_ve.py
│ ├── video_caption.py
│ ├── video_vqa_gen.py
│ └── vqa_gen.py
├── ofa_task.py
└── pretrain_tasks
│ ├── .ipynb_checkpoints
│ └── unify_task-checkpoint.py
│ ├── __init__.py
│ ├── __pycache__
│ ├── __init__.cpython-37.pyc
│ ├── __init__.cpython-38.pyc
│ ├── __init__.cpython-39.pyc
│ ├── unify_task.cpython-37.pyc
│ ├── unify_task.cpython-38.pyc
│ └── unify_task.cpython-39.pyc
│ └── unify_task.py
├── test.py
├── train.py
├── trainer.py
├── transformers.md
└── utils
├── BERT_CN_dict
├── dict.txt
└── vocab.txt
├── BPE
├── __init__.py
├── dict.txt
├── encoder.json
└── vocab.bpe
├── __init__.py
├── __pycache__
├── __init__.cpython-37.pyc
├── __init__.cpython-38.pyc
├── __init__.cpython-39.pyc
├── checkpoint_utils.cpython-37.pyc
├── checkpoint_utils.cpython-38.pyc
├── checkpoint_utils.cpython-39.pyc
├── eval_utils.cpython-37.pyc
├── eval_utils.cpython-39.pyc
├── transforms.cpython-37.pyc
├── transforms.cpython-38.pyc
├── transforms.cpython-39.pyc
├── trie.cpython-37.pyc
├── trie.cpython-38.pyc
├── trie.cpython-39.pyc
├── utils.cpython-37.pyc
├── utils.cpython-39.pyc
├── vision_helper.cpython-37.pyc
├── vision_helper.cpython-38.pyc
├── vision_helper.cpython-39.pyc
├── zero_shot_utils.cpython-37.pyc
└── zero_shot_utils.cpython-39.pyc
├── checkpoint_utils.py
├── cider
└── pyciderevalcap
│ ├── __init__.py
│ ├── __pycache__
│ ├── __init__.cpython-37.pyc
│ ├── __init__.cpython-38.pyc
│ └── __init__.cpython-39.pyc
│ ├── cider
│ ├── __init__.py
│ ├── cider.py
│ └── cider_scorer.py
│ └── ciderD
│ ├── __init__.py
│ ├── __pycache__
│ ├── __init__.cpython-37.pyc
│ ├── __init__.cpython-38.pyc
│ ├── __init__.cpython-39.pyc
│ ├── ciderD.cpython-37.pyc
│ ├── ciderD.cpython-38.pyc
│ ├── ciderD.cpython-39.pyc
│ ├── ciderD_scorer.cpython-37.pyc
│ ├── ciderD_scorer.cpython-38.pyc
│ └── ciderD_scorer.cpython-39.pyc
│ ├── ciderD.py
│ └── ciderD_scorer.py
├── eval_utils.py
├── map_boxes
├── __init__.py
├── compute_overlap.pyx
└── compute_overlap_slow.py
├── rouge.py
├── transforms.py
├── trie.py
├── utils.py
├── vision_helper.py
└── zero_shot_utils.py
/__pycache__/trainer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/__pycache__/trainer.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/trainer.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/__pycache__/trainer.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/trainer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/__pycache__/trainer.cpython-39.pyc
--------------------------------------------------------------------------------
/colab.md:
--------------------------------------------------------------------------------
1 | # Colab Notebooks
2 |
3 | We provide Colab notebooks for different downstream tasks so you can try OFA yourself. See below.
4 |
5 | * [Image Captioning in Huggingface Transformers](https://colab.research.google.com/drive/1Ho81RBV8jysZ7e0FhsSCk_v938QeDuy3?usp=sharing)
6 | * [Generic Interface](https://colab.research.google.com/drive/1jogyZ-2rdHU3XxZOf3TBfhex1XHqX-1m?usp=sharing#scrollTo=s9Vni6YUZOpC) (use different instructions to perform various tasks with a single model).
7 | * [Image Captioning](https://colab.research.google.com/drive/1Q4eNhhhLcgOP4hHqwZwU1ijOlabgve1W?usp=sharing)
8 | * [Referring Expression Comprehension](https://colab.research.google.com/drive/1AHQNRdaUpRTgr3XySHSlba8aXwBAjwPB?usp=sharing)
9 | * [Open-Domain Visual Question Answering](https://colab.research.google.com/drive/14v6OQe_MxV_HMnsiKfnEeMR1UMqhzZNb?usp=sharing)
10 |
--------------------------------------------------------------------------------
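As context for the Huggingface notebook listed above, the snippet below is a minimal, hedged sketch of driving an OFA checkpoint through the transformers interface referenced by transformers.md. It assumes the OFA fork of transformers that provides `OFATokenizer`/`OFAModel` (not mainline transformers); the checkpoint directory `OFA-base` and the image path are placeholders.

```python
# Hedged sketch: image captioning with OFA via the transformers fork
# (see transformers.md). "OFA-base" and "example.jpg" are placeholders.
from PIL import Image
from torchvision import transforms
from transformers import OFATokenizer, OFAModel  # provided by the OFA fork

tokenizer = OFATokenizer.from_pretrained("OFA-base")
model = OFAModel.from_pretrained("OFA-base", use_cache=False)

# OFA expects fixed-resolution patches normalized to [-1, 1].
patch_transform = transforms.Compose([
    transforms.Resize((384, 384)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3),
])
patch_img = patch_transform(Image.open("example.jpg")).unsqueeze(0)

prompt = " what does the image describe?"
inputs = tokenizer([prompt], return_tensors="pt").input_ids
gen = model.generate(inputs, patch_images=patch_img,
                     num_beams=4, no_repeat_ngram_size=3)
print(tokenizer.batch_decode(gen, skip_special_tokens=True))
```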
/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | from .label_smoothed_cross_entropy import AdjustLabelSmoothedCrossEntropyCriterion
2 | from .clip_scst_loss import ClipScstRewardCriterion
3 | from .label_smoothed_encouraging_loss import AdjustLabelSmoothedEncouragingLossCriterion
4 | from .label_smoothed_cross_entropy_scst import AdjustLabelSmoothedCrossEntropySCSTCriterion
5 | from .refcoco_scst_loss import RefCOCOScstRewardCriterion
--------------------------------------------------------------------------------
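The exports above are custom fairseq criterions (label-smoothed cross-entropy, its SCST variants, an encouraging-loss variant, and CLIP/RefCOCO SCST rewards). For orientation, here is a minimal sketch, not taken from this repo, of the pattern such criterions follow in fairseq: subclass `FairseqCriterion`, register a name, and return `(loss, sample_size, logging_output)` from `forward`. The name and loss below are illustrative only.

```python
# Minimal illustrative criterion following the fairseq pattern used by the
# classes exported above. The registered name "toy_nll" is hypothetical.
import torch.nn.functional as F
from fairseq.criterions import FairseqCriterion, register_criterion


@register_criterion("toy_nll")
class ToyNLLCriterion(FairseqCriterion):
    def forward(self, model, sample, reduce=True):
        net_output = model(**sample["net_input"])
        # Log-probabilities over the vocabulary and the gold targets.
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        target = model.get_targets(sample, net_output)
        loss = F.nll_loss(
            lprobs.view(-1, lprobs.size(-1)),
            target.view(-1),
            reduction="sum" if reduce else "none",
        )
        sample_size = sample["ntokens"]
        logging_output = {"loss": loss.data, "sample_size": sample_size}
        return loss, sample_size, logging_output
```

A registered criterion is then selected at train time with `--criterion toy_nll`, which is how the run scripts in this repo pick the criterions imported above.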
/criterions/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/clip_scst_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/clip_scst_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/clip_scst_loss.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/clip_scst_loss.cpython-38.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/clip_scst_loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/clip_scst_loss.cpython-39.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/label_smoothed_cross_entropy.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/label_smoothed_cross_entropy.cpython-37.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/label_smoothed_cross_entropy.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/label_smoothed_cross_entropy.cpython-38.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/label_smoothed_cross_entropy.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/label_smoothed_cross_entropy.cpython-39.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/label_smoothed_cross_entropy_scst.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/label_smoothed_cross_entropy_scst.cpython-39.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/label_smoothed_encouraging_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/label_smoothed_encouraging_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/label_smoothed_encouraging_loss.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/label_smoothed_encouraging_loss.cpython-38.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/label_smoothed_encouraging_loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/label_smoothed_encouraging_loss.cpython-39.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/refcoco_scst_loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/refcoco_scst_loss.cpython-39.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/scst_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/scst_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/scst_loss.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/scst_loss.cpython-38.pyc
--------------------------------------------------------------------------------
/criterions/__pycache__/scst_loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/criterions/__pycache__/scst_loss.cpython-39.pyc
--------------------------------------------------------------------------------
/data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__init__.py
--------------------------------------------------------------------------------
/data/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/data/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/data/__pycache__/audio_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/audio_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/audio_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/audio_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/data/__pycache__/data_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/data_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/data_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/data_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/data/__pycache__/data_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/data_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/data/__pycache__/file_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/file_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/file_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/file_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/__pycache__/file_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/file_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/__pycache__/ofa_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/ofa_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/ofa_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/ofa_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/__pycache__/ofa_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/ofa_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/__pycache__/video_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/video_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/video_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/__pycache__/video_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__init__.py
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/audio_caption_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/audio_caption_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/audio_caption_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/audio_caption_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/caption_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/caption_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/caption_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/caption_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/caption_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/caption_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/image_gen_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/image_gen_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/image_gen_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/image_gen_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/image_gen_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/image_gen_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/refcoco_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/refcoco_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/refcoco_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/refcoco_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/refcoco_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/refcoco_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/snli_ve_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/snli_ve_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/snli_ve_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/snli_ve_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/snli_ve_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/snli_ve_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/video_caption_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/video_caption_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/video_caption_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/video_caption_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/video_vqa_gen_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/video_vqa_gen_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/video_vqa_gen_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/video_vqa_gen_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/vqa_gen_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/vqa_gen_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/vqa_gen_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/vqa_gen_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/mm_data/__pycache__/vqa_gen_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/mm_data/__pycache__/vqa_gen_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/data/pretrain_data/__pycache__/unify_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/pretrain_data/__pycache__/unify_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/pretrain_data/__pycache__/unify_dataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/pretrain_data/__pycache__/unify_dataset.cpython-38.pyc
--------------------------------------------------------------------------------
/data/pretrain_data/__pycache__/unify_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/data/pretrain_data/__pycache__/unify_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/examples/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/demo.gif
--------------------------------------------------------------------------------
/examples/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/demo.png
--------------------------------------------------------------------------------
/examples/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/logo.png
--------------------------------------------------------------------------------
/examples/output.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/output.gif
--------------------------------------------------------------------------------
/examples/results/caption.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/results/caption.jpg
--------------------------------------------------------------------------------
/examples/results/caption.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/results/caption.pdf
--------------------------------------------------------------------------------
/examples/results/figures.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/results/figures.pdf
--------------------------------------------------------------------------------
/examples/results/vg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/results/vg.jpg
--------------------------------------------------------------------------------
/examples/results/vg.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/results/vg.pdf
--------------------------------------------------------------------------------
/examples/results/vqa.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/results/vqa.jpg
--------------------------------------------------------------------------------
/examples/results/vqa.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/results/vqa.pdf
--------------------------------------------------------------------------------
/examples/teaser.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/teaser.gif
--------------------------------------------------------------------------------
/examples/teaser.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/teaser.mp4
--------------------------------------------------------------------------------
/examples/teaser_2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/teaser_2.gif
--------------------------------------------------------------------------------
/examples/unival.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/examples/unival.gif
--------------------------------------------------------------------------------
/fairseq/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## 👉 [Please follow one of these issue templates](https://github.com/pytorch/fairseq/issues/new/choose) 👈
2 |
3 | Note: to keep the backlog clean and actionable, issues may be immediately closed if they do not follow one of the above issue templates.
4 |
--------------------------------------------------------------------------------
/fairseq/.github/ISSUE_TEMPLATE/documentation.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: 📚 Documentation/Typos
3 | about: Report an issue related to documentation or a typo
4 | labels: 'documentation, needs triage'
5 | ---
6 |
7 | ## 📚 Documentation
8 |
9 | For typos and doc fixes, please go ahead and:
10 |
11 | 1. Create an issue.
12 | 2. Fix the typo.
13 | 3. Submit a PR.
14 |
15 | Thanks!
16 |
--------------------------------------------------------------------------------
/fairseq/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # Before submitting
2 |
3 | - [ ] Was this discussed/approved via a Github issue? (no need for typos, doc improvements)
4 | - [ ] Did you read the [contributor guideline](https://github.com/pytorch/fairseq/blob/main/CONTRIBUTING.md)?
5 | - [ ] Did you make sure to update the docs?
6 | - [ ] Did you write any new necessary tests?
7 |
8 | ## What does this PR do?
9 | Fixes # (issue).
10 |
11 | ## PR review
12 | Anyone in the community is free to review the PR once the tests have passed.
13 | If we didn't discuss your PR in Github issues, there's a high chance it will not be merged.
14 |
15 | ## Did you have fun?
16 | Make sure you had fun coding 🙃
17 |
--------------------------------------------------------------------------------
/fairseq/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "fairseq/model_parallel/megatron"]
2 | path = fairseq/model_parallel/megatron
3 | url = https://github.com/ngoyal2707/Megatron-LM
4 | branch = fairseq
5 |
--------------------------------------------------------------------------------
/fairseq/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = fairseq
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/fairseq/docs/_static/theme_overrides.css:
--------------------------------------------------------------------------------
1 | .wy-table-responsive table td kbd {
2 | white-space: nowrap;
3 | }
4 | .wy-table-responsive table td {
5 | white-space: normal !important;
6 | }
7 | .wy-table-responsive {
8 | overflow: visible !important;
9 | }
10 |
--------------------------------------------------------------------------------
/fairseq/docs/docutils.conf:
--------------------------------------------------------------------------------
1 | [writers]
2 | option-limit=0
3 |
--------------------------------------------------------------------------------
/fairseq/docs/fairseq.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/docs/fairseq.gif
--------------------------------------------------------------------------------
/fairseq/docs/fairseq_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/docs/fairseq_logo.png
--------------------------------------------------------------------------------
/fairseq/docs/modules.rst:
--------------------------------------------------------------------------------
1 | Modules
2 | =======
3 |
4 | Fairseq provides several stand-alone :class:`torch.nn.Module` classes that may
5 | be helpful when implementing a new :class:`~fairseq.models.BaseFairseqModel`.
6 |
7 | .. automodule:: fairseq.modules
8 | :members:
9 | :undoc-members:
10 |
--------------------------------------------------------------------------------
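A quick illustration of the stand-alone usage modules.rst describes; this is a minimal sketch assuming fairseq and torch are installed, and that `MultiheadAttention` is among the classes exported from `fairseq.modules` (as in recent fairseq releases):

```python
# Minimal sketch: using a stand-alone fairseq module outside a full model.
# Assumes fairseq/torch are installed; fairseq uses (time, batch, channel) shapes.
import torch
from fairseq.modules import MultiheadAttention

attn = MultiheadAttention(embed_dim=16, num_heads=4)
x = torch.randn(10, 2, 16)            # (tgt_len, batch, embed_dim)
out, weights = attn(query=x, key=x, value=x)
print(out.shape)                      # torch.Size([10, 2, 16])
```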
/fairseq/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx<2.0
2 | sphinx-argparse
3 |
--------------------------------------------------------------------------------
/fairseq/examples/.gitignore:
--------------------------------------------------------------------------------
1 | !*/*.sh
2 | !*/*.md
3 |
--------------------------------------------------------------------------------
/fairseq/examples/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | try:
7 | from fairseq.version import __version__ # noqa
8 | except ImportError:
9 | pass
10 |
--------------------------------------------------------------------------------
/fairseq/examples/adaptive_span/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | # automatically import any Python files in the current directory
10 | cur_dir = os.path.dirname(__file__)
11 | for file in os.listdir(cur_dir):
12 | path = os.path.join(cur_dir, file)
13 | if (
14 | not file.startswith("_")
15 | and not file.startswith(".")
16 | and (file.endswith(".py") or os.path.isdir(path))
17 | ):
18 | mod_name = file[: file.find(".py")] if file.endswith(".py") else file
19 | module = importlib.import_module(__name__ + "." + mod_name)
20 |
--------------------------------------------------------------------------------
/fairseq/examples/constrained_decoding/normalize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) Facebook, Inc. and its affiliates.
4 | #
5 | # This source code is licensed under the MIT license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | import sys
9 |
10 | from sacremoses.normalize import MosesPunctNormalizer
11 |
12 |
13 | def main(args):
14 | normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
15 | for line in sys.stdin:
16 | print(normalizer.normalize(line.rstrip()), flush=True)
17 |
18 |
19 | if __name__ == "__main__":
20 | import argparse
21 |
22 | parser = argparse.ArgumentParser()
23 | parser.add_argument("--lang", "-l", default="en")
24 | parser.add_argument("--penn", "-p", action="store_true")
25 | args = parser.parse_args()
26 |
27 | main(args)
28 |
--------------------------------------------------------------------------------
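normalize.py is a thin stdin wrapper around sacremoses; the normalizer can also be called directly. A minimal sketch (the sample string is illustrative):

```python
# Minimal sketch: calling the sacremoses normalizer that normalize.py wraps.
from sacremoses.normalize import MosesPunctNormalizer

normalizer = MosesPunctNormalizer(lang="en")
# Normalizes punctuation spacing and non-ASCII punctuation variants.
print(normalizer.normalize("Hello   ,  “world” !"))
```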
/fairseq/examples/discriminative_reranking_nmt/__init__.py:
--------------------------------------------------------------------------------
1 | from . import criterions, models, tasks # noqa
2 |
--------------------------------------------------------------------------------
/fairseq/examples/discriminative_reranking_nmt/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | from .discriminative_reranking_criterion import KLDivergenceRerankingCriterion
2 |
3 |
4 | __all__ = [
5 | "KLDivergenceRerankingCriterion",
6 | ]
7 |
--------------------------------------------------------------------------------
/fairseq/examples/discriminative_reranking_nmt/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .discriminative_reranking_model import DiscriminativeNMTReranker
2 |
3 |
4 | __all__ = [
5 | "DiscriminativeNMTReranker",
6 | ]
7 |
--------------------------------------------------------------------------------
/fairseq/examples/discriminative_reranking_nmt/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | from .discriminative_reranking_task import DiscriminativeRerankingNMTTask
2 |
3 |
4 | __all__ = [
5 | "DiscriminativeRerankingNMTTask",
6 | ]
7 |
--------------------------------------------------------------------------------
/fairseq/examples/fast_noisy_channel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import noisy_channel_translation # noqa
7 | from . import noisy_channel_sequence_generator # noqa
8 | from . import noisy_channel_beam_search # noqa
9 |
--------------------------------------------------------------------------------
/fairseq/examples/flores101/flores_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/flores101/flores_logo.png
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/decode/infer_viterbi.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | defaults:
4 | - model: null
5 |
6 | hydra:
7 | run:
8 | dir: ${common_eval.results_path}/viterbi
9 | sweep:
10 | dir: ${common_eval.results_path}
11 | subdir: viterbi
12 |
13 | task:
14 | _name: hubert_pretraining
15 | single_target: true
16 | fine_tuning: true
17 | data: ???
18 | normalize: ???
19 |
20 | decoding:
21 | type: viterbi
22 | unique_wer_file: true
23 | common_eval:
24 | results_path: ???
25 | path: ???
26 | post_process: letter
27 | dataset:
28 | max_tokens: 1100000
29 | gen_subset: ???
30 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/decode/run/submitit_slurm.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | hydra:
3 | launcher:
4 | cpus_per_task: ${distributed_training.distributed_world_size}
5 | gpus_per_node: ${distributed_training.distributed_world_size}
6 | tasks_per_node: ${hydra.launcher.gpus_per_node}
7 | nodes: 1
8 | mem_gb: 200
9 | timeout_min: 4320
10 | max_num_timeout: 50
11 | name: ${hydra.job.config_name}
12 | submitit_folder: ${hydra.sweep.dir}/submitit
13 |
14 | distributed_training:
15 | distributed_world_size: 1
16 | distributed_no_spawn: true
17 | distributed_port: 29761
18 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/decode/run/submitit_slurm_8gpu.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | hydra:
3 | launcher:
4 | cpus_per_task: ${distributed_training.distributed_world_size}
5 | gpus_per_node: ${distributed_training.distributed_world_size}
6 | tasks_per_node: ${hydra.launcher.gpus_per_node}
7 | nodes: 1
8 | mem_gb: 200
9 | timeout_min: 4320
10 | max_num_timeout: 50
11 | name: ${hydra.job.config_name}
12 | submitit_folder: ${hydra.sweep.dir}/submitit
13 |
14 | distributed_training:
15 | distributed_world_size: 8
16 | distributed_no_spawn: true
17 | distributed_port: 29761
18 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/finetune/ckpt/it1.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | task:
4 | normalize: false
5 |
6 | model:
7 | w2v_path: /checkpoint/wnhsu/w2v/hubert_final/iter1/hubert.km.randcrop.pmw1_0.puw0_0.grpnorm.ml10.mp0_8.untie.mxsz250000.ufreq1.maxtok1400000.MU400k.s1337.ngpu32/checkpoint_last.pt
8 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/finetune/lm/ls_4gram.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | criterion:
4 | wer_kenlm_model: /checkpoint/abdo/old_checkpoint02/datasets/librispeech/4-gram.bin
5 | wer_lexicon: /checkpoint/abdo/old_checkpoint02/datasets/librispeech/10h/raw/lexicon_ltr.lst
6 | wer_lm_weight: 2.0
7 | wer_word_score: -1.0
8 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/finetune/run/submitit_reg.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | hydra:
4 | launcher:
5 | cpus_per_task: 8
6 | gpus_per_node: 8
7 | tasks_per_node: ${hydra.launcher.gpus_per_node}
8 | nodes: 1
9 | comment: null
10 | mem_gb: 384
11 | timeout_min: 4320
12 | max_num_timeout: 100
13 | constraint: volta32gb
14 | name: ${hydra.job.config_name}/${hydra.job.override_dirname}
15 | submitit_folder: ${hydra.sweep.dir}/submitit/%j
16 |
17 | distributed_training:
18 | distributed_world_size: 8
19 | distributed_port: 29671
20 | nprocs_per_node: 8
21 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/pretrain/data/iter1.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | task:
4 | label_dir: ???
5 | labels: ["km"]
6 |
7 | model:
8 | label_rate: 100
9 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/pretrain/data/iter2.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | task:
4 | label_dir: ???
5 | labels: ["km"]
6 |
7 | model:
8 | label_rate: 50
9 |
--------------------------------------------------------------------------------
/fairseq/examples/hubert/config/pretrain/run/submitit_reg.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 |
3 | hydra:
4 | launcher:
5 | cpus_per_task: 8
6 | gpus_per_node: 8
7 | tasks_per_node: ${hydra.launcher.gpus_per_node}
8 | nodes: 4
9 | comment: null
10 | mem_gb: 384
11 | timeout_min: 4320
12 | max_num_timeout: 100
13 | constraint: volta32gb
14 | name: ${hydra.job.config_name}/${hydra.job.override_dirname}
15 | submitit_folder: ${hydra.sweep.dir}/submitit/%j
16 |
17 | distributed_training:
18 | distributed_world_size: 32
19 | distributed_port: 29671
20 | nprocs_per_node: 8
21 |
--------------------------------------------------------------------------------
/fairseq/examples/laser/laser_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .laser_task import * # noqa
7 | from .laser_lstm import * # noqa
8 | from .laser_transformer import * # noqa
9 |
--------------------------------------------------------------------------------
/fairseq/examples/latent_depth/latent_depth_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import multilingual_translation_latent_depth # noqa
7 | from .loss import latent_depth # noqa
8 | from .models import latent_multilingual_transformer # noqa
9 | from .modules import latent_layers # noqa
10 |
--------------------------------------------------------------------------------
/fairseq/examples/latent_depth/latent_depth_src/loss/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/latent_depth/latent_depth_src/loss/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/latent_depth/latent_depth_src/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/latent_depth/latent_depth_src/modules/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/linformer/linformer_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .models import linformer_roberta # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/linformer/linformer_src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/linformer/linformer_src/models/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/linformer/linformer_src/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/linformer/linformer_src/modules/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/m2m_100/tokenizers/README.md:
--------------------------------------------------------------------------------
1 | # M2M-100 Tokenization
2 |
3 | We apply different tokenization strategies for different languages following the existing literature. Here we provide tok.sh, a tokenizer that can be used to reproduce our results.
4 |
5 | To reproduce the results, follow these steps:
6 |
7 | ```
8 | tgt_lang=...
9 | reference_translation=...
10 | cat generation_output | grep -P "^H" | sort -V | cut -f 3- | sh tok.sh $tgt_lang > hyp
11 | cat $reference_translation |sh tok.sh $tgt_lang > ref
12 | sacrebleu -tok 'none' ref < hyp
13 | ```
14 |
15 | ## Installation
16 |
17 | Tools needed for all the languages except Arabic can be installed by running install_dependencies.sh.
18 | If you want to evaluate Arabic models, please follow the instructions provided here: http://alt.qcri.org/tools/arabic-normalizer/ to install the Arabic normalizer.
19 |
--------------------------------------------------------------------------------
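The last step of the pipeline above can also be scored from Python; a minimal sketch assuming the sacrebleu package, with placeholder hypothesis/reference strings standing in for the tok.sh outputs:

```python
# Minimal sketch: the `sacrebleu -tok 'none' ref < hyp` step, from Python.
import sacrebleu

hyps = ["das ist ein test"]            # placeholder tokenized hypotheses
refs = [["das ist ein test"]]          # one inner list per reference stream
bleu = sacrebleu.corpus_bleu(hyps, refs, tokenize="none")
print(bleu.score)
```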
/fairseq/examples/m2m_100/tokenizers/seg_ja.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | SCRIPT=`realpath $0`
7 | KYTEA=`dirname $SCRIPT`/thirdparty/kytea
8 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib
9 | export PATH=$PATH:"$KYTEA/bin"
10 |
11 | cat - | tr -d "[:blank:]" | kytea -notags
12 |
--------------------------------------------------------------------------------
/fairseq/examples/m2m_100/tokenizers/seg_ko.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | SCRIPT=`realpath $0`
7 | MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2
8 |
9 | export PATH=$PATH:"$MECAB/bin":"$MECAB/lib"
10 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib"
11 |
12 | cat - | mecab -O wakati
13 |
--------------------------------------------------------------------------------
/fairseq/examples/m2m_100/tokenizers/thirdparty/.gitignore:
--------------------------------------------------------------------------------
1 | seg_my.py
2 | indic_nlp_library/
3 | indic_nlp_resources/
4 | kytea/
5 | mecab-0.996-ko-0.9.2.tar.gz
6 | mecab-0.996-ko-0.9.2/
7 | mosesdecoder/
8 | wat2020.my-en.zip
9 | wat2020.my-en/
10 | wmt16-scripts/
11 | mecab-ko-dic-2.1.1-20180720/
12 | mecab-ko-dic-2.1.1-20180720.tar.gz
--------------------------------------------------------------------------------
/fairseq/examples/m2m_100/tokenizers/tokenize_indic.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | # Use: echo {text} | python tokenize_indic.py {language}
8 |
9 | import sys
10 |
11 | from indicnlp.normalize.indic_normalize import IndicNormalizerFactory
12 | from indicnlp.tokenize.indic_tokenize import trivial_tokenize
13 |
14 |
15 | factory = IndicNormalizerFactory()
16 | normalizer = factory.get_normalizer(
17 | sys.argv[1], remove_nuktas=False, nasals_mode="do_nothing"
18 | )
19 |
20 | for line in sys.stdin:
21 | normalized_line = normalizer.normalize(line.strip())
22 | tokenized_line = " ".join(trivial_tokenize(normalized_line, sys.argv[1]))
23 | print(tokenized_line)
24 |
--------------------------------------------------------------------------------
/fairseq/examples/m2m_100/tokenizers/tokenize_thai.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import sys
8 |
9 | from pythainlp import word_tokenize
10 |
11 |
12 | for line in sys.stdin:
13 | print(" ".join(word_tokenize(line.strip())))
14 |
--------------------------------------------------------------------------------
/fairseq/examples/m2m_100/tokenizers/tokenize_zh.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 |
8 | import fileinput
9 |
10 | import sacrebleu
11 |
12 |
13 | for line in fileinput.input():
14 | print(sacrebleu.tokenize_zh(line))
15 |
--------------------------------------------------------------------------------
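Note that `sacrebleu.tokenize_zh` is only exported at the top level in older sacrebleu releases. A hedged sketch of the equivalent call on sacrebleu 2.x, where the tokenizer is a class (module path assumed from the 2.x layout):

```python
# Sketch for sacrebleu >= 2.0, where tokenize_zh is no longer top-level.
from sacrebleu.tokenizers.tokenizer_zh import TokenizerZh

tokenize_zh = TokenizerZh()
print(tokenize_zh("这是一个测试。"))
```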
/fairseq/examples/multilingual/ML50_langs.txt:
--------------------------------------------------------------------------------
1 | ar_AR
2 | cs_CZ
3 | de_DE
4 | en_XX
5 | es_XX
6 | et_EE
7 | fi_FI
8 | fr_XX
9 | gu_IN
10 | hi_IN
11 | it_IT
12 | ja_XX
13 | kk_KZ
14 | ko_KR
15 | lt_LT
16 | lv_LV
17 | my_MM
18 | ne_NP
19 | nl_XX
20 | ro_RO
21 | ru_RU
22 | si_LK
23 | tr_TR
24 | vi_VN
25 | zh_CN
26 | af_ZA
27 | az_AZ
28 | bn_IN
29 | fa_IR
30 | he_IL
31 | hr_HR
32 | id_ID
33 | ka_GE
34 | km_KH
35 | mk_MK
36 | ml_IN
37 | mn_MN
38 | mr_IN
39 | pl_PL
40 | ps_AF
41 | pt_XX
42 | sv_SE
43 | sw_KE
44 | ta_IN
45 | te_IN
46 | th_TH
47 | tl_XX
48 | uk_UA
49 | ur_PK
50 | xh_ZA
51 | gl_ES
52 | sl_SI
--------------------------------------------------------------------------------
/fairseq/examples/multilingual/data_scripts/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Install dependencies
3 | ```bash
4 | pip install -r requirement.txt
5 | ```
6 |
7 | # Download the data set
8 | ```bash
9 | export WORKDIR_ROOT=
10 |
11 | ```
12 | The downloaded data will be at $WORKDIR_ROOT/ML50
13 |
14 | # Preprocess the data
15 | Install SPM [here](https://github.com/google/sentencepiece)
16 | ```bash
17 | export WORKDIR_ROOT=
18 | export SPM_PATH=
19 | ```
20 | * $WORKDIR_ROOT/ML50/raw: extracted raw data
21 | * $WORKDIR_ROOT/ML50/dedup: dedup data
22 | * $WORKDIR_ROOT/ML50/clean: data with valid and test sentences removed from the dedup data
23 |
24 |
25 |
--------------------------------------------------------------------------------
/fairseq/examples/multilingual/data_scripts/requirement.txt:
--------------------------------------------------------------------------------
1 | wget
2 | pandas
--------------------------------------------------------------------------------
/fairseq/examples/multilingual/data_scripts/utils/strip_sgm.sh:
--------------------------------------------------------------------------------
1 | grep "seg id" | sed 's/<seg id="[0-9]*">//g' | sed 's/<\/seg>//g'
2 |
--------------------------------------------------------------------------------
/fairseq/examples/noisychannel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .rerank_options import * # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/pointer_generator/pointer_generator_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import transformer_pg # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/roberta/commonsense_qa/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import commonsense_qa_task # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/roberta/commonsense_qa/download_cqa_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | OUTDIR=data/CommonsenseQA
8 |
9 | mkdir -p $OUTDIR
10 |
11 | wget -O $OUTDIR/train.jsonl https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl
12 | wget -O $OUTDIR/valid.jsonl https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl
13 | wget -O $OUTDIR/test.jsonl https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl
14 | wget -O $OUTDIR/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt
15 |
--------------------------------------------------------------------------------
/fairseq/examples/roberta/config/pretraining/base.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | common:
3 | fp16: true
4 | log_format: json
5 | log_interval: 200
6 |
7 | checkpoint:
8 | no_epoch_checkpoints: true
9 |
10 | task:
11 | _name: masked_lm
12 | data: ???
13 | sample_break_mode: complete
14 | tokens_per_sample: 512
15 |
16 | criterion: masked_lm
17 |
18 | dataset:
19 | batch_size: 16
20 | ignore_unused_valid_subsets: true
21 |
22 | optimizer:
23 | _name: adam
24 | weight_decay: 0.01
25 | adam_betas: (0.9,0.98)
26 | adam_eps: 1e-06
27 |
28 | lr_scheduler:
29 | _name: polynomial_decay
30 | warmup_updates: 10000
31 |
32 | optimization:
33 | clip_norm: 0
34 | lr: [0.0005]
35 | max_update: 125000
36 | update_freq: [16]
37 |
38 | model:
39 | _name: roberta
40 | max_positions: 512
41 | dropout: 0.1
42 | attention_dropout: 0.1
43 |
--------------------------------------------------------------------------------
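For orientation, the effective batch size implied by this config is dataset.batch_size × optimization.update_freq × the number of GPUs; a quick check, assuming the 8-GPU setup the fairseq RoBERTa pretraining README describes (the world size is not pinned by the config itself):

```python
# Sketch: effective batch size implied by base.yaml.
# world_size=8 is an assumption (the config does not fix it).
batch_size = 16    # dataset.batch_size, sentences per GPU
update_freq = 16   # optimization.update_freq, gradient accumulation steps
world_size = 8
print(batch_size * update_freq * world_size)  # 2048 sentences per update
```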
/fairseq/examples/roberta/wsc/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import wsc_criterion # noqa
7 | from . import wsc_task # noqa
8 |
--------------------------------------------------------------------------------
/fairseq/examples/rxf/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import rxf_src # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/rxf/rxf_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/simultaneous_translation/README.md:
--------------------------------------------------------------------------------
1 | # Simultaneous Translation
2 | Examples of simultaneous translation in fairseq
3 | - [English-to-Japanese text-to-text wait-k model](docs/enja-waitk.md)
4 | - [English-to-German text-to-text monotonic multihead attention model](docs/ende-mma.md)
5 | - [English-to-German speech-to-text simultaneous translation model](../speech_to_text/docs/simulst_mustc_example.md)
6 |
--------------------------------------------------------------------------------
/fairseq/examples/simultaneous_translation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import models # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/simultaneous_translation/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | for file in sorted(os.listdir(os.path.dirname(__file__))):
11 | if file.endswith(".py") and not file.startswith("_"):
12 | model_name = file[: file.find(".py")]
13 | importlib.import_module(
14 | "examples.simultaneous_translation.models." + model_name
15 | )
16 |
--------------------------------------------------------------------------------
/fairseq/examples/simultaneous_translation/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | import os
8 | import importlib
9 | from fairseq import registry
10 |
11 | (
12 | build_monotonic_attention,
13 | register_monotonic_attention,
14 | MONOTONIC_ATTENTION_REGISTRY,
15 | _,
16 | ) = registry.setup_registry("--simul-type")
17 |
18 | for file in sorted(os.listdir(os.path.dirname(__file__))):
19 | if file.endswith(".py") and not file.startswith("_"):
20 | model_name = file[: file.find(".py")]
21 | importlib.import_module(
22 | "examples.simultaneous_translation.modules." + model_name
23 | )
24 |
--------------------------------------------------------------------------------
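The `setup_registry` call above returns a builder, a registration decorator, and the registry dict; each module in this package registers its attention class via the decorator. A hedged sketch of the pattern (the name and class are illustrative, not taken from this package):

```python
# Sketch of the registry pattern wired up above; names are illustrative.
from examples.simultaneous_translation.modules import (
    MONOTONIC_ATTENTION_REGISTRY,
    register_monotonic_attention,
)

@register_monotonic_attention("my_monotonic_attention")
class MyMonotonicAttention:
    pass

assert "my_monotonic_attention" in MONOTONIC_ATTENTION_REGISTRY
```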
/fairseq/examples/simultaneous_translation/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the criterions/ directory
11 | for file in sorted(os.listdir(os.path.dirname(__file__))):
12 | if file.endswith(".py") and not file.startswith("_"):
13 | module = file[: file.find(".py")]
14 | importlib.import_module("examples.simultaneous_translation.utils." + module)
15 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/__init__.py:
--------------------------------------------------------------------------------
1 | from . import criterions, models, tasks # noqa
2 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
5 | # ASG loss requires flashlight bindings
6 | files_to_skip = set()
7 | try:
8 | import flashlight.lib.sequence.criterion
9 | except ImportError:
10 | files_to_skip.add("ASG_loss.py")
11 |
12 | for file in sorted(os.listdir(os.path.dirname(__file__))):
13 | if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
14 | criterion_name = file[: file.find(".py")]
15 | importlib.import_module(
16 | "examples.speech_recognition.criterions." + criterion_name
17 | )
18 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .asr_dataset import AsrDataset
7 |
8 |
9 | __all__ = [
10 | "AsrDataset",
11 | ]
12 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/kaldi/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/speech_recognition/kaldi/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/kaldi/config/kaldi_initializer.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | data_dir: ???
4 | fst_dir: ???
5 | in_labels: ???
6 | kaldi_root: ???
7 | lm_arpa: ???
8 | blank_symbol:
9 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/models/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
5 | for file in sorted(os.listdir(os.path.dirname(__file__))):
6 | if file.endswith(".py") and not file.startswith("_"):
7 | model_name = file[: file.find(".py")]
8 | importlib.import_module("examples.speech_recognition.models." + model_name)
9 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/new/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/speech_recognition/new/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/new/conf/hydra/sweeper/ax.yaml:
--------------------------------------------------------------------------------
1 | # @package hydra.sweeper
2 | _target_: hydra_plugins.hydra_ax_sweeper.ax_sweeper.AxSweeper
3 | max_batch_size: null
4 | ax_config:
5 | max_trials: 128
6 | early_stop:
7 | minimize: true
8 | max_epochs_without_improvement: 32
9 | epsilon: 1.0e-05
10 | experiment:
11 | name: ${dataset.gen_subset}
12 | objective_name: wer
13 | minimize: true
14 | parameter_constraints: null
15 | outcome_constraints: null
16 | status_quo: null
17 | client:
18 | verbose_logging: false
19 | random_seed: null
20 | params:
21 | decoding.lmweight:
22 | type: range
23 | bounds: [0.0, 5.0]
24 | decoding.wordscore:
25 | type: range
26 | bounds: [-5.0, 5.0]
27 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/new/conf/infer.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | defaults:
4 | - task: null
5 | - model: null
6 |
7 | hydra:
8 | run:
9 | dir: ${common_eval.results_path}/${dataset.gen_subset}
10 | sweep:
11 | dir: ${common_eval.results_path}
12 | subdir: ${dataset.gen_subset}
13 | common_eval:
14 | results_path: null
15 | path: null
16 | post_process: letter
17 | quiet: true
18 | dataset:
19 | max_tokens: 1000000
20 | gen_subset: test
21 | distributed_training:
22 | distributed_world_size: 1
23 | decoding:
24 | beam: 5
25 | type: viterbi
26 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/new/decoders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/speech_recognition/new/decoders/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/speech_recognition/new/decoders/viterbi_decoder.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) Facebook, Inc. and its affiliates.
4 | #
5 | # This source code is licensed under the MIT license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | import torch
9 |
10 | from typing import List, Dict
11 |
12 | from .base_decoder import BaseDecoder
13 |
14 |
15 | class ViterbiDecoder(BaseDecoder):
16 | def decode(
17 | self,
18 | emissions: torch.FloatTensor,
19 | ) -> List[List[Dict[str, torch.LongTensor]]]:
20 | def get_pred(e):
21 | toks = e.argmax(dim=-1).unique_consecutive()
22 | return toks[toks != self.blank]
23 |
24 | return [[{"tokens": get_pred(x), "score": 0}] for x in emissions]
25 |
--------------------------------------------------------------------------------
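The decode above is greedy CTC-style collapsing: per-frame argmax, merge consecutive repeats, drop blanks. A self-contained sketch of what `get_pred` computes on a toy emission matrix (blank index 0 is an assumption; the decoder takes it from `self.blank`):

```python
# Sketch: get_pred on a toy (time, vocab) emission matrix, blank id = 0.
import torch

emissions = torch.tensor([
    [0.1, 0.8, 0.1],    # frame 1 -> token 1
    [0.1, 0.7, 0.2],    # frame 2 -> token 1 (repeat, merged away)
    [0.9, 0.05, 0.05],  # frame 3 -> blank (dropped)
    [0.2, 0.1, 0.7],    # frame 4 -> token 2
])
toks = emissions.argmax(dim=-1).unique_consecutive()
print(toks[toks != 0])  # tensor([1, 2])
```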
/fairseq/examples/speech_recognition/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
5 | for file in sorted(os.listdir(os.path.dirname(__file__))):
6 | if file.endswith(".py") and not file.startswith("_"):
7 | task_name = file[: file.find(".py")]
8 | importlib.import_module("examples.speech_recognition.tasks." + task_name)
9 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/README.md:
--------------------------------------------------------------------------------
1 | Speech Synthesis (S^2)
2 | ===
3 |
4 | Speech synthesis with fairseq.
5 |
6 | - Autoregressive and non-autoregressive models
7 | - Multi-speaker synthesis
8 | - Audio preprocessing
9 | - Automatic metrics
10 | - Similar data configuration as [S2T](../speech_to_text/README.md)
11 |
12 |
13 | ## Examples
14 | - [Single-speaker synthesis on LJSpeech](docs/ljspeech_example.md)
15 | - [Multi-speaker synthesis on VCTK](docs/vctk_example.md)
16 | - [Multi-speaker synthesis on Common Voice](docs/common_voice_example.md)
17 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_synthesis/preprocessing/denoiser/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_text_joint_to_text/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import tasks, criterions, models # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_text_joint_to_text/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | for file in os.listdir(os.path.dirname(__file__)):
11 | if file.endswith(".py") and not file.startswith("_"):
12 | criterion_name = file[: file.find(".py")]
13 | importlib.import_module(
14 | "examples.speech_text_joint_to_text.criterions." + criterion_name
15 | )
16 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_text_joint_to_text/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | for file in os.listdir(os.path.dirname(__file__)):
10 | if file.endswith(".py") and not file.startswith("_"):
11 | model_name = file[: file.find(".py")]
12 | importlib.import_module(
13 | "examples.speech_text_joint_to_text.models." + model_name
14 | )
15 |
--------------------------------------------------------------------------------
/fairseq/examples/speech_text_joint_to_text/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | for file in os.listdir(os.path.dirname(__file__)):
10 | if file.endswith(".py") and not file.startswith("_"):
11 | task_name = file[: file.find(".py")]
12 | importlib.import_module("examples.speech_text_joint_to_text.tasks." + task_name)
13 |
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/metrics/README.md:
--------------------------------------------------------------------------------
1 | # GSLM Metrics
2 |
3 | ## ASR Metrics
4 | The suite of metrics here uses an ASR model to transcribe the synthesized speech into text, and then uses text-based metrics. We also use word error rate from ASR transcription itself as one of the metrics. [More details](asr_metrics)
5 |
6 | ## ABX Metrics
7 | We use [ABX](https://www.semanticscholar.org/paper/ABX-Discriminability-Measures-and-Applications-Schatz/13d3537228f728c1063cc83743cb118bba3367a0) to evaluate how well-separated phonetic categories are with quantized representations. [More details](abx_metrics)
8 |
9 | ## sWUGGY and sBLIMP
10 | We refer to [ZeroSpeech challenge](https://www.zerospeech.com/2021/track_s.html#scoring-based-metrics) for details on the sWUGGY and sBLIMP metrics.
11 |
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/dict.ltr.txt:
--------------------------------------------------------------------------------
1 | | 94802
2 | E 51860
3 | T 38431
4 | A 33152
5 | O 31495
6 | N 28855
7 | I 28794
8 | H 27187
9 | S 26071
10 | R 23546
11 | D 18289
12 | L 16308
13 | U 12400
14 | M 10685
15 | W 10317
16 | C 9844
17 | F 9062
18 | G 8924
19 | Y 8226
20 | P 6890
21 | B 6339
22 | V 3936
23 | K 3456
24 | ' 1023
25 | X 636
26 | J 598
27 | Q 437
28 | Z 213
29 |
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/speech2unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/textless_nlp/gslm/speech2unit/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import List, Tuple
7 |
8 |
9 | def get_audio_files(manifest_path: str) -> Tuple[str, List[str], List[int]]:
10 | fnames, sizes = [], []
11 | with open(manifest_path, "r") as f:
12 | root_dir = f.readline().strip()
13 | for line in f:
14 | items = line.strip().split("\t")
15 | assert (
16 | len(items) == 2
17 | ), f"File must have two columns separated by tab. Got {line}"
18 | fnames.append(items[0])
19 | sizes.append(int(items[1]))
20 | return root_dir, fnames, sizes
21 |
--------------------------------------------------------------------------------
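The manifest format `get_audio_files` expects is the wav2vec-style one: a root directory on the first line, then tab-separated `relative_path<TAB>num_samples` rows. A small sketch that writes and parses a toy manifest (file names are illustrative):

```python
# Sketch: the manifest layout parsed by get_audio_files above.
from examples.textless_nlp.gslm.speech2unit.clustering.utils import get_audio_files

with open("manifest.tsv", "w") as f:
    f.write("/data/audio\n")       # first line: root directory
    f.write("a.wav\t16000\n")      # then: <relative_path>\t<num_samples>
    f.write("b.wav\t32000\n")

root, fnames, sizes = get_audio_files("manifest.tsv")
print(root, fnames, sizes)  # /data/audio ['a.wav', 'b.wav'] [16000, 32000]
```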
/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py:
--------------------------------------------------------------------------------
1 | """ from https://github.com/keithito/tacotron """
2 |
3 | '''
4 | Defines the set of symbols used in text input to the model.
5 |
6 | The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. '''
7 | from . import cmudict
8 |
9 | _pad = '_'
10 | _punctuation = '!\'(),.:;? '
11 | _special = '-'
12 | _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
13 |
14 | # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
15 | _arpabet = ['@' + s for s in cmudict.valid_symbols]
16 |
17 | # Export all symbols:
18 | symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
19 |
--------------------------------------------------------------------------------
/fairseq/examples/translation_moe/translation_moe_src/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import translation_moe # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/truncated_bptt/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import transformer_xl_model, truncated_bptt_lm_task # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/wav2vec/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/examples/wav2vec/unsupervised/__init__.py
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/config/generate/viterbi.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | fairseq:
4 | task:
5 | _name: unpaired_audio_text
6 | labels: phn
7 | data: ???
8 | sort_by_length: false
9 | shuffle: false
10 | text_data: ''
11 |
12 | common_eval:
13 | path: ???
14 | quiet: true
15 |
16 | dataset:
17 | gen_subset: valid
18 | batch_size: 1
19 |
20 | w2l_decoder: VITERBI
21 | post_process: silence
22 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .extracted_features_dataset import ExtractedFeaturesDataset
7 | from .random_input_dataset import RandomInputDataset
8 |
9 |
10 | __all__ = [
11 | "ExtractedFeaturesDataset",
12 | "RandomInputDataset",
13 | ]
14 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | for idx, line in enumerate(sys.stdin):
4 | print(f"utt{idx:010d} {line}", end='')
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -u
4 |
5 | val_sets="dev_other"
6 | graph_name=graph
7 | decode_suffix=""
8 | decode_script="steps/decode_fmllr.sh"
9 | decode_args=""
10 | nj=60
11 |
12 | . ./cmd.sh
13 | . ./path.sh
14 | . parse_options.sh
15 |
16 | set -x
17 | exp_dir=$1
18 | data_root=$2
19 | lang_test=$3
20 |
21 | graph=$exp_dir/$graph_name
22 |
23 | if [ ! -d $graph ]; then
24 | utils/mkgraph.sh $lang_test $exp_dir $graph
25 | fi
26 |
27 | for part in $val_sets; do
28 | dec_dir=$exp_dir/decode${decode_suffix}_${part}
29 | if [ ! -d $dec_dir ]; then
30 | echo "decoding $part for $exp_dir"
31 | $decode_script --nj $nj --cmd "$decode_cmd" $decode_args \
32 | $graph $data_root/$part $dec_dir &
33 | else
34 | echo "$dec_dir exists. skip"
35 | fi
36 | done
37 |
38 | wait
39 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh:
--------------------------------------------------------------------------------
1 | export KALDI_ROOT=`pwd`/../../..
2 | export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH
3 | [ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1
4 | . $KALDI_ROOT/tools/config/common_path.sh
5 | export LC_ALL=C
6 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .wav2vec_u import Wav2vec_U
7 |
8 |
9 | __all__ = [
10 | "Wav2vec_U",
11 | ]
12 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/scripts/copy_labels.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import sys
8 |
9 | for idx, line in enumerate(sys.stdin):
10 | print(f"utt{idx:010d} {line}", end="")
11 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import sys
8 |
9 |
10 | def main():
11 | for line in sys.stdin:
12 | print(line.replace(" ", "").replace("|", " ").strip())
13 |
14 |
15 | if __name__ == "__main__":
16 | main()
17 |
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/scripts/normalize_text.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import regex
8 | import sys
9 |
10 |
11 | def main():
12 | filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
13 |
14 | for line in sys.stdin:
15 | line = line.strip()
16 | line = filter_r.sub(" ", line)
17 | line = " ".join(line.split())
18 | print(line)
19 |
20 |
21 | if __name__ == "__main__":
22 | main()
23 |
--------------------------------------------------------------------------------
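The compiled pattern keeps letters (\p{L}), digits (\p{N}), combining marks (\p{M}), apostrophes, hyphens, and spaces, and maps everything else to a space. A quick self-contained sketch (the sample string is illustrative):

```python
# Sketch: what the Unicode-property filter in normalize_text.py keeps.
import regex

filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
line = "Café #42 & don't stop!"
line = filter_r.sub(" ", line)
print(" ".join(line.split()))  # Café 42 don't stop
```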
/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | import sys
8 |
9 |
10 | def main():
11 | for line in sys.stdin:
12 | print(" ".join(list(line.strip().replace(" ", "|"))) + " |")
13 |
14 |
15 | if __name__ == "__main__":
16 | main()
17 |
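18 | # Usage sketch (illustrative, not in the original script):
19 | #   $ echo "hello world" | python wrd_to_ltr.py
20 | #   h e l l o | w o r l d |
21 | # Words become space-separated letters with "|" marking word boundaries, the
22 | # letter-level ("ltr") label format used for wav2vec fine-tuning.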
--------------------------------------------------------------------------------
/fairseq/examples/wav2vec/unsupervised/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .unpaired_audio_text import UnpairedAudioText
7 |
8 |
9 | __all__ = [
10 | "UnpairedAudioText",
11 | ]
12 |
--------------------------------------------------------------------------------
/fairseq/fairseq/benchmark/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | # import models/tasks to register them
7 | from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa
8 |
--------------------------------------------------------------------------------
/fairseq/fairseq/clib/libnat_cuda/edit_dist.h:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2017-present, Facebook, Inc.
3 | * All rights reserved.
4 | *
5 | * This source code is licensed under the license found in the
6 | * LICENSE file in the root directory of this source tree.
7 | */
8 |
9 | #pragma once
10 |
11 | #include <torch/extension.h>
12 |
13 | torch::Tensor LevenshteinDistanceCuda(
14 | torch::Tensor source,
15 | torch::Tensor target,
16 | torch::Tensor source_length,
17 | torch::Tensor target_length);
18 |
19 | torch::Tensor GenerateDeletionLabelCuda(
20 | torch::Tensor source,
21 | torch::Tensor operations);
22 |
23 | std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda(
24 | torch::Tensor source,
25 | torch::Tensor operations);
26 |
--------------------------------------------------------------------------------
/fairseq/fairseq/config/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/fairseq/config/config.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | hydra:
4 | run:
5 | dir: .
6 |
7 | defaults:
8 | - _self_
9 | - task: null
10 | - model: null
11 | - criterion: cross_entropy
12 | - optimizer: null
13 | - lr_scheduler: fixed
14 | - bpe: null
15 | - tokenizer: null
16 | - scoring: null
17 | - generation: null
18 | - common_eval: null
19 | - eval_lm: null
20 |
--------------------------------------------------------------------------------
/fairseq/fairseq/config/model/wav2vec/vq_wav2vec_gumbel.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 | activation: gelu
3 | vq_type: gumbel
4 | vq_depth: 2
5 | combine_groups: true
6 |
--------------------------------------------------------------------------------
/fairseq/fairseq/config/model/wav2vec2/wav2vec2_base.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | quantize_targets: true
4 | final_dim: 256
5 | encoder_layerdrop: 0.05
6 | dropout_input: 0.1
7 | dropout_features: 0.1
8 | feature_grad_mult: 0.1
9 |
--------------------------------------------------------------------------------
/fairseq/fairseq/config/model/wav2vec2/wav2vec2_large.yaml:
--------------------------------------------------------------------------------
1 | # @package _group_
2 |
3 | quantize_targets: true
4 | extractor_mode: layer_norm
5 | layer_norm_first: true
6 | final_dim: 768
7 | latent_temp: [2.0,0.1,0.999995]
8 | encoder_layerdrop: 0.0
9 | dropout_input: 0.0
10 | dropout_features: 0.0
11 | dropout: 0.0
12 | attention_dropout: 0.0
13 | conv_bias: true
14 |
15 | encoder_layers: 24
16 | encoder_embed_dim: 1024
17 | encoder_ffn_embed_dim: 4096
18 | encoder_attention_heads: 16
19 |
20 | feature_grad_mult: 1.0
21 |
--------------------------------------------------------------------------------
/fairseq/fairseq/data/audio/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/fairseq/data/audio/__init__.py
--------------------------------------------------------------------------------
/fairseq/fairseq/data/encoders/characters.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | from fairseq.data.encoders import register_bpe
8 |
9 |
10 | SPACE = chr(32)
11 | SPACE_ESCAPE = chr(9601)
12 |
13 |
14 | @register_bpe("characters")
15 | class Characters(object):
16 | def __init__(self, *unused):
17 | pass
18 |
19 | @staticmethod
20 | def add_args(parser):
21 | pass
22 |
23 | @staticmethod
24 | def encode(x: str) -> str:
25 | escaped = x.replace(SPACE, SPACE_ESCAPE)
26 | return SPACE.join(list(escaped))
27 |
28 | @staticmethod
29 | def decode(x: str) -> str:
30 | return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
31 |
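32 | # Round-trip sketch (illustrative, not in the original file):
33 | #   enc = Characters.encode("ab c")  # -> "a b ▁ c" (space escaped to U+2581)
34 | #   Characters.decode(enc)           # -> "ab c"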
--------------------------------------------------------------------------------
/fairseq/fairseq/data/encoders/space_tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import re
7 |
8 | from fairseq.data.encoders import register_tokenizer
9 | from fairseq.dataclass import FairseqDataclass
10 |
11 |
12 | @register_tokenizer("space", dataclass=FairseqDataclass)
13 | class SpaceTokenizer(object):
14 | def __init__(self, *unused):
15 | self.space_tok = re.compile(r"\s+")
16 |
17 | def encode(self, x: str) -> str:
18 | return self.space_tok.sub(" ", x)
19 |
20 | def decode(self, x: str) -> str:
21 | return x
22 |
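23 | # Example (illustrative, not in the original file): encode() collapses runs of
24 | # whitespace to single spaces; decode() is the identity:
25 | #   SpaceTokenizer().encode("a \t b\n c")  # -> "a b c"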
--------------------------------------------------------------------------------
/fairseq/fairseq/data/huffman/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .huffman_coder import HuffmanCodeBuilder, HuffmanCoder
7 | from .huffman_mmap_indexed_dataset import (
8 | HuffmanMMapIndex,
9 | HuffmanMMapIndexedDataset,
10 | HuffmanMMapIndexedDatasetBuilder,
11 | vocab_file_path,
12 | )
13 |
14 | __all__ = [
15 | "HuffmanCoder",
16 | "HuffmanCodeBuilder",
17 | "HuffmanMMapIndexedDatasetBuilder",
18 | "HuffmanMMapIndexedDataset",
19 | "HuffmanMMapIndex",
20 | "vocab_file_path",
21 | ]
22 |
--------------------------------------------------------------------------------
/fairseq/fairseq/data/id_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class IdDataset(FairseqDataset):
12 | def __getitem__(self, index):
13 | return index
14 |
15 | def __len__(self):
16 | return 0
17 |
18 | def collater(self, samples):
19 | return torch.tensor(samples)
20 |
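21 | # Note (an editor's reading of the code, not in the original file): __len__
22 | # returning 0 is intentional; IdDataset is used inside composite datasets
23 | # (e.g. fairseq's NestedDictionaryDataset) whose own length drives iteration,
24 | # and it simply echoes back the sampled indices so outputs can be traced.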
--------------------------------------------------------------------------------
/fairseq/fairseq/data/legacy/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .block_pair_dataset import BlockPairDataset
7 | from .masked_lm_dataset import MaskedLMDataset
8 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary
9 |
10 |
11 | __all__ = [
12 | "BertDictionary",
13 | "BlockPairDataset",
14 | "MaskedLMDataset",
15 | "MaskedLMDictionary",
16 | ]
17 |
--------------------------------------------------------------------------------
/fairseq/fairseq/data/lru_cache_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from functools import lru_cache
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class LRUCacheDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, token=None):
13 | super().__init__(dataset)
14 |
15 | @lru_cache(maxsize=8)
16 | def __getitem__(self, index):
17 | return self.dataset[index]
18 |
19 | @lru_cache(maxsize=8)
20 | def collater(self, samples):
21 | return self.dataset.collater(samples)
22 |
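23 | # Note (an editor's reading of the code, not in the original file): `token` is
24 | # accepted but unused here; since lru_cache decorates methods, entries are
25 | # keyed on (self, index), so each instance keeps its own cache of 8 items.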
--------------------------------------------------------------------------------
/fairseq/fairseq/data/multilingual/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/fairseq/fairseq/data/num_samples_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import FairseqDataset
7 |
8 |
9 | class NumSamplesDataset(FairseqDataset):
10 | def __getitem__(self, index):
11 | return 1
12 |
13 | def __len__(self):
14 | return 0
15 |
16 | def collater(self, samples):
17 | return sum(samples)
18 |
--------------------------------------------------------------------------------
/fairseq/fairseq/data/offset_tokens_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class OffsetTokensDataset(BaseWrapperDataset):
10 | def __init__(self, dataset, offset):
11 | super().__init__(dataset)
12 | self.offset = offset
13 |
14 | def __getitem__(self, idx):
15 | return self.dataset[idx] + self.offset
16 |
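17 | # Usage sketch (illustrative, not in the original file): commonly paired with
18 | # StripTokenDataset to map dictionary token ids to 0-based label ids, e.g.
19 | #   OffsetTokensDataset(labels, offset=-label_dictionary.nspecial)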
--------------------------------------------------------------------------------
/fairseq/fairseq/data/raw_label_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import FairseqDataset
9 |
10 |
11 | class RawLabelDataset(FairseqDataset):
12 | def __init__(self, labels):
13 | super().__init__()
14 | self.labels = labels
15 |
16 | def __getitem__(self, index):
17 | return self.labels[index]
18 |
19 | def __len__(self):
20 | return len(self.labels)
21 |
22 | def collater(self, samples):
23 | return torch.tensor(samples)
24 |
--------------------------------------------------------------------------------
/fairseq/fairseq/data/roll_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class RollDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, shifts):
13 | super().__init__(dataset)
14 | self.shifts = shifts
15 |
16 | def __getitem__(self, index):
17 | item = self.dataset[index]
18 | return torch.roll(item, self.shifts)
19 |
--------------------------------------------------------------------------------
/fairseq/fairseq/data/sort_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 |
8 | from . import BaseWrapperDataset
9 |
10 |
11 | class SortDataset(BaseWrapperDataset):
12 | def __init__(self, dataset, sort_order):
13 | super().__init__(dataset)
14 | if not isinstance(sort_order, (list, tuple)):
15 | sort_order = [sort_order]
16 | self.sort_order = sort_order
17 |
18 | assert all(len(so) == len(dataset) for so in sort_order)
19 |
20 | def ordered_indices(self):
21 | return np.lexsort(self.sort_order)
22 |
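23 | # Note (an editor's reading of np.lexsort semantics, not in the original
24 | # file): the *last* array in sort_order is the primary key, so
25 | #   SortDataset(ds, sort_order=[shuffle, sizes])
26 | # orders primarily by sizes and breaks ties with the shuffle order, the usual
27 | # fairseq pattern for batching similar-length examples in random order.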
--------------------------------------------------------------------------------
/fairseq/fairseq/data/strip_token_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import BaseWrapperDataset
7 |
8 |
9 | class StripTokenDataset(BaseWrapperDataset):
10 | def __init__(self, dataset, id_to_strip):
11 | super().__init__(dataset)
12 | self.id_to_strip = id_to_strip
13 |
14 | def __getitem__(self, index):
15 | item = self.dataset[index]
16 | while len(item) > 0 and item[-1] == self.id_to_strip:
17 | item = item[:-1]
18 | while len(item) > 0 and item[0] == self.id_to_strip:
19 | item = item[1:]
20 | return item
21 |
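22 | # Usage sketch (illustrative, not in the original file): strips one token id
23 | # from both ends of every example, typically the EOS marker:
24 | #   StripTokenDataset(label_dataset, id_to_strip=dictionary.eos())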
--------------------------------------------------------------------------------
/fairseq/fairseq/dataclass/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .configs import FairseqDataclass
7 | from .constants import ChoiceEnum
8 |
9 |
10 | __all__ = [
11 | "FairseqDataclass",
12 | "ChoiceEnum",
13 | ]
14 |
--------------------------------------------------------------------------------
/fairseq/fairseq/distributed/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .distributed_timeout_wrapper import DistributedTimeoutWrapper
7 | from .fully_sharded_data_parallel import fsdp_enable_wrap, fsdp_wrap, FullyShardedDataParallel
8 | from .legacy_distributed_data_parallel import LegacyDistributedDataParallel
9 | from .module_proxy_wrapper import ModuleProxyWrapper
10 | from .tpu_distributed_data_parallel import TPUDistributedDataParallel
11 |
12 |
13 | __all__ = [
14 | "DistributedTimeoutWrapper",
15 | "fsdp_enable_wrap",
16 | "fsdp_wrap",
17 | "FullyShardedDataParallel",
18 | "LegacyDistributedDataParallel",
19 | "ModuleProxyWrapper",
20 | "TPUDistributedDataParallel",
21 | ]
22 |
--------------------------------------------------------------------------------
/fairseq/fairseq/logging/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/fairseq/logging/__init__.py
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from . import criterions, models, modules # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/criterions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the criterions/ directory
11 | for file in sorted(os.listdir(os.path.dirname(__file__))):
12 | if file.endswith(".py") and not file.startswith("_"):
13 | module = file[: file.find(".py")]
14 | importlib.import_module("fairseq.model_parallel.criterions." + module)
15 |
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the models/ directory
11 | models_dir = os.path.dirname(__file__)
12 | for file in os.listdir(models_dir):
13 | path = os.path.join(models_dir, file)
14 | if (
15 | not file.startswith("_")
16 | and not file.startswith(".")
17 | and (file.endswith(".py") or os.path.isdir(path))
18 | ):
19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file
20 | module = importlib.import_module("fairseq.model_parallel.models." + model_name)
21 |
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .model import * # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/models/roberta/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .model import * # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/fairseq/model_parallel/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | from .multihead_attention import ModelParallelMultiheadAttention
8 | from .transformer_layer import (
9 | ModelParallelTransformerEncoderLayer,
10 | ModelParallelTransformerDecoderLayer,
11 | )
12 |
13 | __all__ = [
14 | "ModelParallelMultiheadAttention",
15 | "ModelParallelTransformerEncoderLayer",
16 | "ModelParallelTransformerDecoderLayer",
17 | ]
18 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/bart/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .hub_interface import * # noqa
7 | from .model import * # noqa
8 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/ema/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from .ema import EMA
10 |
11 |
12 | def build_ema(model, cfg, device):
13 | return EMA(model, cfg, device)
14 |
15 |
16 | # automatically import any Python files in the models/ema/ directory
17 | for file in sorted(os.listdir(os.path.dirname(__file__))):
18 | if file.endswith(".py") and not file.startswith("_"):
19 | file_name = file[: file.find(".py")]
20 | importlib.import_module("fairseq.models.ema." + file_name)
21 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/hubert/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .hubert import * # noqa
7 | from .hubert_asr import * # noqa
8 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/huggingface/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 |
10 | # automatically import any Python files in the models/huggingface/ directory
11 | models_dir = os.path.dirname(__file__)
12 | for file in os.listdir(models_dir):
13 | path = os.path.join(models_dir, file)
14 | if (
15 | not file.startswith("_")
16 | and not file.startswith(".")
17 | and (file.endswith(".py") or os.path.isdir(path))
18 | ):
19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file
20 | module = importlib.import_module("fairseq.models.huggingface." + model_name)
21 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/nat/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """isort:skip_file"""
6 |
7 | from .fairseq_nat_model import *
8 | from .nonautoregressive_transformer import *
9 | from .nat_crf_transformer import *
10 | from .iterative_nonautoregressive_transformer import *
11 | from .cmlm_transformer import *
12 | from .levenshtein_transformer import *
13 | from .insertion_transformer import *
14 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/roberta/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .hub_interface import * # noqa
7 | from .model import * # noqa
8 | from .enc_dec import * # noqa
9 | from .model_camembert import * # noqa
10 | from .model_gottbert import * # noqa
11 | from .model_xlmr import * # noqa
12 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/speech_to_text/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .berard import * # noqa
7 | from .convtransformer import * # noqa
8 | from .s2t_transformer import * # noqa
9 | from .xm_transformer import * # noqa
10 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/text_to_speech/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .tacotron2 import * # noqa
7 | from .tts_transformer import * # noqa
8 | from .fastspeech2 import * # noqa
9 |
--------------------------------------------------------------------------------
/fairseq/fairseq/models/wav2vec/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .wav2vec import * # noqa
7 | from .wav2vec2 import * # noqa
8 | from .wav2vec2_asr import * # noqa
9 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/dynamicconv_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .dynamicconv_layer import DynamicconvLayer # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/dynamicconv_layer/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | from setuptools import setup
8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
9 |
10 |
11 | setup(
12 | name="dynamicconv_layer",
13 | ext_modules=[
14 | CUDAExtension(
15 | name="dynamicconv_cuda",
16 | sources=[
17 | "dynamicconv_cuda.cpp",
18 | "dynamicconv_cuda_kernel.cu",
19 | ],
20 | ),
21 | ],
22 | cmdclass={"build_ext": BuildExtension},
23 | )
24 |
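25 | # Build note (illustrative, not in the original file; this is the standard
26 | # setuptools/cpp_extension workflow, not repo-specific): compile in place with
27 | #   python setup.py build_ext --inplace
28 | # which requires a CUDA toolkit matching the installed PyTorch build.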
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/fp32_group_norm.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | Group norm done in fp32 (for fp16 training)
7 | """
8 |
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 |
12 |
13 | class Fp32GroupNorm(nn.GroupNorm):
14 | def __init__(self, *args, **kwargs):
15 | super().__init__(*args, **kwargs)
16 |
17 | def forward(self, input):
18 | output = F.group_norm(
19 | input.float(),
20 | self.num_groups,
21 | self.weight.float() if self.weight is not None else None,
22 | self.bias.float() if self.bias is not None else None,
23 | self.eps,
24 | )
25 | return output.type_as(input)
26 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/gelu.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs
8 | """
9 |
10 | import math
11 |
12 | import torch
13 | import torch.nn as nn
14 |
15 |
16 | def gelu_accurate(x):
17 | if not hasattr(gelu_accurate, "_a"):
18 | gelu_accurate._a = math.sqrt(2 / math.pi)
19 | return (
20 | 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
21 | )
22 |
23 |
24 | def gelu(x: torch.Tensor) -> torch.Tensor:
25 | return torch.nn.functional.gelu(x.float()).type_as(x)
26 |
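27 | # Note (an editor's reading of the code, not in the original file): gelu()
28 | # uses PyTorch's exact erf-based GELU, computed in fp32 and cast back; despite
29 | # its name, gelu_accurate() is the tanh approximation from the Hendrycks &
30 | # Gimpel repo. The two agree to within roughly 1e-3:
31 | #   x = torch.randn(8); (gelu(x) - gelu_accurate(x)).abs().max()  # ~1e-3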
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/grad_multiply.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 |
9 | class GradMultiply(torch.autograd.Function):
10 | @staticmethod
11 | def forward(ctx, x, scale):
12 | ctx.scale = scale
13 | res = x.new(x)
14 | return res
15 |
16 | @staticmethod
17 | def backward(ctx, grad):
18 | return grad * ctx.scale, None
19 |
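20 | # Usage sketch (illustrative, not in the original file):
21 | #   y = GradMultiply.apply(x, 0.1)
22 | # The forward pass returns a copy of x unchanged; the backward pass scales
23 | # incoming gradients by 0.1. This is the mechanism behind `feature_grad_mult`
24 | # in the wav2vec2 configs above, damping gradients into the feature extractor.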
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/lightconv_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .lightconv_layer import LightconvLayer # noqa
7 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/lightconv_layer/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 |
7 | from setuptools import setup
8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
9 |
10 |
11 | setup(
12 | name="lightconv_layer",
13 | ext_modules=[
14 | CUDAExtension(
15 | "lightconv_cuda",
16 | [
17 | "lightconv_cuda.cpp",
18 | "lightconv_cuda_kernel.cu",
19 | ],
20 | ),
21 | ],
22 | cmdclass={"build_ext": BuildExtension},
23 | )
24 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/fairseq/modules/quantization/__init__.py
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/pq/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .utils import SizeTracker, get_param, attrsetter, quantize_model_ # NOQA
7 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/pq/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .qconv import PQConv2d # NOQA
7 | from .qemb import PQEmbedding # NOQA
8 | from .qlinear import PQLinear # NOQA
9 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/scalar/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .utils import quantize_model_ # NOQA
7 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/quantization/scalar/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .qact import ActivationQuantizer # NOQA
7 | from .qconv import IntConv2d # NOQA
8 | from .qemb import IntEmbedding # NOQA
9 | from .qlinear import IntLinear # NOQA
10 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/same_pad.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 |
7 | from torch import nn
8 |
9 |
10 | class SamePad(nn.Module):
11 | def __init__(self, kernel_size, causal=False):
12 | super().__init__()
13 | if causal:
14 | self.remove = kernel_size - 1
15 | else:
16 | self.remove = 1 if kernel_size % 2 == 0 else 0
17 |
18 | def forward(self, x):
19 | if self.remove > 0:
20 | x = x[:, :, : -self.remove]
21 | return x
22 |
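23 | # Shape sketch (illustrative, not in the original file): with an even kernel
24 | # and padding = kernel_size // 2, nn.Conv1d yields one extra time step, which
25 | # SamePad trims; in causal mode it removes kernel_size - 1 trailing steps.
26 | #   conv = nn.Sequential(nn.Conv1d(4, 4, kernel_size=2, padding=1), SamePad(2))
27 | #   conv(torch.zeros(1, 4, 10)).shape  # -> torch.Size([1, 4, 10])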
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/transpose_last.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | """
6 | transpose last 2 dimensions of the input
7 | """
8 |
9 | import torch.nn as nn
10 |
11 |
12 | class TransposeLast(nn.Module):
13 | def __init__(self, deconstruct_idx=None):
14 | super().__init__()
15 | self.deconstruct_idx = deconstruct_idx
16 |
17 | def forward(self, x):
18 | if self.deconstruct_idx is not None:
19 | x = x[self.deconstruct_idx]
20 | return x.transpose(-2, -1)
21 |
--------------------------------------------------------------------------------
/fairseq/fairseq/modules/unfold.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.nn.functional as F
7 |
8 |
9 | def unfold1d(x, kernel_size, padding_l, pad_value=0):
10 | """unfold T x B x C to T x B x C x K"""
11 | if kernel_size > 1:
12 | T, B, C = x.size()
13 | x = F.pad(
14 | x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value
15 | )
16 | x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C))
17 | else:
18 | x = x.unsqueeze(3)
19 | return x
20 |
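21 | # Shape sketch (illustrative, not in the original file):
22 | #   x = torch.randn(50, 8, 16)                     # T x B x C
23 | #   unfold1d(x, kernel_size=3, padding_l=1).shape  # -> (50, 8, 16, 3)
24 | # A sliding window of K frames per time step, built with as_strided over the
25 | # padded tensor rather than an explicit gather.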
--------------------------------------------------------------------------------
/fairseq/fairseq/tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import re
7 |
8 |
9 | SPACE_NORMALIZER = re.compile(r"\s+")
10 |
11 |
12 | def tokenize_line(line):
13 | line = SPACE_NORMALIZER.sub(" ", line)
14 | line = line.strip()
15 | return line.split()
16 |
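17 | # Example (illustrative, not in the original file):
18 | #   tokenize_line("  a\tb  c ")  # -> ["a", "b", "c"]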
--------------------------------------------------------------------------------
/fairseq/fairseq/version.txt:
--------------------------------------------------------------------------------
1 | 1.0.0a0
2 |
--------------------------------------------------------------------------------
/fairseq/fairseq_cli/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/fairseq_cli/__init__.py
--------------------------------------------------------------------------------
/fairseq/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", "wheel", "cython"]
3 | build-backend = "setuptools.build_meta"
4 |
--------------------------------------------------------------------------------
/fairseq/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/scripts/__init__.py
--------------------------------------------------------------------------------
/fairseq/scripts/compound_split_bleu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 1 ]; then
4 | echo "usage: $0 GENERATE_PY_OUTPUT"
5 | exit 1
6 | fi
7 |
8 | GEN=$1
9 |
10 | SYS=$GEN.sys
11 | REF=$GEN.ref
12 |
13 | if [ $(tail -n 1 $GEN | grep BLEU | wc -l) -ne 1 ]; then
14 | echo "not done generating"
15 | exit
16 | fi
17 |
18 | grep ^H $GEN | awk -F '\t' '{print $NF}' | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $SYS
19 | grep ^T $GEN | cut -f2- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $REF
20 | fairseq-score --sys $SYS --ref $REF
21 |
--------------------------------------------------------------------------------
/fairseq/scripts/sacrebleu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 4 ]; then
4 | echo "usage: $0 TESTSET SRCLANG TGTLANG GEN"
5 | exit 1
6 | fi
7 |
8 | TESTSET=$1
9 | SRCLANG=$2
10 | TGTLANG=$3
11 |
12 | GEN=$4
13 |
14 | if ! command -v sacremoses &> /dev/null
15 | then
16 | echo "sacremoses could not be found, please install with: pip install sacremoses"
17 | exit
18 | fi
19 |
20 | grep ^H $GEN \
21 | | sed 's/^H\-//' \
22 | | sort -n -k 1 \
23 | | cut -f 3 \
24 | | sacremoses detokenize \
25 | > $GEN.sorted.detok
26 |
27 | sacrebleu --test-set $TESTSET --language-pair "${SRCLANG}-${TGTLANG}" < $GEN.sorted.detok
28 |
--------------------------------------------------------------------------------
/fairseq/scripts/spm_train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | from __future__ import absolute_import, division, print_function, unicode_literals
9 |
10 | import sys
11 |
12 | import sentencepiece as spm
13 |
14 |
15 | if __name__ == "__main__":
16 | spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
17 |
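18 | # Usage sketch (illustrative, not in the original file; these are standard
19 | # sentencepiece trainer flags, not repo-specific):
20 | #   python spm_train.py --input=corpus.txt --model_prefix=spm \
21 | #       --vocab_size=8000 --model_type=bpe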
--------------------------------------------------------------------------------
/fairseq/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/tests/__init__.py
--------------------------------------------------------------------------------
/fairseq/tests/distributed/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/tests/distributed/__init__.py
--------------------------------------------------------------------------------
/fairseq/tests/gpu/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/tests/gpu/__init__.py
--------------------------------------------------------------------------------
/fairseq/tests/speech_recognition/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/fairseq/tests/speech_recognition/__init__.py
--------------------------------------------------------------------------------
/fairseq/train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | """
7 | Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
8 | """
9 |
10 | from fairseq_cli.train import cli_main
11 |
12 |
13 | if __name__ == "__main__":
14 | cli_main()
15 |
--------------------------------------------------------------------------------
/models/.ipynb_checkpoints/__init__-checkpoint.py:
--------------------------------------------------------------------------------
1 | from .ofa import OFAModel, ofa_base_architecture, ofa_large_architecture, ofa_huge_architecture
2 | from .t5 import OFAT5Model, T5OFAModel
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .unival import UnIVALModel, unival_base_architecture
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/models/__pycache__/search.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/search.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/search.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/search.cpython-38.pyc
--------------------------------------------------------------------------------
/models/__pycache__/search.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/search.cpython-39.pyc
--------------------------------------------------------------------------------
/models/__pycache__/sequence_generator.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/sequence_generator.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/sequence_generator.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/__pycache__/sequence_generator.cpython-39.pyc
--------------------------------------------------------------------------------
/models/clip/__init__.py:
--------------------------------------------------------------------------------
1 | from .clip import *
2 |
--------------------------------------------------------------------------------
/models/clip/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/clip/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/models/clip/__pycache__/clip.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/clip.cpython-37.pyc
--------------------------------------------------------------------------------
/models/clip/__pycache__/clip.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/clip.cpython-38.pyc
--------------------------------------------------------------------------------
/models/clip/__pycache__/model.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/model.cpython-37.pyc
--------------------------------------------------------------------------------
/models/clip/__pycache__/model.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/model.cpython-38.pyc
--------------------------------------------------------------------------------
/models/clip/__pycache__/simple_tokenizer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/simple_tokenizer.cpython-37.pyc
--------------------------------------------------------------------------------
/models/clip/__pycache__/simple_tokenizer.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/__pycache__/simple_tokenizer.cpython-38.pyc
--------------------------------------------------------------------------------
/models/clip/bpe_simple_vocab_16e6.txt.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/clip/bpe_simple_vocab_16e6.txt.gz
--------------------------------------------------------------------------------
/models/taming/__pycache__/lr_scheduler.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/__pycache__/lr_scheduler.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/__pycache__/util.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/__pycache__/util.cpython-37.pyc
--------------------------------------------------------------------------------
/models/taming/__pycache__/util.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/__pycache__/util.cpython-38.pyc
--------------------------------------------------------------------------------
/models/taming/__pycache__/util.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/__pycache__/util.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/models/__pycache__/vqgan.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/models/__pycache__/vqgan.cpython-37.pyc
--------------------------------------------------------------------------------
/models/taming/models/__pycache__/vqgan.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/models/__pycache__/vqgan.cpython-38.pyc
--------------------------------------------------------------------------------
/models/taming/models/__pycache__/vqgan.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/models/__pycache__/vqgan.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/modules/__pycache__/util.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/__pycache__/util.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/modules/diffusionmodules/__pycache__/model.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/diffusionmodules/__pycache__/model.cpython-37.pyc
--------------------------------------------------------------------------------
/models/taming/modules/diffusionmodules/__pycache__/model.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/diffusionmodules/__pycache__/model.cpython-38.pyc
--------------------------------------------------------------------------------
/models/taming/modules/diffusionmodules/__pycache__/model.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/diffusionmodules/__pycache__/model.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/modules/discriminator/__pycache__/model.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/discriminator/__pycache__/model.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/modules/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from models.taming.modules.losses.vqperceptual import DummyLoss
2 |
3 |
--------------------------------------------------------------------------------
/models/taming/modules/losses/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/losses/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/modules/losses/__pycache__/lpips.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/losses/__pycache__/lpips.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/modules/losses/__pycache__/vqperceptual.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/losses/__pycache__/vqperceptual.cpython-39.pyc
--------------------------------------------------------------------------------
/models/taming/modules/vqvae/__pycache__/quantize.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/vqvae/__pycache__/quantize.cpython-37.pyc
--------------------------------------------------------------------------------
/models/taming/modules/vqvae/__pycache__/quantize.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/vqvae/__pycache__/quantize.cpython-38.pyc
--------------------------------------------------------------------------------
/models/taming/modules/vqvae/__pycache__/quantize.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/taming/modules/vqvae/__pycache__/quantize.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/.ipynb_checkpoints/__init__-checkpoint.py:
--------------------------------------------------------------------------------
1 | from .ofa import OFAModel, ofa_base_architecture, ofa_large_architecture, ofa_huge_architecture
--------------------------------------------------------------------------------
/models/unival/__init__.py:
--------------------------------------------------------------------------------
1 | from .unival import UnIVALModel, unival_base_architecture, unival_large_architecture
--------------------------------------------------------------------------------
/models/unival/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/frozen_bn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/frozen_bn.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/frozen_bn.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/frozen_bn.cpython-38.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/frozen_bn.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/frozen_bn.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/ofa.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/ofa.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/ofa.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/ofa.cpython-38.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/ofa.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/ofa.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/resnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/resnet.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/resnet.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/resnet.cpython-38.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/resnet.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/resnet.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/resnet3d.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/resnet3d.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/timesformer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/timesformer.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_multihead_attention.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_multihead_attention.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_multihead_attention.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_multihead_attention.cpython-38.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_multihead_attention.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_multihead_attention.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_transformer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_transformer.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_transformer.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_transformer.cpython-38.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_transformer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_transformer.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_transformer_layer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_transformer_layer.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_transformer_layer.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_transformer_layer.cpython-38.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unify_transformer_layer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unify_transformer_layer.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/unival.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/unival.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/__pycache__/vit.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/__pycache__/vit.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/ast.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/ast.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/audio_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/audio_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/audio_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/audio_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/clip.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/clip.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/feature_fusion.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/feature_fusion.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/feature_fusion.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/feature_fusion.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/htsat.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/htsat.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/pann.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/pann.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/pann.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/pann.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/resnext3d.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/resnext3d.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/resnext3d.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/resnext3d.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/s3d.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/s3d.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/simple_tokenizer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/simple_tokenizer.cpython-39.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/timm_resnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/timm_resnet.cpython-37.pyc
--------------------------------------------------------------------------------
/models/unival/encoders/__pycache__/timm_resnet.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/models/unival/encoders/__pycache__/timm_resnet.cpython-39.pyc
--------------------------------------------------------------------------------
/ofa_module/__init__.py:
--------------------------------------------------------------------------------
1 | import data
2 | import models
3 | import tasks
4 | import criterions
5 | import utils
--------------------------------------------------------------------------------
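This `__init__.py` is the glue that lets fairseq discover the custom code: importing the `ofa_module` package pulls in `data`, `models`, `tasks`, `criterions`, and `utils`, and those imports' side effects register UnIVAL's tasks, architectures, and losses with fairseq's registries. A minimal sketch of how a run script would point fairseq at it, assuming the standard fairseq `--user-dir` mechanism (the relative path, task name, and arch name below are illustrative, not taken from the repo):

    # Hypothetical launch: --user-dir makes fairseq import ofa_module first,
    # so the custom task/model/criterion names registered there resolve.
    user_dir=../../ofa_module   # assumed path, relative to a run_scripts subdirectory
    python3 ../../train.py --user-dir=${user_dir} --task=caption --arch=unival_base  # plus data/optim flags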
/ofa_module/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/ofa_module/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ofa_module/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/ofa_module/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/ofa_module/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/ofa_module/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/preprocess/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/preprocess/__init__.py
--------------------------------------------------------------------------------
/preprocess/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/preprocess/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/preprocess/__pycache__/average_save_models.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/preprocess/__pycache__/average_save_models.cpython-39.pyc
--------------------------------------------------------------------------------
/preprocess/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/preprocess/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/preprocess/__pycache__/utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/preprocess/__pycache__/utils.cpython-39.pyc
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e ./fairseq/
2 | opencv-python-headless
3 | ###pytorch_lightning
4 | datasets
5 | rouge_score
6 | ftfy==6.0.3
7 | tensorboardX==2.4.1
8 | pycocotools==2.0.4
9 | pycocoevalcap==1.2
10 | torchvision
11 | einops
12 | decord==0.6.0
13 | h5py==3.8.0
14 | librosa==0.9.2
15 | mapcalc==0.2.2
16 | matplotlib==3.5.3
17 | nltk==3.7
18 | numpy==1.21.6
19 | pandas==1.3.5
20 | Pillow==10.0.0
21 | PyYAML==6.0
22 | sentencepiece==0.1.99
23 | setuptools==68.0.0
24 | soundfile==0.12.1
25 | spacy==3.5.4
26 | timm==0.6.12
27 | torchaudio
28 | torchlibrosa
29 | tqdm==4.64.1
30 | transformers==4.23.1
31 | av==10.0.0
32 |
33 |
--------------------------------------------------------------------------------
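The first line makes install order matter: `-e ./fairseq/` installs the bundled fairseq fork in editable mode, so the `fairseq/` directory must be present in the working tree before `pip` runs. A minimal environment sketch, assuming Python 3.9 (the interpreter the committed `cpython-39` caches were built with) and an illustrative environment name:

    # Illustrative setup; exact package versions are pinned in requirements.txt itself.
    conda create -n unival python=3.9 -y
    conda activate unival
    pip install -r requirements.txt   # also installs ./fairseq in editable mode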
/run_scripts/image_gen/eval_utils/__pycache__/dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/run_scripts/image_gen/eval_utils/__pycache__/dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/run_scripts/image_gen/eval_utils/__pycache__/inceptionV3.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/run_scripts/image_gen/eval_utils/__pycache__/inceptionV3.cpython-39.pyc
--------------------------------------------------------------------------------
/slurm_adastra/averaging/branching/refcoco/ofa_ratarefcocoplus_branchground.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_ratarefcocoplus_branchground
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_ratarefcocoplus_branchground.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/branching/refcoco/ofa_ratarefcocoplus_branchground.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
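All of the slurm_adastra/ wrappers that follow share this shape: the `#SBATCH` header declares the allocation (job name, nodes, GPUs, MI250 constraint, account, time limit, mail), and the body activates the conda environment, removes stray core dumps, and `srun`s the identically named training script under run_scripts/. Submitting one is therefore a single command (paths assume the Adastra layout hard-coded above):

    # Submit the wrapper; stdout/stderr go to the path set by '#SBATCH --output'.
    sbatch slurm_adastra/averaging/branching/refcoco/ofa_ratarefcocoplus_branchground.sh
    squeue -u "$USER"   # watch the job; it srun's the matching run_scripts/ script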
/slurm_adastra/averaging/branching/vqa/ofa_mini_vqa_pretrain_branvqa.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_vqa_pretrain_branvqa
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_vqa_pretrain_branvqa.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash averaging/branching/vqa/ofa_mini_vqa_pretrain_branvqa.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/averaging/eval/._eval_refcocoplus.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/averaging/eval/._eval_refcocoplus.sh
--------------------------------------------------------------------------------
/slurm_adastra/averaging/fusing/t.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/averaging/fusing/t.sh
--------------------------------------------------------------------------------
/slurm_adastra/averaging/ratatouille/caption/video/t.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/averaging/ratatouille/caption/video/t.sh
--------------------------------------------------------------------------------
/slurm_adastra/averaging/ratatouille/eval/eval_vqa.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_vqa_base_best_ratacapgroundsnlivqalr5e5
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_vqa_base_best_ratacapgroundsnlivqalr5e5.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash averaging/ratatouille/eval/eval_vqa.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/averaging/refcoco/ofa_long_refcocoplus.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_long_refcocoplus
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_long_refcocoplus.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/refcoco/ofa_long_refcocoplus.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/._eval_caption_base_best.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/caption/eval/._eval_caption_base_best.sh
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/._eval_nocaps_base.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/caption/eval/._eval_nocaps_base.sh
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/audio/eval_audiocaps_audio_caption.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_audio_caption_ofaplus_s2_onlylinear
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=10:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_audio_caption_ofaplus_s2_onlylinear.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash caption/eval/audio/eval_audiocaps_audio_caption.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/eval_caption_base_best.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_caption_stage_2_ofaplus_s1_onlylinearylinear
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_caption_stage_2_ofaplus_s1_onlylinearylinear.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash caption/eval/eval_caption_base_best.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/eval_caption_base_best_avg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_caption_base_best_avg_postfuse_capvqa
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=10:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_caption_base_best_avg_postfuse_capvqa.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash caption/eval/eval_caption_base_best_avg.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/eval_nocaps_base.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_nocaps_avg_postratafusevanilla
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_nocaps_avg_postratafusevanilla.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash caption/eval/eval_nocaps_base.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/video/._eval_msrvtt_video_caption.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/caption/eval/video/._eval_msrvtt_video_caption.sh
--------------------------------------------------------------------------------
/slurm_adastra/caption/eval/video/eval_msrvtt_video_caption.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_video_caption_avg_postfuse_vidcapvqa
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=00:20:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_video_caption_avg_postfuse_vidcapvqa.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash caption/eval/video/eval_msrvtt_video_caption.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/scaling/._caption_stage_1_ofa_base_pretrain_s2.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/caption/scaling/._caption_stage_1_ofa_base_pretrain_s2.sh
--------------------------------------------------------------------------------
/slurm_adastra/caption/scaling_best/video/activitynet/._video_caption_activitynet_stage_1_ofaplus_base_pretrain_s2_shuf_el_db_da.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/caption/scaling_best/video/activitynet/._video_caption_activitynet_stage_1_ofaplus_base_pretrain_s2_shuf_el_db_da.sh
--------------------------------------------------------------------------------
/slurm_adastra/caption/video/ofa_mini_video_caption_stage_1_bart_res_4f_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_caption_stage_1_bart_res_4f_nosample_multinodes
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_caption_stage_1_bart_res_4f_nosample_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash caption/video/ofa_mini_video_caption_stage_1_bart_res_4f_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
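Note the two launch styles among these wrappers: most restate the geometry on the `srun` line itself, while the `_multinodes` variants use per-node directives (`--ntasks-per-node`, `--gpus-per-node`) in the header and a bare `srun bash ...`, letting srun inherit the geometry from the batch allocation. Side by side (`<script>` is a placeholder, not a real path):

    # Explicit resources on the srun line (single-node wrappers):
    srun -l -N 1 -n 1 -c 128 --gpus=8 bash <script>.sh
    # Geometry inherited from the #SBATCH directives (_multinodes wrappers):
    srun bash caption/video/<script>.sh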
/slurm_adastra/caption/video/ofa_mini_video_caption_stage_1_bart_res_4f_vids2_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_caption_stage_1_bart_res_4f_vids2_nosample_multinodes
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_caption_stage_1_bart_res_4f_vids2_nosample_multinodes_fix.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash caption/video/ofa_mini_video_caption_stage_1_bart_res_4f_vids2_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/video/ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_multinodes
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash caption/video/ofa_mini_video_caption_stage_1_bart_vit_hres_enceval.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/caption/video/ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_scratch_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_scratch_multinodes
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_scratch_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash caption/video/ofa_mini_video_caption_stage_1_bart_vit_hres_enceval_scratch.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/image_gen/eval/._eval_image_gen.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/image_gen/eval/._eval_image_gen.sh
--------------------------------------------------------------------------------
/slurm_adastra/image_gen/eval/eval_image_gen.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_image_gen_ofa_stage_2_base
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks=4
6 | #SBATCH --gpus=32
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=10:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_image_gen_ofa_stage_2_base.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 4 -n 4 -c 128 --gpus=32 bash image_gen/eval/eval_image_gen.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/image_gen/ofa_mini_image_gen_stage_1_initbart.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_image_gen_stage_1_initbart
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks=4
6 | #SBATCH --gpus=32
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=14:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_image_gen_stage_1_initbart.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 4 -n 4 -c 128 --gpus=32 bash image_gen/ofa_mini_image_gen_stage_1_initbart.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/image_gen/scaling_best/image_gen_ofa_stage_1_base.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=image_gen_ofa_stage_1_base
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks=4
6 | #SBATCH --gpus=32
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/image_gen_ofa_stage_1_base.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 4 -n 4 -c 128 --gpus=32 bash image_gen/scaling_best/image_gen_ofa_stage_1_base.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/image_gen/scaling_best/image_gen_ofa_stage_2_base.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=image_gen_ofa_stage_2_base
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks=4
6 | #SBATCH --gpus=32
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/image_gen_ofa_stage_2_base.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 4 -n 4 -c 128 --gpus=32 bash image_gen/scaling_best/image_gen_ofa_stage_2_base.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/refcoco/eval/eval_refcoco.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_refcoco_ofainit_base_pretrain_s2_hs_fix_lr5e5_bs8_4_shuf
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_refcoco_ofainit_base_pretrain_s2_hs_fix_lr5e5_bs8_4_shuf.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash refcoco/eval/eval_refcoco.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/refcoco/eval/eval_refcocog.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_refcocog_ofainit_base_pretrain_s2_hs_fix_lr5e5_bs8_4_shuf
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_refcocog_ofainit_base_pretrain_s2_hs_fix_lr5e5_bs8_4_shuf.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash refcoco/eval/eval_refcocog.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/refcoco/eval/eval_refcocoplus.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_refcocoplus_avg_postratafuse
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_refcocoplus_avg_postratafuse.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash refcoco/eval/eval_refcocoplus.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/refcoco/eval/eval_refcocoplus_avg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_refcocoplus_base_best_avg_postfuse_refvqa
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_refcocoplus_base_best_avg_postfuse_refvqa.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-*
25 |
26 |
27 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash refcoco/eval/eval_refcocoplus_avg.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/refcoco/scst/._ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1norm_lreinf10.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/refcoco/scst/._ofa_mini_qa_ground_cc12m_balanced_refcocoplus_fixedscst_l1norm_lreinf10.sh
--------------------------------------------------------------------------------
/slurm_adastra/scaling/._ofa_base_pretrain_s1_ret_startonlylinear.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/scaling/._ofa_base_pretrain_s1_ret_startonlylinear.sh
--------------------------------------------------------------------------------
/slurm_adastra/scaling/._ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/scaling/._ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata.sh
--------------------------------------------------------------------------------
/slurm_adastra/scaling/._ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata_2.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/scaling/._ofa_base_pretrain_s2_long_lr1e4_50ep_initccs1_startonlylinear_nolsdata_2.sh
--------------------------------------------------------------------------------
/slurm_adastra/scaling/._ofa_base_pretrain_s2_long_lr1e4_50ep_startonlylinear.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/scaling/._ofa_base_pretrain_s2_long_lr1e4_50ep_startonlylinear.sh
--------------------------------------------------------------------------------
/slurm_adastra/snli_ve/eval/eval_snli_ve_base_best.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_snli_ve_avg_postratafuse
4 | #SBATCH --nodes=1
5 | #SBATCH --ntasks=1
6 | #SBATCH --gpus=8
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | ####SBATCH --nodelist=x1004c4s2b0n0
10 | #SBATCH --time=1:00:00
11 | #SBATCH -C MI250
12 | #SBATCH -A gda2204
13 | #SBATCH --mail-type=END,FAIL
14 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_snli_ve_avg_postratafuse.out
15 | #SBATCH --exclusive
16 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
17 |
18 |
19 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
20 | source /lus/home/NAT/gda2204/mshukor/.bashrc
21 |
22 | conda activate main
23 |
24 |
25 | rm core-python3*
26 |
27 |
28 | srun -l -N 1 -n 1 -c 128 --gpus=8 bash snli_ve/eval/eval_snli_ve_base_best.sh
29 |
30 |
31 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/._eval_okvqa_base_best.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/vqa/eval/._eval_okvqa_base_best.sh
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/._eval_vizwiz_base_best.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/vqa/eval/._eval_vizwiz_base_best.sh
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/eval_okvqa_base_best.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_okvqa_zs_avg_postratafusevanilla
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_okvqa_zs_avg_postratafusevanilla.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/eval/eval_okvqa_base_best.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/eval_vizwiz_base_best.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_vizwiz_zs_avg_postratafuse
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_vizwiz_zs_avg_postratafuse.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/eval/eval_vizwiz_base_best.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/eval_vqa_base_best.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_vqa_ofaplus_s1_onlylinear
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=1:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_vqa_ofaplus_s1_onlylinear.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/eval/eval_vqa_base_best.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/eval_vqa_base_best_avg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_vqa_base_best_avg_postfuse_vqacap
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_vqa_base_best_avg_postfuse_vqacap.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/eval/eval_vqa_base_best_avg.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/t.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/vqa/eval/t.sh
--------------------------------------------------------------------------------
/slurm_adastra/vqa/eval/video/eval_video_qa_avg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=eval_video_vqa_base_best_avg_vid_vqacap_prevout
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=2:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/eval_video_vqa_base_best_avg_vid_vqacap_prevout.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/eval/video/eval_video_qa_avg.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/scaling_best/onlylinear/vqa_ofaplus_s1_onlylinear.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=vqa_ofaplus_s1_onlylinear
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/vqa_ofaplus_s1_onlylinear.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/scaling_best/onlylinear/vqa_ofaplus_s1_onlylinear.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/scaling_best/onlylinear/vqa_ofaplus_s2_onlylinear.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=vqa_ofaplus_s2_onlylinear
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/vqa_ofaplus_s2_onlylinear.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/scaling_best/onlylinear/vqa_ofaplus_s2_onlylinear.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/scaling_best/video/msvd/t.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/slurm_adastra/vqa/scaling_best/video/msvd/t.sh
--------------------------------------------------------------------------------
/slurm_adastra/vqa/scaling_best/vqa_ofaplus_base_pretrain_s2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=vqa_ofaplus_base_pretrain_s2
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/vqa_ofaplus_base_pretrain_s2.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/scaling_best/vqa_ofaplus_base_pretrain_s2.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/scaling_best/vqa_ofaplus_base_pretrain_s2_bs16.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=vqa_ofaplus_base_pretrain_s2_bs16
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/vqa_ofaplus_base_pretrain_s2_bs16.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/scaling_best/vqa_ofaplus_base_pretrain_s2_bs16.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/scaling_best/vqa_ofaplus_base_pretrain_s2_lr1e4.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=vqa_ofaplus_base_pretrain_s2_lr1e4
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/vqa_ofaplus_base_pretrain_s2_lr1e4.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/scaling_best/vqa_ofaplus_base_pretrain_s2_lr1e4.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_res_4f_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_res_4f_nosample_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_res_4f_nosample_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_res_4f_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
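The video jobs that follow use a different geometry: 4 nodes, a single task per node, 8 GPUs per node, and a bare `srun bash ...`, so the inner script has to fan out over its node's GPUs itself. A minimal sketch of one way to do that (hypothetical — how the repo's inner scripts actually spawn per-GPU workers is not shown here):

import os

import torch
import torch.multiprocessing as mp

def worker(local_rank, node_rank, gpus_per_node):
    # With one task per node, global rank = node offset + local GPU index.
    global_rank = node_rank * gpus_per_node + local_rank
    torch.cuda.set_device(local_rank)
    print(f"node {node_rank} local {local_rank} -> global {global_rank}")
    # ... process-group init and training would go here ...

if __name__ == "__main__":
    node_rank = int(os.environ.get("SLURM_NODEID", "0"))  # set by srun per node
    gpus = max(torch.cuda.device_count(), 1)              # 8 under --gpus-per-node=8
    mp.spawn(worker, args=(node_rank, gpus), nprocs=gpus)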
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_res_4f_vids2_lr4_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_res_4f_vids2_lr4_nosample_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_res_4f_vids2_lr4_nosample_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_res_4f_vids2_lr4_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_res_4f_vids2_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_res_4f_vids2_nosample_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_res_4f_vids2_nosample_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_res_4f_vids2_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_nosample_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_nosample_multinodes_4k.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_res_resnxtvid_init_4f_vids2_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_vitb16_hres_enceval_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_vitb16_hres_enceval_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_vitb16_hres_enceval_nosample_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_vitb16_hres_enceval_nosample_multinodes_2.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval_scratch_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_vitb16_hres_enceval_scratch_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_vitb16_hres_enceval_scratch_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval_scratch.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval_withcls_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_vitb16_hres_enceval_withcls_nosample_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_vitb16_hres_enceval_withcls_nosample_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_enceval_withcls_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_unfreeze_nosample_multinodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_bart_vitb16_hres_unfreeze_nosample_multinodes
4 | #SBATCH --nodes=4
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --gpus-per-node=8
7 | #SBATCH --mail-type=END,FAIL
8 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_bart_vitb16_hres_unfreeze_nosample_multinodes.out
9 | #SBATCH --exclusive
10 | #SBATCH --time=100:00:00
11 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
12 |
13 |
14 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
15 | source /lus/home/NAT/gda2204/mshukor/.bashrc
16 |
17 | conda activate main
18 |
19 |
20 | rm core-python3*
21 |
22 |
23 | srun bash vqa/video/ofa_mini_video_vqa_bart_vitb16_hres_unfreeze_nosample.sh
24 |
25 |
26 |
--------------------------------------------------------------------------------
/slurm_adastra/vqa/video/ofa_mini_video_vqa_initlmscratch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #SBATCH --job-name=ofa_mini_video_vqa_initlmscratch
4 | #SBATCH --nodes=2
5 | #SBATCH --ntasks=2
6 | #SBATCH --gpus=16
7 | #SBATCH --threads-per-core=2
8 | #SBATCH --gpu-bind=closest
9 | #SBATCH -C MI250
10 | #SBATCH -A gda2204
11 | #SBATCH --time=24:00:00
12 | #SBATCH --mail-type=END,FAIL
13 | #SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_video_vqa_initlmscratch.out
14 | #SBATCH --exclusive
15 | #SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
16 |
17 |
18 | cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
19 | source /lus/home/NAT/gda2204/mshukor/.bashrc
20 |
21 | conda activate main
22 |
23 |
24 | rm core-python3*
25 |
26 |
27 | srun -l -N 2 -n 2 -c 128 --gpus=16 bash vqa/video/ofa_mini_video_vqa_initlmscratch.sh
28 |
29 |
30 |
--------------------------------------------------------------------------------
/tasks/.ipynb_checkpoints/__init__-checkpoint.py:
--------------------------------------------------------------------------------
1 | from .cv_tasks import *
2 | from .mm_tasks import *
3 | from .nlg_tasks import *
4 | from .nlu_tasks import *
5 | from .pretrain_tasks import *
6 | from .ofa_task import OFATask
--------------------------------------------------------------------------------
/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | from .mm_tasks import *
2 | from .pretrain_tasks import *
3 | from .ofa_task import OFATask
4 |
5 |
--------------------------------------------------------------------------------
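These `__init__.py` files are not just re-exports: in fairseq-style code each task module registers its class by name at import time, so `from .mm_tasks import *` is what makes task names resolvable from configs. A generic, stand-alone sketch of that registry mechanism (the repo itself relies on fairseq's `register_task` decorator; this version only illustrates the idea):

TASK_REGISTRY = {}

def register_task(name):
    # Decorator: defining (and importing) the class is enough to make it
    # discoverable by its string name.
    def wrapper(cls):
        TASK_REGISTRY[name] = cls
        return cls
    return wrapper

@register_task("caption")
class CaptionTask:
    pass

# After the package __init__ has imported every task module, a config
# string can be mapped back to its class:
assert TASK_REGISTRY["caption"] is CaptionTask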
/tasks/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/__pycache__/ofa_task.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/__pycache__/ofa_task.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/__pycache__/ofa_task.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/__pycache__/ofa_task.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/__pycache__/ofa_task.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/__pycache__/ofa_task.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__init__.py:
--------------------------------------------------------------------------------
1 | from .caption import CaptionTask
2 | from .image_gen import ImageGenTask
3 | from .refcoco import RefcocoTask
4 | from .snli_ve import SnliVeTask
5 | from .vqa_gen import VqaGenTask
6 | from .video_vqa_gen import VidVqaGenTask
7 | from .video_caption import VidCaptionTask
8 | from .audio_caption import AudCaptionTask
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/audio_caption.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/audio_caption.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/audio_caption.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/audio_caption.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/caption.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/caption.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/caption.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/caption.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/caption.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/caption.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/image_gen.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/image_gen.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/image_gen.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/image_gen.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/image_gen.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/image_gen.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/refcoco.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/refcoco.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/refcoco.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/refcoco.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/refcoco.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/refcoco.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/snli_ve.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/snli_ve.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/snli_ve.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/snli_ve.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/snli_ve.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/snli_ve.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/video_caption.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/video_caption.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/video_caption.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/video_caption.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/video_vqa_gen.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/video_vqa_gen.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/video_vqa_gen.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/video_vqa_gen.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/vqa_gen.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/vqa_gen.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/vqa_gen.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/vqa_gen.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/mm_tasks/__pycache__/vqa_gen.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/mm_tasks/__pycache__/vqa_gen.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/pretrain_tasks/__init__.py:
--------------------------------------------------------------------------------
1 | from .unify_task import UnifyTask
--------------------------------------------------------------------------------
/tasks/pretrain_tasks/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/pretrain_tasks/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/pretrain_tasks/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/pretrain_tasks/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/pretrain_tasks/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/pretrain_tasks/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/tasks/pretrain_tasks/__pycache__/unify_task.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/pretrain_tasks/__pycache__/unify_task.cpython-37.pyc
--------------------------------------------------------------------------------
/tasks/pretrain_tasks/__pycache__/unify_task.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/pretrain_tasks/__pycache__/unify_task.cpython-38.pyc
--------------------------------------------------------------------------------
/tasks/pretrain_tasks/__pycache__/unify_task.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/tasks/pretrain_tasks/__pycache__/unify_task.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/BPE/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/BPE/__init__.py
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__init__.py
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/checkpoint_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/checkpoint_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/checkpoint_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/checkpoint_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/checkpoint_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/checkpoint_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/eval_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/eval_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/eval_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/eval_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/transforms.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/transforms.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/transforms.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/transforms.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/transforms.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/transforms.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/trie.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/trie.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/trie.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/trie.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/trie.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/trie.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/utils.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/vision_helper.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/vision_helper.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/vision_helper.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/vision_helper.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/vision_helper.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/vision_helper.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/zero_shot_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/zero_shot_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/zero_shot_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/__pycache__/zero_shot_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/cider/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD_scorer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD_scorer.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD_scorer.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD_scorer.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD_scorer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mshukor/UnIVAL/01220daa6dc837348fbe36677adb0df1de3ad6e7/utils/cider/pyciderevalcap/ciderD/__pycache__/ciderD_scorer.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/trie.py:
--------------------------------------------------------------------------------
1 | # Copyright 2022 The OFA-Sys Team.
2 | # All rights reserved.
3 | # This source code is licensed under the Apache 2.0 license
4 | # found in the LICENSE file in the root directory.
5 |
6 | from collections import defaultdict
7 |
8 |
 9 | class TreeNode:
10 |     def __init__(self):
11 |         # Children are created lazily: indexing a missing token id
12 |         # transparently allocates an empty child node.
13 |         self.child = defaultdict(TreeNode)
14 |
15 |
16 | class Trie:
17 |     """Prefix tree over token-id sequences, used for constrained decoding."""
18 |
19 |     def __init__(self, eos):
20 |         self.root = TreeNode()
21 |         self.eos = eos
22 |
23 |     def insert(self, word):
24 |         # Walk the sequence, creating nodes as needed. No end-of-word
25 |         # marker is kept: inserted sequences are expected to end with eos.
26 |         cur = self.root
27 |         for c in word:
28 |             cur = cur.child[c]
29 |
30 |     def get_next_layer(self, word):
31 |         # Return the token ids that may follow the given prefix. Unknown
32 |         # prefixes fall back to [eos] so decoding can always terminate.
33 |         cur = self.root
34 |         for c in word:
35 |             cur = cur.child.get(c)
36 |             if cur is None:
37 |                 return [self.eos]
38 |         return list(cur.child.keys())
39 |
--------------------------------------------------------------------------------
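A short usage sketch for the Trie above (token ids are made up; in OFA-style code the trie constrains decoding to a closed candidate set, e.g. VQA answers inserted as token-id sequences ending in eos):

from utils.trie import Trie

trie = Trie(eos=2)
trie.insert([34, 17, 2])  # candidate answer A + eos
trie.insert([34, 88, 2])  # candidate answer B + eos

print(trie.get_next_layer([34]))      # [17, 88]: valid continuations
print(trie.get_next_layer([34, 17]))  # [2]: only eos may follow
print(trie.get_next_layer([99]))      # [2]: unknown prefix falls back to eos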