├── Transparency
├── __init__.py
├── model
│ └── __init__.py
├── Trainers
│ └── __init__.py
├── common_code
│ └── __init__.py
├── preprocess
│ ├── Tweets
│ │ ├── .gitignore
│ │ └── README.md
│ ├── SST
│ │ ├── .gitignore
│ │ └── README.md
│ ├── SNLI
│ │ ├── README.md
│ │ └── .gitignore
│ ├── ag_news
│ │ ├── README.md
│ │ ├── classes.txt
│ │ └── .gitignore
│ ├── 20News
│ │ ├── .gitignore
│ │ └── README.md
│ ├── Babi
│ │ └── .gitignore
│ ├── .gitignore
│ ├── CNN
│ │ ├── README.md
│ │ └── .gitignore
│ ├── IMDB
│ │ ├── README.md
│ │ └── .gitignore
│ └── README.md
├── requirements.txt
├── scripts
│ ├── bc
│ │ ├── sst
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ │ ├── amazon
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ │ ├── yelp
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ │ ├── 20news
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ │ └── imdb
│ │ │ └── cnn_dot.sh
│ ├── qa
│ │ ├── cnn
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ │ ├── babi_2
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ │ └── babi_3
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ ├── nli
│ │ └── snli
│ │ │ ├── lstm_dot.sh
│ │ │ └── cnn_dot.sh
│ ├── qa_all.sh
│ └── bc_all.sh
├── .gitignore
└── README.md
├── Transformer-MM-Explainability
├── __init__.py
├── data
│ ├── __init__.py
│ ├── gqa
│ │ ├── __init__.py
│ │ ├── process_raw_data_scripts
│ │ │ └── __init__.py
│ │ └── .gitignore
│ ├── nlvr2
│ │ ├── __init__.py
│ │ └── process_raw_data_scripts
│ │ │ └── __init__.py
│ ├── mscoco_imgfeat
│ │ └── __init__.py
│ ├── nlvr2_imgfeat
│ │ └── __init__.py
│ ├── vg_gqa_imgfeat
│ │ ├── __init__.py
│ │ └── README.md
│ ├── vqa
│ │ └── .gitignore
│ └── lxmert
│ │ └── .gitignore
├── lxmert
│ └── lxmert
│ │ ├── __init__.py
│ │ ├── src
│ │ ├── __init__.py
│ │ ├── lxrt
│ │ │ └── __init__.py
│ │ ├── pretrain
│ │ │ └── __init__.py
│ │ ├── tasks
│ │ │ ├── __init__.py
│ │ │ └── __pycache__
│ │ │ │ ├── __init__.cpython-38.pyc
│ │ │ │ └── vqa_data.cpython-38.pyc
│ │ └── __pycache__
│ │ │ ├── layers.cpython-38.pyc
│ │ │ ├── param.cpython-38.pyc
│ │ │ ├── utils.cpython-38.pyc
│ │ │ ├── __init__.cpython-38.pyc
│ │ │ ├── lxmert_lrp.cpython-38.pyc
│ │ │ ├── vqa_utils.cpython-38.pyc
│ │ │ ├── modeling_frcnn.cpython-38.pyc
│ │ │ ├── processing_image.cpython-38.pyc
│ │ │ ├── huggingface_lxmert.cpython-38.pyc
│ │ │ └── ExplanationGenerator.cpython-38.pyc
│ │ ├── .gitignore
│ │ ├── .gitmodules
│ │ ├── .ipynb_checkpoints
│ │ └── Untitled-checkpoint.ipynb
│ │ ├── __pycache__
│ │ └── __init__.cpython-38.pyc
│ │ ├── experiments
│ │ └── paper
│ │ │ ├── COCO_val2014_000000127510
│ │ │ └── COCO_val2014_000000127510.jpg
│ │ │ ├── COCO_val2014_000000185590
│ │ │ └── COCO_val2014_000000185590.jpg
│ │ │ ├── COCO_val2014_000000200717
│ │ │ └── COCO_val2014_000000200717.jpg
│ │ │ └── COCO_val2014_000000324266
│ │ │ └── COCO_val2014_000000324266.jpg
│ │ └── run
│ │ ├── nlvr2_test.bash
│ │ ├── gqa_test.bash
│ │ ├── vqa_test.bash
│ │ ├── gqa_finetune.bash
│ │ ├── vqa_finetune.bash
│ │ ├── nlvr2_finetune.bash
│ │ └── lxmert_pretrain.bash
├── VisualBERT
│ ├── website
│ │ ├── static
│ │ │ ├── .nojekyll
│ │ │ ├── CNAME
│ │ │ ├── img
│ │ │ │ ├── logo.png
│ │ │ │ ├── favicon.png
│ │ │ │ ├── oss_logo.png
│ │ │ │ └── logo_white_f.png
│ │ │ └── .circleci
│ │ │ │ └── config.yml
│ │ ├── .eslintignore
│ │ ├── .prettierignore
│ │ ├── docs
│ │ │ └── getting_started
│ │ │ │ ├── faqs.md
│ │ │ │ └── video_overview.md
│ │ ├── .prettierrc
│ │ ├── .stylelintrc.js
│ │ ├── .gitignore
│ │ └── src
│ │ │ └── pages
│ │ │ └── api_redirect
│ │ │ └── index.js
│ ├── mmf
│ │ ├── datasets
│ │ │ ├── builders
│ │ │ │ ├── clevr
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── coco2017
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── masked_dataset.py
│ │ │ │ ├── okvqa
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── builder.py
│ │ │ │ ├── vqacp_v2
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── builder.py
│ │ │ │ ├── flickr30k
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── masked_dataset.py
│ │ │ │ ├── localized_narratives
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── mmimdb
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── nlvr2
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── ocrvqa
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── dataset.py
│ │ │ │ │ └── builder.py
│ │ │ │ ├── stvqa
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── builder.py
│ │ │ │ ├── textvqa
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── textcaps
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── hateful_memes
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── visual_dialog
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── visual_entailment
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── visual_genome
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── masked_builder.py
│ │ │ │ ├── vqa2
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── ocr_builder.py
│ │ │ │ ├── vizwiz
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── builder.py
│ │ │ │ ├── sbu_captions
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── masked_builder.py
│ │ │ │ │ └── masked_dataset.py
│ │ │ │ ├── coco
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── gqa
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── masked_builder.py
│ │ │ │ └── conceptual_captions
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── builder.py
│ │ │ │ │ ├── masked_builder.py
│ │ │ │ │ └── masked_dataset.py
│ │ │ ├── databases
│ │ │ │ ├── readers
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── __init__.py
│ │ │ │ └── scene_graph_database.py
│ │ │ ├── __init__.py
│ │ │ └── subset_dataset.py
│ │ ├── models
│ │ │ ├── transformers
│ │ │ │ ├── backends
│ │ │ │ │ ├── layer.py
│ │ │ │ │ └── __init__.py
│ │ │ │ └── __init__.py
│ │ │ └── interfaces
│ │ │ │ └── __init__.py
│ │ ├── projects
│ │ │ ├── vilbert
│ │ │ │ └── configs
│ │ │ │ │ ├── mmimdb
│ │ │ │ │ └── pretrain.yaml
│ │ │ │ │ ├── hateful_memes
│ │ │ │ │ ├── direct.yaml
│ │ │ │ │ └── from_cc.yaml
│ │ │ │ │ ├── masked_coco
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── pretrain_train_val.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── masked_vqa2
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── pretrain_train_val.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── masked_conceptual_captions
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── vqa2
│ │ │ │ │ └── train_val.yaml
│ │ │ ├── hateful_memes
│ │ │ │ └── configs
│ │ │ │ │ ├── vilbert
│ │ │ │ │ ├── direct.yaml
│ │ │ │ │ └── from_cc.yaml
│ │ │ │ │ ├── visual_bert
│ │ │ │ │ ├── direct.yaml
│ │ │ │ │ └── from_coco.yaml
│ │ │ │ │ ├── mmbt
│ │ │ │ │ ├── with_features.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── unimodal
│ │ │ │ │ ├── with_features.yaml
│ │ │ │ │ └── bert.yaml
│ │ │ ├── m4c
│ │ │ │ ├── scripts
│ │ │ │ │ └── __init__.py
│ │ │ │ └── configs
│ │ │ │ │ └── textvqa
│ │ │ │ │ └── joint_with_stvqa.yaml
│ │ │ ├── visual_bert
│ │ │ │ └── configs
│ │ │ │ │ ├── masked_coco
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── defaults.yaml
│ │ │ │ │ └── pretrain_train_val.yaml
│ │ │ │ │ ├── masked_sbu
│ │ │ │ │ └── pretrain.yaml
│ │ │ │ │ ├── masked_vqa2
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── pretrain_train_val.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── masked_conceptual_captions
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── hateful_memes
│ │ │ │ │ ├── direct.yaml
│ │ │ │ │ └── from_coco.yaml
│ │ │ │ │ ├── vqa2
│ │ │ │ │ └── with_raw_images.yaml
│ │ │ │ │ ├── mmimdb
│ │ │ │ │ └── pretrain.yaml
│ │ │ │ │ └── masked_gqa
│ │ │ │ │ └── defaults.yaml
│ │ │ ├── m4c_captioner
│ │ │ │ ├── scripts
│ │ │ │ │ └── __init__.py
│ │ │ │ └── configs
│ │ │ │ │ ├── m4c_captioner
│ │ │ │ │ ├── textcaps
│ │ │ │ │ │ ├── without_ocr.yaml
│ │ │ │ │ │ └── with_caffe2_feat.yaml
│ │ │ │ │ └── coco
│ │ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── butd
│ │ │ │ │ └── textcaps
│ │ │ │ │ ├── eval_pretrained_coco_model.yaml
│ │ │ │ │ └── beam_search.yaml
│ │ │ ├── others
│ │ │ │ ├── mmf_bert
│ │ │ │ │ └── configs
│ │ │ │ │ │ ├── masked_coco
│ │ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ │ ├── pretrain_joint_vqa2.yaml
│ │ │ │ │ │ └── defaults.yaml
│ │ │ │ │ │ ├── masked_vqa2
│ │ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ │ └── defaults.yaml
│ │ │ │ │ │ └── masked_conceptual_captions
│ │ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ │ └── defaults.yaml
│ │ │ │ ├── unimodal
│ │ │ │ │ └── configs
│ │ │ │ │ │ └── hateful_memes
│ │ │ │ │ │ ├── with_features.yaml
│ │ │ │ │ │ └── bert.yaml
│ │ │ │ └── cnn_lstm
│ │ │ │ │ ├── clevr
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── hateful_memes
│ │ │ │ │ └── defaults.yaml
│ │ │ ├── lxmert
│ │ │ │ └── configs
│ │ │ │ │ ├── coco
│ │ │ │ │ └── pretrain.yaml
│ │ │ │ │ ├── vqa2
│ │ │ │ │ └── pretrain.yaml
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── visual_genome
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ └── masked.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ ├── pretrain_vl_right
│ │ │ │ └── configs
│ │ │ │ │ ├── vilbert
│ │ │ │ │ ├── masked_coco
│ │ │ │ │ │ ├── full.yaml
│ │ │ │ │ │ ├── ten_pc.yaml
│ │ │ │ │ │ ├── fifty_pc.yaml
│ │ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── masked_vqa2
│ │ │ │ │ │ ├── full.yaml
│ │ │ │ │ │ ├── fifty_pc.yaml
│ │ │ │ │ │ ├── ten_pc.yaml
│ │ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── masked_conceptual_captions
│ │ │ │ │ │ ├── full.yaml
│ │ │ │ │ │ ├── half.yaml
│ │ │ │ │ │ ├── small.yaml
│ │ │ │ │ │ ├── small_fifty_pc.yaml
│ │ │ │ │ │ ├── small_ten_pc.yaml
│ │ │ │ │ │ ├── full_coco_generated.yaml
│ │ │ │ │ │ ├── half_coco_generated.yaml
│ │ │ │ │ │ ├── small_coco_generated.yaml
│ │ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── visual_bert
│ │ │ │ │ ├── masked_coco
│ │ │ │ │ ├── full.yaml
│ │ │ │ │ ├── fifty_pc.yaml
│ │ │ │ │ ├── ten_pc.yaml
│ │ │ │ │ ├── defaults.yaml
│ │ │ │ │ └── full_train_val.yaml
│ │ │ │ │ ├── masked_vqa2
│ │ │ │ │ ├── full.yaml
│ │ │ │ │ ├── fifty_pc.yaml
│ │ │ │ │ ├── ten_pc.yaml
│ │ │ │ │ ├── full_train_val.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── masked_conceptual_captions
│ │ │ │ │ ├── full.yaml
│ │ │ │ │ ├── half.yaml
│ │ │ │ │ ├── small.yaml
│ │ │ │ │ ├── small_ten_pc.yaml
│ │ │ │ │ ├── small_fifty_pc.yaml
│ │ │ │ │ ├── full_coco_generated.yaml
│ │ │ │ │ ├── half_coco_generated.yaml
│ │ │ │ │ ├── small_coco_generated.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ ├── mmbt
│ │ │ │ └── configs
│ │ │ │ │ ├── vqa2
│ │ │ │ │ └── with_raw_images.yaml
│ │ │ │ │ ├── mmimdb
│ │ │ │ │ └── with_features.yaml
│ │ │ │ │ ├── hateful_memes
│ │ │ │ │ ├── with_features.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── masked_coco
│ │ │ │ │ └── defaults.yaml
│ │ │ ├── pythia
│ │ │ │ └── configs
│ │ │ │ │ ├── masked_q_vqa2
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── visual_genome
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── vqa2
│ │ │ │ │ ├── debug.yaml
│ │ │ │ │ ├── resnet_only.yaml
│ │ │ │ │ └── train_val_resnet_only.yaml
│ │ │ ├── butd
│ │ │ │ └── configs
│ │ │ │ │ ├── coco
│ │ │ │ │ ├── beam_search.yaml
│ │ │ │ │ ├── nucleus_sampling.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── conceptual_captions
│ │ │ │ │ ├── beam_search.yaml
│ │ │ │ │ ├── nucleus_sampling.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── textcaps
│ │ │ │ │ ├── eval_pretrained_coco_model.yaml
│ │ │ │ │ └── beam_search.yaml
│ │ │ ├── ban
│ │ │ │ └── configs
│ │ │ │ │ ├── vqa2
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── vizwiz
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── textvqa
│ │ │ │ │ └── defaults.yaml
│ │ │ └── lorra
│ │ │ │ └── configs
│ │ │ │ └── vqa2
│ │ │ │ └── train_val.yaml
│ │ ├── configs
│ │ │ ├── models
│ │ │ │ ├── mmbt
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── classification.yaml
│ │ │ │ │ └── with_features.yaml
│ │ │ │ ├── fusions
│ │ │ │ │ └── defaults.yaml
│ │ │ │ ├── lxmert
│ │ │ │ │ └── pretrain.yaml
│ │ │ │ ├── vilbert
│ │ │ │ │ └── pretrain.yaml
│ │ │ │ ├── visual_bert
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── classification.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ ├── unimodal
│ │ │ │ │ ├── with_features.yaml
│ │ │ │ │ ├── bert.yaml
│ │ │ │ │ └── image.yaml
│ │ │ │ ├── cnn_lstm
│ │ │ │ │ └── defaults.yaml
│ │ │ │ └── ban
│ │ │ │ │ └── defaults.yaml
│ │ │ └── datasets
│ │ │ │ ├── conceptual_captions
│ │ │ │ └── train_small.yaml
│ │ │ │ ├── mmimdb
│ │ │ │ └── with_features.yaml
│ │ │ │ ├── hateful_memes
│ │ │ │ ├── with_features.yaml
│ │ │ │ └── bert.yaml
│ │ │ │ ├── textvqa
│ │ │ │ └── with_resnet.yaml
│ │ │ │ └── vqa2
│ │ │ │ └── with_raw_images.yaml
│ │ ├── utils
│ │ │ ├── __init__.py
│ │ │ ├── phoc
│ │ │ │ └── __init__.py
│ │ │ ├── torchscript.py
│ │ │ └── transform.py
│ │ ├── trainers
│ │ │ ├── core
│ │ │ │ ├── __init__.py
│ │ │ │ └── profiling.py
│ │ │ ├── callbacks
│ │ │ │ └── __init__.py
│ │ │ └── __init__.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ └── optimizers.py
│ │ ├── common
│ │ │ └── __init__.py
│ │ ├── version.py
│ │ └── __init__.py
│ ├── perturbation_arguments.py
│ ├── projects
│ │ ├── vilbert
│ │ │ └── configs
│ │ │ │ ├── mmimdb
│ │ │ │ └── pretrain.yaml
│ │ │ │ ├── hateful_memes
│ │ │ │ ├── direct.yaml
│ │ │ │ └── from_cc.yaml
│ │ │ │ ├── masked_coco
│ │ │ │ ├── pretrain.yaml
│ │ │ │ ├── pretrain_train_val.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ ├── masked_vqa2
│ │ │ │ ├── pretrain.yaml
│ │ │ │ ├── pretrain_train_val.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ ├── masked_conceptual_captions
│ │ │ │ ├── pretrain.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ └── vqa2
│ │ │ │ └── train_val.yaml
│ │ ├── hateful_memes
│ │ │ └── configs
│ │ │ │ ├── vilbert
│ │ │ │ ├── direct.yaml
│ │ │ │ └── from_cc.yaml
│ │ │ │ ├── visual_bert
│ │ │ │ ├── direct.yaml
│ │ │ │ └── from_coco.yaml
│ │ │ │ ├── mmbt
│ │ │ │ ├── with_features.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ └── unimodal
│ │ │ │ ├── with_features.yaml
│ │ │ │ ├── bert.yaml
│ │ │ │ └── text.yaml
│ │ ├── m4c
│ │ │ ├── scripts
│ │ │ │ └── __init__.py
│ │ │ └── configs
│ │ │ │ └── textvqa
│ │ │ │ └── joint_with_stvqa.yaml
│ │ ├── visual_bert
│ │ │ └── configs
│ │ │ │ ├── masked_coco
│ │ │ │ ├── pretrain.yaml
│ │ │ │ ├── defaults.yaml
│ │ │ │ └── pretrain_train_val.yaml
│ │ │ │ ├── masked_sbu
│ │ │ │ └── pretrain.yaml
│ │ │ │ ├── masked_vqa2
│ │ │ │ ├── pretrain.yaml
│ │ │ │ ├── pretrain_train_val.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ ├── masked_conceptual_captions
│ │ │ │ ├── pretrain.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ ├── hateful_memes
│ │ │ │ ├── direct.yaml
│ │ │ │ └── from_coco.yaml
│ │ │ │ ├── vqa2
│ │ │ │ └── with_raw_images.yaml
│ │ │ │ ├── mmimdb
│ │ │ │ └── pretrain.yaml
│ │ │ │ └── masked_gqa
│ │ │ │ └── defaults.yaml
│ │ ├── others
│ │ │ ├── mmf_bert
│ │ │ │ └── configs
│ │ │ │ │ ├── masked_coco
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ ├── pretrain_joint_vqa2.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ ├── masked_vqa2
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ │ └── masked_conceptual_captions
│ │ │ │ │ ├── pretrain.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ ├── unimodal
│ │ │ │ └── configs
│ │ │ │ │ └── hateful_memes
│ │ │ │ │ ├── with_features.yaml
│ │ │ │ │ └── bert.yaml
│ │ │ └── cnn_lstm
│ │ │ │ ├── clevr
│ │ │ │ └── defaults.yaml
│ │ │ │ └── hateful_memes
│ │ │ │ └── defaults.yaml
│ │ ├── lxmert
│ │ │ └── configs
│ │ │ │ ├── coco
│ │ │ │ └── pretrain.yaml
│ │ │ │ ├── vqa2
│ │ │ │ └── pretrain.yaml
│ │ │ │ ├── pretrain.yaml
│ │ │ │ ├── visual_genome
│ │ │ │ ├── pretrain.yaml
│ │ │ │ └── masked.yaml
│ │ │ │ └── defaults.yaml
│ │ ├── m4c_captioner
│ │ │ ├── scripts
│ │ │ │ └── __init__.py
│ │ │ └── configs
│ │ │ │ ├── m4c_captioner
│ │ │ │ ├── textcaps
│ │ │ │ │ ├── without_ocr.yaml
│ │ │ │ │ └── with_caffe2_feat.yaml
│ │ │ │ └── coco
│ │ │ │ │ └── defaults.yaml
│ │ │ │ └── butd
│ │ │ │ └── textcaps
│ │ │ │ ├── eval_pretrained_coco_model.yaml
│ │ │ │ └── beam_search.yaml
│ │ ├── pretrain_vl_right
│ │ │ └── configs
│ │ │ │ ├── vilbert
│ │ │ │ ├── masked_coco
│ │ │ │ │ ├── full.yaml
│ │ │ │ │ ├── ten_pc.yaml
│ │ │ │ │ ├── fifty_pc.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ ├── masked_vqa2
│ │ │ │ │ ├── full.yaml
│ │ │ │ │ ├── fifty_pc.yaml
│ │ │ │ │ ├── ten_pc.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ └── masked_conceptual_captions
│ │ │ │ │ ├── full.yaml
│ │ │ │ │ ├── half.yaml
│ │ │ │ │ ├── small.yaml
│ │ │ │ │ ├── small_fifty_pc.yaml
│ │ │ │ │ ├── small_ten_pc.yaml
│ │ │ │ │ ├── full_coco_generated.yaml
│ │ │ │ │ ├── half_coco_generated.yaml
│ │ │ │ │ ├── small_coco_generated.yaml
│ │ │ │ │ └── defaults.yaml
│ │ │ │ └── visual_bert
│ │ │ │ ├── masked_coco
│ │ │ │ ├── full.yaml
│ │ │ │ ├── fifty_pc.yaml
│ │ │ │ ├── ten_pc.yaml
│ │ │ │ ├── defaults.yaml
│ │ │ │ └── full_train_val.yaml
│ │ │ │ ├── masked_vqa2
│ │ │ │ ├── full.yaml
│ │ │ │ ├── fifty_pc.yaml
│ │ │ │ ├── ten_pc.yaml
│ │ │ │ ├── full_train_val.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ └── masked_conceptual_captions
│ │ │ │ ├── full.yaml
│ │ │ │ ├── half.yaml
│ │ │ │ ├── small.yaml
│ │ │ │ ├── small_ten_pc.yaml
│ │ │ │ ├── small_fifty_pc.yaml
│ │ │ │ ├── full_coco_generated.yaml
│ │ │ │ ├── half_coco_generated.yaml
│ │ │ │ ├── small_coco_generated.yaml
│ │ │ │ └── defaults.yaml
│ │ ├── mmbt
│ │ │ └── configs
│ │ │ │ ├── vqa2
│ │ │ │ └── with_raw_images.yaml
│ │ │ │ ├── mmimdb
│ │ │ │ └── with_features.yaml
│ │ │ │ ├── hateful_memes
│ │ │ │ ├── with_features.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ └── masked_coco
│ │ │ │ └── defaults.yaml
│ │ ├── pythia
│ │ │ └── configs
│ │ │ │ ├── masked_q_vqa2
│ │ │ │ └── defaults.yaml
│ │ │ │ ├── visual_genome
│ │ │ │ └── defaults.yaml
│ │ │ │ └── vqa2
│ │ │ │ ├── debug.yaml
│ │ │ │ ├── resnet_only.yaml
│ │ │ │ └── train_val_resnet_only.yaml
│ │ ├── butd
│ │ │ └── configs
│ │ │ │ ├── coco
│ │ │ │ ├── beam_search.yaml
│ │ │ │ ├── nucleus_sampling.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ ├── conceptual_captions
│ │ │ │ ├── beam_search.yaml
│ │ │ │ ├── nucleus_sampling.yaml
│ │ │ │ └── defaults.yaml
│ │ │ │ └── textcaps
│ │ │ │ ├── eval_pretrained_coco_model.yaml
│ │ │ │ └── beam_search.yaml
│ │ ├── ban
│ │ │ └── configs
│ │ │ │ ├── vqa2
│ │ │ │ └── defaults.yaml
│ │ │ │ ├── vizwiz
│ │ │ │ └── defaults.yaml
│ │ │ │ └── textvqa
│ │ │ │ └── defaults.yaml
│ │ └── lorra
│ │ │ └── configs
│ │ │ └── vqa2
│ │ │ └── train_val.yaml
│ ├── mmf_cli
│ │ ├── __init__.py
│ │ └── predict.py
│ ├── tests
│ │ ├── __init__.py
│ │ ├── common
│ │ │ └── __init__.py
│ │ ├── configs
│ │ │ └── __init__.py
│ │ ├── datasets
│ │ │ └── __init__.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ └── interfaces
│ │ │ │ └── __init__.py
│ │ ├── modules
│ │ │ └── __init__.py
│ │ ├── trainers
│ │ │ ├── __init__.py
│ │ │ └── callbacks
│ │ │ │ └── __init__.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ └── test_distributed.py
│ ├── tools
│ │ ├── __init__.py
│ │ ├── scripts
│ │ │ ├── __init__.py
│ │ │ ├── bert
│ │ │ │ └── extract_bert.sh
│ │ │ └── gqa
│ │ │ │ └── README.md
│ │ └── sweeps
│ │ │ └── README.md
│ ├── MANIFEST.in
│ ├── .flake8
│ ├── requirements.txt
│ ├── .editorconfig
│ ├── .gitignore
│ └── predict.py
├── scripts
│ ├── lxmert
│ │ ├── gqa
│ │ │ ├── extract_imgfeature.sh
│ │ │ └── gqa_save_exp.sh
│ │ └── vqa
│ │ │ └── save_exp.sh
│ └── visualBert
│ │ └── gqa
│ │ └── gqa_finetune.sh
└── requirements.txt
└── figures
├── factor.png
├── boxmap22.png
├── dataset.png
├── examples.png
├── exp_list.png
├── radar_plot.png
└── vio_factor.png
/Transparency/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transparency/model/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transparency/Trainers/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transparency/common_code/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/gqa/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/nlvr2/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/mscoco_imgfeat/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/nlvr2_imgfeat/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/vg_gqa_imgfeat/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/static/.nojekyll:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/lxrt/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/pretrain/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/tasks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transparency/preprocess/Tweets/.gitignore:
--------------------------------------------------------------------------------
1 | vec_adr.p
2 | *.csv
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/static/CNAME:
--------------------------------------------------------------------------------
1 | mmf.sh
2 |
--------------------------------------------------------------------------------
/Transparency/preprocess/SST/.gitignore:
--------------------------------------------------------------------------------
1 | vec_sst.p
2 | .ipynb_checkpoints
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/gqa/process_raw_data_scripts/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/nlvr2/process_raw_data_scripts/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/clevr/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/coco2017/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/okvqa/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/vqacp_v2/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/models/transformers/backends/layer.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/perturbation_arguments.py:
--------------------------------------------------------------------------------
1 | args = None
2 |
--------------------------------------------------------------------------------
/Transparency/preprocess/SNLI/README.md:
--------------------------------------------------------------------------------
1 | Please run all cells in `SNLI.ipynb` notebook
--------------------------------------------------------------------------------
/Transparency/preprocess/SST/README.md:
--------------------------------------------------------------------------------
1 | Please run all cells in `SST.ipynb` notebook.
--------------------------------------------------------------------------------
/Transparency/preprocess/ag_news/README.md:
--------------------------------------------------------------------------------
1 | Please run all cells in `AGNews.ipynb`
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/flickr30k/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/mmimdb/pretrain.yaml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/mmimdb/pretrain.yaml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transparency/preprocess/20News/.gitignore:
--------------------------------------------------------------------------------
1 | 20News_sports_dataset.csv
2 | vec_20news_sports.p
--------------------------------------------------------------------------------
/Transparency/preprocess/Babi/.gitignore:
--------------------------------------------------------------------------------
1 | */**
2 | *
3 | !Babi.ipynb
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/localized_narratives/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/.gitignore:
--------------------------------------------------------------------------------
1 | *.caffemodel
2 | *.tsv
3 | /snap
4 |
--------------------------------------------------------------------------------
/Transparency/preprocess/.gitignore:
--------------------------------------------------------------------------------
1 | .vector_cache
2 | .ipynb_checkpoints
3 | MIMIC_Datasets
4 |
--------------------------------------------------------------------------------
/Transparency/preprocess/CNN/README.md:
--------------------------------------------------------------------------------
1 | Please run all the cells in `CNN.ipynb` notebook.
2 |
3 |
--------------------------------------------------------------------------------
/Transparency/preprocess/ag_news/classes.txt:
--------------------------------------------------------------------------------
1 | World
2 | Sports
3 | Business
4 | Sci/Tech
5 |
--------------------------------------------------------------------------------
/Transparency/preprocess/IMDB/README.md:
--------------------------------------------------------------------------------
1 | Please run all the cells in `IMDB.ipynb` notebook.
2 |
3 |
--------------------------------------------------------------------------------
/figures/factor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/figures/factor.png
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/.eslintignore:
--------------------------------------------------------------------------------
1 | .docusaurus
2 | static/api
3 | build/
4 |
--------------------------------------------------------------------------------
/Transparency/preprocess/20News/README.md:
--------------------------------------------------------------------------------
1 | Please run all the cells in `20News.ipynb` notebook.
2 |
3 |
--------------------------------------------------------------------------------
/figures/boxmap22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/figures/boxmap22.png
--------------------------------------------------------------------------------
/figures/dataset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/figures/dataset.png
--------------------------------------------------------------------------------
/figures/examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/figures/examples.png
--------------------------------------------------------------------------------
/figures/exp_list.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/figures/exp_list.png
--------------------------------------------------------------------------------
/figures/radar_plot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/figures/radar_plot.png
--------------------------------------------------------------------------------
/figures/vio_factor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/figures/vio_factor.png
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/mmbt/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf_cli/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tools/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transparency/preprocess/CNN/.gitignore:
--------------------------------------------------------------------------------
1 | cnn/**
2 | cnn_dataset.csv
3 | cnn.tar.gz
4 | entity_list.txt
5 | vec_cnn.p
--------------------------------------------------------------------------------
/Transparency/preprocess/IMDB/.gitignore:
--------------------------------------------------------------------------------
1 | imdb_dataset.csv
2 | imdb_word_index.json
3 | imdb_full.pkl
4 | vec_imdb.p
--------------------------------------------------------------------------------
/Transparency/preprocess/ag_news/.gitignore:
--------------------------------------------------------------------------------
1 | agnews_dataset.csv
2 | agnews_dataset_split.csv
3 | vec_agnews.p
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/fusions/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./concat_bert.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/common/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/configs/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/trainers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tools/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/gqa/.gitignore:
--------------------------------------------------------------------------------
1 | /submit.json
2 | /testdev.json
3 | /train.json
4 | /valid.json
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/vqa/.gitignore:
--------------------------------------------------------------------------------
1 | /minival.json
2 | /nominival.json
3 | /train.json
4 | /test.json
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/trainers/core/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/.prettierignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | build
3 | .docusaurus
4 | static/api
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/models/interfaces/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/models/transformers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/vilbert/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/hateful_memes/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/trainers/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/vilbert/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/hateful_memes/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_sbu/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/models/interfaces/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/trainers/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/lxmert/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/lxmert/defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/mmimdb/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/nlvr2/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/ocrvqa/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/stvqa/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/textvqa/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_sbu/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/mmf_bert/configs/masked_coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/mmf_bert/configs/masked_vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transparency/preprocess/SNLI/.gitignore:
--------------------------------------------------------------------------------
1 | *.zip
2 | snli_dataset.csv
3 | entity_list.txt
4 | snli_1.0/**
5 | __MACOSX/**
6 | vec_snli.p
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/vilbert/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/vilbert/defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/textcaps/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/databases/readers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/models/transformers/backends/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c_captioner/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/mmf_bert/configs/masked_coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/mmf_bert/configs/masked_vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/lxmert/configs/coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../defaults.yaml
3 | - ./masked.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/lxmert/configs/vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../defaults.yaml
3 | - ./masked.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c_captioner/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_coco/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/visual_bert/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/visual_bert/defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/hateful_memes/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/visual_dialog/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/visual_entailment/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/visual_genome/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/lxmert/configs/coco/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../defaults.yaml
3 | - ./masked.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/lxmert/configs/vqa2/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../defaults.yaml
3 | - ./masked.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_coco/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_coco/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_coco/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/mmf_bert/configs/masked_conceptual_captions/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/lxmert/.gitignore:
--------------------------------------------------------------------------------
1 | /mscoco_minival.json
2 | /mscoco_nominival.json
3 | /mscoco_train.json
4 | /vgnococo.json
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/mmf_bert/configs/masked_conceptual_captions/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/full.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "data/nlvr2/nlvr"]
2 | path = data/nlvr2/nlvr
3 | url = https://github.com/lil-lab/nlvr.git
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/visual_bert/classification.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | visual_bert:
3 | training_head_type: classification
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/utils/phoc/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from .build_phoc import build_phoc # NoQA
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/visual_bert/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | training:
5 | batch_size: 128
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/hateful_memes/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | training:
5 | batch_size: 128
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/visual_bert/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | training:
5 | batch_size: 128
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/hateful_memes/direct.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | training:
5 | batch_size: 128
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/mmbt/configs/vqa2/with_raw_images.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - ../../../../mmf/configs/datasets/vqa2/with_raw_images.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/.ipynb_checkpoints/Untitled-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 5
6 | }
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/mmbt/configs/vqa2/with_raw_images.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - ../../../../mmf/configs/datasets/vqa2/with_raw_images.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/vqa2/with_raw_images.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - ../../../../mmf/configs/datasets/vqa2/with_raw_images.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/trainers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | __all__ = ["BaseTrainer"]
3 |
4 | from .base_trainer import BaseTrainer
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/vqa2/with_raw_images.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - ../../../../mmf/configs/datasets/vqa2/with_raw_images.yaml
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/docs/getting_started/faqs.md:
--------------------------------------------------------------------------------
1 | ---
2 | id: faqs
3 | title: Frequently Asked Questions (FAQ)
4 | sidebar_label: FAQs
5 | ---
6 | ## Coming Soon!
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/static/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/VisualBERT/website/static/img/logo.png
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements.txt
2 | include LICENSE
3 | include NOTICES
4 | recursive-include mmf/configs/ *.yaml
5 | recursive-include projects/ *.yaml
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/static/img/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/VisualBERT/website/static/img/favicon.png
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/static/img/oss_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/VisualBERT/website/static/img/oss_logo.png
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tools/sweeps/README.md:
--------------------------------------------------------------------------------
1 | # Sweep Scripts
2 |
3 | See [https://mmf.sh/docs/tutorials/slurm](https://mmf.sh/docs/tutorials/slurm) for tutorial on how to use these scripts.
4 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/static/img/logo_white_f.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/VisualBERT/website/static/img/logo_white_f.png
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/mmbt/configs/mmimdb/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - configs/models/mmbt/with_features.yaml
4 | - configs/datasets/mmimdb/with_features.yaml
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/mmbt/configs/mmimdb/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - configs/models/mmbt/with_features.yaml
4 | - configs/datasets/mmimdb/with_features.yaml
5 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/layers.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/layers.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/param.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/param.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/utils.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/mmbt/classification.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | mmbt:
3 | training_head_type: classification
4 | num_labels: 2
5 | losses:
6 | - type: cross_entropy
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/Transparency/preprocess/README.md:
--------------------------------------------------------------------------------
1 | For each dataset you want to run the experiments for, please go to the corresponding directory to generate its necessary preprocessed files and follow the instructions in the corresponding `README.md`.
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/vilbert/from_cc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: vilbert.pretrained.cc.original
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/hateful_memes/from_cc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: vilbert.pretrained.cc.original
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/vilbert/from_cc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: vilbert.pretrained.cc.original
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/visual_bert/from_coco.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: visual_bert.pretrained.coco
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/hateful_memes/from_cc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: vilbert.pretrained.cc.original
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/hateful_memes/from_coco.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: visual_bert.pretrained.coco
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/lxmert_lrp.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/lxmert_lrp.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/vqa_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/vqa_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/Transparency/preprocess/Tweets/README.md:
--------------------------------------------------------------------------------
1 | For obtaining ADR tweets data, please contact the authors of 'Attention is Not Explanation' paper (https://arxiv.org/abs/1902.10186).
2 |
3 | Then, run all cells in `Tweet_ADR.ipynb` notebook.
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/datasets/conceptual_captions/train_small.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | conceptual_captions:
3 | annotations:
4 | train:
5 | - cc/defaults/annotations/train_small.npy
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/visual_bert/from_coco.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: visual_bert.pretrained.coco
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/hateful_memes/from_coco.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | checkpoint:
5 | resume_pretrained: true
6 | resume_zoo: visual_bert.pretrained.coco
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/modeling_frcnn.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/modeling_frcnn.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/processing_image.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/processing_image.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/tasks/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/tasks/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/tasks/__pycache__/vqa_data.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/tasks/__pycache__/vqa_data.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c_captioner/configs/m4c_captioner/textcaps/without_ocr.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | dataset_config:
4 | textcaps:
5 | use_ocr: False # remove all the OCRs from each image
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/huggingface_lxmert.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/huggingface_lxmert.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c_captioner/configs/m4c_captioner/textcaps/without_ocr.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | dataset_config:
4 | textcaps:
5 | use_ocr: False # remove all the OCRs from each image
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/ExplanationGenerator.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/src/__pycache__/ExplanationGenerator.cpython-38.pyc
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/vqa2/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | __all__ = ["VQA2Builder", "VQA2Dataset"]
3 |
4 | from .builder import VQA2Builder
5 | from .dataset import VQA2Dataset
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/lxmert/configs/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - coco/masked.yaml
4 | - gqa/masked.yaml
5 | - visual_genome/masked.yaml
6 | - vqa2/masked.yaml
7 | - configs/models/lxmert/defaults.yaml
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/mmf_bert/configs/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | return_features_info: true
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/lxmert/configs/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - coco/masked.yaml
4 | - gqa/masked.yaml
5 | - visual_genome/masked.yaml
6 | - vqa2/masked.yaml
7 | - configs/models/lxmert/defaults.yaml
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/mmf_bert/configs/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | return_features_info: true
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/.flake8:
--------------------------------------------------------------------------------
1 | # This is an example .flake8 config used when developing *Black* itself.
2 |
3 | [flake8]
4 | max-line-length = 88
5 | max-complexity = 18
6 | select = B,C,E,F,W,T4,B9
7 | ignore = E203, E266, C901, C408, W503
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/scripts/lxmert/gqa/extract_imgfeature.sh:
--------------------------------------------------------------------------------
1 | samples=-1
2 | CUDA_VISIBLE_DEVICES=2 PYTHONPATH=`pwd` python lxmert/lxmert/extract_img_features.py \
3 | --COCO_path /home/lyb/vqa_data/gqa/images/ \
4 | --num-samples=$samples \
5 | --task gqa
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/vizwiz/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from .builder import VizWizBuilder
3 | from .dataset import VizWizDataset
4 |
5 |
6 | __all__ = ["VizWizBuilder", "VizWizDataset"]
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tools/scripts/bert/extract_bert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | N_REM=`expr $3 - 1`
3 |
4 | for i in $(seq 0 $N_REM); do
5 | python tools/scripts/bert/extract_bert_embeddings.py --imdb_path $1 --out_path $2 --group_id $i --n_groups $3 &
6 | done
7 |
--------------------------------------------------------------------------------
/Transparency/requirements.txt:
--------------------------------------------------------------------------------
1 | pandas==0.24.2
2 | nltk==3.4.5
3 | tqdm==4.31.1
4 | typing==3.6.4
5 | numpy==1.16.2
6 | allennlp==0.8.3
7 | scipy==1.2.1
8 | seaborn==0.9.0
9 | gensim==3.7.2
10 | spacy==2.1.3
11 | matplotlib==3.0.3
12 | ipython==7.4.0
13 | scikit_learn==0.20.3
14 |
--------------------------------------------------------------------------------
/Transparency/scripts/bc/sst/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=sst
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | import VisualBERT.mmf.modules.losses
3 | import VisualBERT.mmf.modules.metrics
4 | import VisualBERT.mmf.modules.optimizers
5 | import VisualBERT.mmf.modules.schedulers
6 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/modules/optimizers.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.common.registry import registry
4 | from transformers.optimization import AdamW
5 |
6 |
7 | registry.register_optimizer("adam_w")(AdamW)
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "arrowParens": "always",
3 | "bracketSpacing": false,
4 | "jsxBracketSameLine": true,
5 | "printWidth": 80,
6 | "proseWrap": "never",
7 | "singleQuote": true,
8 | "trailingComma": "all"
9 | }
10 |
--------------------------------------------------------------------------------
/Transparency/scripts/bc/amazon/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=amazon
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transparency/scripts/bc/yelp/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=yelp
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/common/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from .meter import Meter
3 | from .registry import registry
4 | from .sample import Sample, SampleList
5 |
6 |
7 | __all__ = ["Sample", "SampleList", "Meter", "registry"]
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000127510/COCO_val2014_000000127510.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000127510/COCO_val2014_000000127510.jpg
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000185590/COCO_val2014_000000185590.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000185590/COCO_val2014_000000185590.jpg
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000200717/COCO_val2014_000000200717.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000200717/COCO_val2014_000000200717.jpg
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000324266/COCO_val2014_000000324266.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BierOne/Attention-Faithfulness/HEAD/Transformer-MM-Explainability/lxmert/lxmert/experiments/paper/COCO_val2014_000000324266/COCO_val2014_000000324266.jpg
--------------------------------------------------------------------------------
/Transparency/scripts/qa/cnn/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=cnn
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pythia/configs/masked_q_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./pythia.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: masked_q_vqa2/accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pythia/configs/masked_q_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./pythia.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: masked_q_vqa2/accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/requirements.txt:
--------------------------------------------------------------------------------
1 | demjson==2.2.4
2 | torchtext==0.5.0
3 | GitPython==3.1.0
4 | requests==2.23.0
5 | fasttext==0.9.1
6 | nltk==3.4.5
7 | editdistance==0.5.3
8 | transformers==3.4.0
9 | omegaconf==2.0.1rc4
10 | lmdb==0.98
11 | termcolor==1.1.0
12 |
--------------------------------------------------------------------------------
/Transparency/scripts/bc/20news/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=20News_sports
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transparency/scripts/nli/snli/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=snli
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transparency/scripts/qa/babi_2/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=babi_2
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transparency/scripts/qa/babi_3/lstm_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=babi_3
2 | output_path=./outputs
3 | encoder_type=lstm
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/sbu_captions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | __all__ = ["MaskedSBUBuilder", "MaskedSBUDataset"]
4 |
5 | from .masked_builder import MaskedSBUBuilder
6 | from .masked_dataset import MaskedSBUDataset
7 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/coco/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: beam_search
8 | params:
9 | beam_length: 5
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/mmimdb/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_mmimdb:
6 | return_features_info: true
7 |
8 | model_config:
9 | visual_bert:
10 | training_head_type: pretraining
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/docs/getting_started/video_overview.md:
--------------------------------------------------------------------------------
1 | ---
2 | id: video_overview
3 | title: Video overview
4 | sidebar_label: Video overview
5 | ---
6 |
7 |
8 |

9 |
10 |
--------------------------------------------------------------------------------
/Transparency/.gitignore:
--------------------------------------------------------------------------------
1 | outputs/*/*
2 | outputs_dev/*/*
3 | caml-mimic/*
4 | __pycache__
5 | .ipynb_checkpoints
6 | .vscode
7 | TACL-Attention
8 | # graph_outputs
9 | Workshop-NAACL-Attention
10 | graph_outputs/adv_*
11 | graph_outputs/*kendalltop*
12 | graph_outputs/**/*.pdf
13 | NAACLAttention*
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*.py]
4 | charset = utf-8
5 | trim_trailing_whitespace = true
6 | end_of_line = lf
7 | insert_final_newline = true
8 | indent_style = space
9 | indent_size = 4
10 |
11 | [*.md]
12 | trim_trailing_whitespace = false
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/coco/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: beam_search
8 | params:
9 | beam_length: 5
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/mmimdb/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_mmimdb:
6 | return_features_info: true
7 |
8 | model_config:
9 | visual_bert:
10 | training_head_type: pretraining
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/ban/configs/vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../../../mmf/configs/vqa2/defaults.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - vqa_accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: vqa2/vqa_accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/ban/configs/vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../../../mmf/configs/vqa2/defaults.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - vqa_accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: vqa2/vqa_accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/ban/configs/vizwiz/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../../../mmf/configs/vizwiz/defaults.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - vqa_accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: vizwiz/vqa_accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/datasets/mmimdb/with_features.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | mmimdb:
3 | use_images: false
4 | use_features: true
5 | # Disable this in your config if you do not need features info
6 | # and are running out of memory
7 | return_features_info: false
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/ban/configs/vizwiz/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../../../mmf/configs/vizwiz/defaults.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - vqa_accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: vizwiz/vqa_accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/ban/configs/textvqa/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../../../mmf/configs/textvqa/defaults.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - vqa_accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: textvqa/vqa_accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/coco/nucleus_sampling.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: nucleus_sampling
8 | params:
9 | sum_threshold: 0.8
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/ban/configs/textvqa/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../../../mmf/configs/textvqa/defaults.yaml
3 |
4 | evaluation:
5 | metrics:
6 | - vqa_accuracy
7 |
8 | training:
9 | early_stop:
10 | criteria: textvqa/vqa_accuracy
11 | minimize: false
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/coco/nucleus_sampling.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: nucleus_sampling
8 | params:
9 | sum_threshold: 0.8
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import sys
4 |
5 |
6 | __version__ = "1.0.0rc12"
7 |
8 | msg = "MMF is only compatible with Python 3.6 and newer."
9 |
10 |
11 | if sys.version_info < (3, 6):
12 | raise ImportError(msg)
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/conceptual_captions/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: beam_search
8 | params:
9 | num_beams: 5
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/datasets/hateful_memes/with_features.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | hateful_memes:
3 | use_images: false
4 | use_features: true
5 | # Disable this in your config if you do not need features info
6 | # and are running out of memory
7 | return_features_info: true
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/conceptual_captions/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: beam_search
8 | params:
9 | num_beams: 5
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/conceptual_captions/nucleus_sampling.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: nucleus_sampling
8 | params:
9 | sum_threshold: 0.8
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/mmbt/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - configs/models/mmbt/with_features.yaml
4 | - configs/datasets/hateful_memes/with_features.yaml
5 |
6 | optimizer:
7 | type: adam_w
8 | params:
9 | lr: 5e-5
10 | eps: 1e-8
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/mmbt/configs/hateful_memes/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - configs/models/mmbt/with_features.yaml
4 | - configs/datasets/hateful_memes/with_features.yaml
5 |
6 | optimizer:
7 | type: adam_w
8 | params:
9 | lr: 5e-5
10 | eps: 1e-8
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/conceptual_captions/nucleus_sampling.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | model_config:
5 | butd:
6 | inference:
7 | type: nucleus_sampling
8 | params:
9 | sum_threshold: 0.8
10 |
11 | training:
12 | batch_size: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/mmbt/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - configs/models/mmbt/with_features.yaml
4 | - configs/datasets/hateful_memes/with_features.yaml
5 |
6 | optimizer:
7 | type: adam_w
8 | params:
9 | lr: 5e-5
10 | eps: 1e-8
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/mmbt/configs/hateful_memes/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - configs/models/mmbt/with_features.yaml
4 | - configs/datasets/hateful_memes/with_features.yaml
5 |
6 | optimizer:
7 | type: adam_w
8 | params:
9 | lr: 5e-5
10 | eps: 1e-8
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_coco/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_coco/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/unimodal/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./image.yaml
3 | - configs/datasets/hateful_memes/with_features.yaml
4 | - configs/models/unimodal/with_features.yaml
5 |
6 |
7 | optimizer:
8 | type: adam_w
9 | params:
10 | lr: 5e-5
11 | eps: 1e-8
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/mmf_bert/configs/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco.npy
9 | return_features_info: true
10 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_coco/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/static/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # This config file will prevent tests from being run on the gh-pages branch.
2 | version: 2
3 | jobs:
4 | build:
5 | machine: true
6 | branches:
7 | ignore: gh-pages
8 | steps:
9 | - run: echo "Skipping tests on gh-pages branch"
10 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/unimodal/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./image.yaml
3 | - configs/datasets/hateful_memes/with_features.yaml
4 | - configs/models/unimodal/with_features.yaml
5 |
6 |
7 | optimizer:
8 | type: adam_w
9 | params:
10 | lr: 5e-5
11 | eps: 1e-8
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/mmf_bert/configs/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco.npy
9 | return_features_info: true
10 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_coco/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | annotations:
7 | train:
8 | - vqa2/defaults/annotations/imdb_train2014_len_coco_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_coco/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_coco/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_coco/fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_coco/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_coco/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | annotations:
7 | train:
8 | - coco/defaults/annotations/imdb_karpathy_train_by_image_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/unimodal/configs/hateful_memes/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./image.yaml
3 | - configs/datasets/hateful_memes/with_features.yaml
4 | - configs/models/unimodal/with_features.yaml
5 |
6 |
7 | optimizer:
8 | type: adam_w
9 | params:
10 | lr: 5e-5
11 | eps: 1e-8
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/unimodal/configs/hateful_memes/with_features.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./image.yaml
3 | - configs/datasets/hateful_memes/with_features.yaml
4 | - configs/models/unimodal/with_features.yaml
5 |
6 |
7 | optimizer:
8 | type: adam_w
9 | params:
10 | lr: 5e-5
11 | eps: 1e-8
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/half.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_mid.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/half.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_mid.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small.npy
9 |
--------------------------------------------------------------------------------
/Transparency/scripts/bc/imdb/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=imdb # ['imdb', 'sst', 'yelp', '20News_sports', 'amazon']
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transparency/scripts/bc/yelp/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=yelp # ['imdb', 'sst', 'yelp', '20News_sports', 'amazon']
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/half.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_mid.npy
9 |
--------------------------------------------------------------------------------
/Transparency/scripts/bc/amazon/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=amazon # ['imdb', 'sst', 'yelp', '20News_sports', 'amazon']
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/half.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_mid.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/requirements.txt:
--------------------------------------------------------------------------------
1 | Pillow>=8.1.1
2 | einops == 0.3.0
3 | imageio == 2.9.0
4 | matplotlib == 3.3.2
5 | opencv_python
6 | scikit_image == 0.17.2
7 | scipy == 1.5.2
8 | scikit-learn
9 | tqdm == 4.51.0
10 | transformers == 3.5.1
11 | utils == 1.0.1
12 | demjson
13 | torchtext
14 | omegaconf
15 | captum
16 | wget
17 | ftfy
18 | regex
--------------------------------------------------------------------------------
/Transparency/scripts/bc/sst/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=sst # [sst, imdb, 20News_sports, tweet, Anemia, Diabetes, AgNews]
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transparency/scripts/nli/snli/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=snli # [sst, imdb, 20News_sports, tweet, Anemia, Diabetes, AgNews]
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transparency/scripts/qa/cnn/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=cnn # [sst, imdb, 20News_sports, tweet, Anemia, Diabetes, AgNews]
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small_fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small_ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transparency/scripts/qa/babi_2/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=babi_2 # [sst, imdb, 20News_sports, tweet, Anemia, Diabetes, AgNews]
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transparency/scripts/qa/babi_3/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=babi_3 # [sst, imdb, 20News_sports, tweet, Anemia, Diabetes, AgNews]
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_qa.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small_fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small_ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small_ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transparency/scripts/bc/20news/cnn_dot.sh:
--------------------------------------------------------------------------------
1 | dataset_name=20News_sports # [sst, imdb, 20News_sports, tweet, Anemia, Diabetes, AgNews]
2 | output_path=./outputs
3 | encoder_type=cnn
4 | attention_type=dot
5 | gpu=2
6 | python train_and_run_experiments_bc.py --dataset ${dataset_name} --data_dir . --output_dir ${output_path} --attention ${attention_type} --encoder ${encoder_type} --gpu ${gpu}
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small_ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_10_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small_fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small_fifty_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | annotations:
7 | train:
8 | - cc/defaults/annotations/train_small_50_pc.npy
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/coco/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | __all__ = ["COCOBuilder", "COCODataset", "MaskedCOCOBuilder", "MaskedCOCODataset"]
3 |
4 | from .builder import COCOBuilder
5 | from .dataset import COCODataset
6 | from .masked_builder import MaskedCOCOBuilder
7 | from .masked_dataset import MaskedCOCODataset
8 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/gqa/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | __all__ = ["GQABuilder", "GQADataset", "MaskedGQABuilder", "MaskedGQADataset"]
4 |
5 | from .builder import GQABuilder
6 | from .dataset import GQADataset
7 | from .masked_builder import MaskedGQABuilder
8 | from .masked_dataset import MaskedGQADataset
9 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/cnn_lstm/clevr/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: Adamax
3 | params:
4 | eps: 1.0e-08
5 | lr: 0.01
6 | weight_decay: 0
7 |
8 | evaluation:
9 | metrics:
10 | - accuracy
11 |
12 | training:
13 | batch_size: 128
14 | snapshot_interval: 6000
15 | early_stop:
16 | criteria: clevr/accuracy
17 | minimize: false
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/unimodal/bert.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./text.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 | - configs/models/unimodal/bert.yaml
5 |
6 | model_config:
7 | unimodal_text:
8 | classifier:
9 | type: mlp
10 | params:
11 | in_dim: 768
12 | num_layers: 2
13 |
14 | training:
15 | batch_size: 128
16 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/cnn_lstm/clevr/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: Adamax
3 | params:
4 | eps: 1.0e-08
5 | lr: 0.01
6 | weight_decay: 0
7 |
8 | evaluation:
9 | metrics:
10 | - accuracy
11 |
12 | training:
13 | batch_size: 128
14 | snapshot_interval: 6000
15 | early_stop:
16 | criteria: clevr/accuracy
17 | minimize: false
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/unimodal/bert.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./text.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 | - configs/models/unimodal/bert.yaml
5 |
6 | model_config:
7 | unimodal_text:
8 | classifier:
9 | type: mlp
10 | params:
11 | in_dim: 768
12 | num_layers: 2
13 |
14 | training:
15 | batch_size: 128
16 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/unimodal/configs/hateful_memes/bert.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./text.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 | - configs/models/unimodal/bert.yaml
5 |
6 | model_config:
7 | unimodal_text:
8 | classifier:
9 | type: mlp
10 | params:
11 | in_dim: 768
12 | num_layers: 2
13 |
14 | training:
15 | batch_size: 128
16 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/unimodal/configs/hateful_memes/bert.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./text.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 | - configs/models/unimodal/bert.yaml
5 |
6 | model_config:
7 | unimodal_text:
8 | classifier:
9 | type: mlp
10 | params:
11 | in_dim: 768
12 | num_layers: 2
13 |
14 | training:
15 | batch_size: 128
16 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/.stylelintrc.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
// Stylelint configuration: loads the `stylelint-copyright` plugin and turns on
// its `docusaurus/copyright-header` rule, which requires the copyright header
// comment at the top of every stylesheet.
module.exports = {
  plugins: ['stylelint-copyright'],
  rules: {
    'docusaurus/copyright-header': true,
  },
};
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/datasets/hateful_memes/bert.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | hateful_memes:
3 | processors:
4 | text_processor:
5 | type: bert_tokenizer
6 | params:
7 | tokenizer_config:
8 | type: bert-base-uncased
9 | params:
10 | do_lower_case: true
11 | mask_probability: 0
12 | max_seq_length: 128
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tools/scripts/gqa/README.md:
--------------------------------------------------------------------------------
1 | # Conversion of GQA to VQA format
2 |
3 | * Download GQA datasets and store as format shown in conversion script
4 | * Download glove embeddings 300D file
5 | * Run the script from the root of the repo as follows, changing the relevant paths:
6 |
7 | ```
8 | python tools/scripts/gqa/convert_gqa_to_vqa.py --gqa_dir --out_dir
9 | ```
10 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/mmf_bert/configs/masked_coco/pretrain_joint_vqa2.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - ../masked_vqa2/defaults.yaml
4 |
5 | model_config:
6 | mmf_bert:
7 | training_head_type: pretraining,vqa
8 |
9 | scheduler:
10 | type: warmup_linear
11 | params:
12 | num_warmup_steps: 3000
13 | num_training_steps: 33000
14 |
15 | training:
16 | max_updates: 34000
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/mmf_bert/configs/masked_coco/pretrain_joint_vqa2.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 | - ../masked_vqa2/defaults.yaml
4 |
5 | model_config:
6 | mmf_bert:
7 | training_head_type: pretraining,vqa
8 |
9 | scheduler:
10 | type: warmup_linear
11 | params:
12 | num_warmup_steps: 3000
13 | num_training_steps: 33000
14 |
15 | training:
16 | max_updates: 34000
17 |
--------------------------------------------------------------------------------
/Transparency/scripts/qa_all.sh:
--------------------------------------------------------------------------------
# Sweep QA experiments over every dataset / attention / encoder combination.
output_path=./outputs/qa
#qqp snli babi_1 babi_2 babi_3
gpu=0
for dataset_name in qqp snli babi_1
do
	for attention_type in tanh dot
	do
		for encoder_type in cnn lstm
		do
			# Quote expansions so values containing spaces do not word-split (shellcheck SC2086).
			python train_and_run_experiments_qa.py --dataset "${dataset_name}" --data_dir . --output_dir "${output_path}" --attention "${attention_type}" --encoder "${encoder_type}" --gpu "$gpu";
		done
	done
done
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/.gitignore:
--------------------------------------------------------------------------------
1 | *.log
2 | *.err
3 | *.pyc
4 | *.swp
5 | .idea/*
6 | **/__pycache__/*
7 | **/output/*
8 | data/.DS_Store
9 | docs/build
10 | results/*
11 | build
12 | dist
13 | boards/*
14 | *.egg-info/
15 | checkpoint
16 | *.pth
17 | *.ckpt
18 | *_cache
19 | .cache
20 | data
21 | save
22 | *.eggs
23 | .eggs
24 | eggs/
25 | *.egg
26 | .DS_Store
27 | .vscode
28 | .vscode/*
29 | *.so
30 | *-checkpoint.ipynb
31 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/utils/torchscript.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from typing import Dict, Optional
4 |
5 | from torch import Tensor
6 |
7 |
def getattr_torchscriptable(
    dictionary: Dict[str, Tensor], key: str, default: Optional[Tensor] = None
) -> Optional[Tensor]:
    """Return ``dictionary[key]`` if present, otherwise ``default``.

    TorchScript-friendly stand-in for ``dict.get`` with an ``Optional``
    default: the membership test is kept explicit so the function scripts
    cleanly.
    """
    if key not in dictionary:
        return default
    return dictionary[key]
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pythia/configs/visual_genome/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | visual_genome:
3 | return_scene_graph: false
4 | return_objects: false
5 | return_relationships: false
6 | return_features_info: false
7 | no_unk: true
8 |
9 | evaluation:
10 | metrics:
11 | - vqa_accuracy
12 |
13 | training:
14 | early_stop:
15 | criteria: visual_genome/vqa_accuracy
16 | minimize: false
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pythia/configs/visual_genome/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | visual_genome:
3 | return_scene_graph: false
4 | return_objects: false
5 | return_relationships: false
6 | return_features_info: false
7 | no_unk: true
8 |
9 | evaluation:
10 | metrics:
11 | - vqa_accuracy
12 |
13 | training:
14 | early_stop:
15 | criteria: visual_genome/vqa_accuracy
16 | minimize: false
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependencies
2 | /node_modules
3 |
4 | # Production
5 | /build
6 |
7 | # Generated files
8 | .docusaurus
9 | .cache-loader
10 |
11 | # Misc
12 | .DS_Store
13 | .env.local
14 | .env.development.local
15 | .env.test.local
16 | .env.production.local
17 |
18 | npm-debug.log*
19 | yarn-debug.log*
20 | yarn-error.log*
21 |
22 | # ESLint
23 | .eslintcache
24 |
25 | # Static Docs
26 | static/api
27 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/predict.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 |
4 | import sys
5 |
6 | from run import run
7 |
8 |
def predict(opts=None):
    """Run an mmf job in prediction mode.

    Appends the ``evaluation.predict=true`` override so the configuration
    system enables prediction, then delegates to :func:`run`.

    Args:
        opts: Optional list of ``key=value`` override strings. When None,
            the override is appended to ``sys.argv`` instead.
    """
    if opts is None:
        sys.argv.extend(["evaluation.predict=true"])
    else:
        # NOTE(review): `opts` is mutated here but never handed to `run()`
        # below; this branch only has an effect if `run` re-reads these
        # options from shared state — verify against `run`'s implementation.
        opts.extend(["evaluation.predict=true"])

    run(predict=True)


if __name__ == "__main__":
    predict()
20 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/data/vg_gqa_imgfeat/README.md:
--------------------------------------------------------------------------------
1 | # VG and GQA images
2 |
3 | - Please download the gqa images from [GQA official website](https://cs.stanford.edu/people/dorarad/gqa/download.html) (20 GB).
4 |
5 | - The Visual Genome images are contained in GQA dataset with the same image id as VG.
6 | Thus we only need to extract features for all GQA images.
7 |
8 | - GQA testing images (labeled as 'nXXXXXXX') are collected from MS COCO test sets.
9 |
10 |
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/mmbt/with_features.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | mmbt:
3 | model_data_dir: ${env.data_dir}
4 | direct_features_input: true
5 | modal_encoder:
6 | type: finetune_faster_rcnn_fpn_fc7
7 | params:
8 | in_dim: 2048
9 | bias_file: models/detectron.defaults/fc7_b.pkl
10 | weights_file: models/detectron.defaults/fc7_w.pkl
11 | model_data_dir: ${model_config.mmbt.model_data_dir}
12 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf_cli/predict.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 |
4 | import sys
5 |
6 | from VisualBERT.mmf_cli.run import run
7 |
8 |
def predict(opts=None):
    """Run an mmf job in prediction mode (CLI entry point).

    Appends the ``evaluation.predict=true`` override so the configuration
    system enables prediction, then delegates to :func:`run`.

    Args:
        opts: Optional list of ``key=value`` override strings. When None,
            the override is appended to ``sys.argv`` instead.
    """
    if opts is None:
        sys.argv.extend(["evaluation.predict=true"])
    else:
        # NOTE(review): `opts` is mutated here but never handed to `run()`
        # below; this branch only has an effect if `run` re-reads these
        # options from shared state — verify against `run`'s implementation.
        opts.extend(["evaluation.predict=true"])

    run(predict=True)


if __name__ == "__main__":
    predict()
20 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_gqa/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: adam_w
3 | params:
4 | lr: 5e-5
5 | eps: 1e-8
6 |
7 | scheduler:
8 | type: warmup_linear
9 | params:
10 | num_warmup_steps: 2000
11 | num_training_steps: 88000
12 |
13 | training:
14 | batch_size: 480
15 | lr_scheduler: true
16 | # Don't forget to update schedule_attributes if you update this
17 | max_updates: 88000
18 | find_unused_parameters: true
19 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_gqa/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: adam_w
3 | params:
4 | lr: 5e-5
5 | eps: 1e-8
6 |
7 | scheduler:
8 | type: warmup_linear
9 | params:
10 | num_warmup_steps: 2000
11 | num_training_steps: 88000
12 |
13 | training:
14 | batch_size: 480
15 | lr_scheduler: true
16 | # Don't forget to update schedule_attributes if you update this
17 | max_updates: 88000
18 | find_unused_parameters: true
19 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/run/nlvr2_test.bash:
--------------------------------------------------------------------------------
# The name of this experiment.
name=$2

# Save logs and models under snap/nlvr2; make backup.
# Quote path expansions so an experiment name with spaces does not
# word-split (shellcheck SC2086).
output=snap/nlvr2/$name
mkdir -p "$output/src"
cp -r src/* "$output/src/"
cp "$0" "$output/run.bash"

# See Readme.md for option details.
# "${@:3}" keeps each extra CLI argument as a single word (unquoted ${@:3}
# would re-split arguments containing spaces).
CUDA_VISIBLE_DEVICES=$1 PYTHONPATH=$PYTHONPATH:./src \
    python src/tasks/nlvr2.py \
    --tiny --llayers 9 --xlayers 5 --rlayers 5 \
    --tqdm --output "$output" "${@:3}"
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/full_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_all.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/half_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_mid.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_small.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014_len_coco_10_pc.npy
15 |
--------------------------------------------------------------------------------
/Transparency/scripts/bc_all.sh:
--------------------------------------------------------------------------------
# Sweep binary/multi-class classification experiments over every
# dataset / attention / encoder combination.
# sst yelp 20News_sports AgNews
# tanh dot
# cnn lstm
gpu=0
output_path=./outputs/mc
echo "$output_path"

for dataset_name in sst yelp 20News_sports AgNews
do
	for attention_type in tanh dot
	do
		for encoder_type in cnn lstm
		do
			# Quote expansions so values containing spaces do not word-split (shellcheck SC2086).
			python train_and_run_experiments_bc.py --dataset "${dataset_name}" --data_dir . --output_dir "${output_path}" --attention "${attention_type}" --encoder "${encoder_type}" --gpu "$gpu";
		done
	done
done
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/full_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_all.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/half_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_mid.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/small_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_small.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/ten_pc.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/vilbert/configs/masked_vqa2/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014_len_coco_10_pc.npy
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/full_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_all.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/full_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 | annotations:
10 | train:
11 | - cc/coco_generated/annotations/train_all.npy
12 | val:
13 | - cc/coco_generated/annotations/val.npy
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pythia/configs/vqa2/debug.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/trainval2014.lmdb
11 | val:
12 | - coco/defaults/features/trainval2014.lmdb
13 | annotations:
14 | train:
15 | - vqa2/defaults/annotations/imdb_debug.npy
16 | val:
17 | - vqa2/defaults/annotations/imdb_debug.npy
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/datasets/textvqa/with_resnet.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | textvqa:
3 | features:
4 | train:
5 | - textvqa/defaults/features/open_images/detectron.lmdb,textvqa/defaults/features/open_images/resnet152.lmdb
6 | val:
7 | - textvqa/defaults/features/open_images/detectron.lmdb,textvqa/defaults/features/open_images/resnet152.lmdb
8 | test:
9 | - textvqa/defaults/features/open_images/detectron.lmdb,textvqa/defaults/features/open_images/resnet152.lmdb
10 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/lxmert/configs/visual_genome/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./masked.yaml
3 |
4 | optimizer:
5 | type: adam_w
6 | params:
7 | lr: 1e-4
8 | eps: 1e-8
9 |
10 | scheduler:
11 | type: warmup_linear
12 | params:
13 | num_warmup_steps: 1000
14 | num_training_steps: ${training.max_updates}
15 |
16 | training:
17 | batch_size: 480
18 | lr_scheduler: true
19 | # Don't forget to update schedule_attributes if you update this
20 | max_updates: 11000
21 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pythia/configs/vqa2/debug.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/trainval2014.lmdb
11 | val:
12 | - coco/defaults/features/trainval2014.lmdb
13 | annotations:
14 | train:
15 | - vqa2/defaults/annotations/imdb_debug.npy
16 | val:
17 | - vqa2/defaults/annotations/imdb_debug.npy
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/lxmert/configs/visual_genome/pretrain.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./masked.yaml
3 |
4 | optimizer:
5 | type: adam_w
6 | params:
7 | lr: 1e-4
8 | eps: 1e-8
9 |
10 | scheduler:
11 | type: warmup_linear
12 | params:
13 | num_warmup_steps: 1000
14 | num_training_steps: ${training.max_updates}
15 |
16 | training:
17 | batch_size: 480
18 | lr_scheduler: true
19 | # Don't forget to update schedule_attributes if you update this
20 | max_updates: 11000
21 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/half_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 |
10 | annotations:
11 | train:
12 | - cc/coco_generated/annotations/train_mid.npy
13 | val:
14 | - cc/coco_generated/annotations/val.npy
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 |
10 | annotations:
11 | train:
12 | - cc/coco_generated/annotations/train_small.npy
13 | val:
14 | - cc/coco_generated/annotations/val.npy
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/half_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 |
10 | annotations:
11 | train:
12 | - cc/coco_generated/annotations/train_mid.npy
13 | val:
14 | - cc/coco_generated/annotations/val.npy
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/small_coco_generated.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ../projects/visual_bert/configs/masked_conceptual_captions/pretrain.yaml
3 |
4 | dataset_config:
5 | masked_conceptual_captions:
6 | zoo_requirements:
7 | - cc.coco_generated
8 | - cc.defaults
9 |
10 | annotations:
11 | train:
12 | - cc/coco_generated/annotations/train_small.npy
13 | val:
14 | - cc/coco_generated/annotations/val.npy
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/textcaps/eval_pretrained_coco_model.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./beam_search.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | processors:
7 | text_processor:
8 | params:
9 | vocab:
10 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
11 | caption_processor:
12 | params:
13 | vocab:
14 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/unimodal/with_features.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | unimodal_image:
3 | model_data_dir: ${env.data_dir}
4 | direct_features_input: true
5 | modal_encoder:
6 | type: finetune_faster_rcnn_fpn_fc7
7 | params:
8 | in_dim: 2048
9 | bias_file: models/detectron.defaults/fc7_b.pkl
10 | weights_file: models/detectron.defaults/fc7_w.pkl
11 | model_data_dir: ${model_config.unimodal_image.model_data_dir}
12 | num_output_features: 1
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/databases/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | import mmf.datasets.databases.readers # noqa
3 |
4 | from .annotation_database import AnnotationDatabase
5 | from .features_database import FeaturesDatabase
6 | from .image_database import ImageDatabase
7 | from .scene_graph_database import SceneGraphDatabase
8 |
9 |
10 | __all__ = [
11 | "AnnotationDatabase",
12 | "FeaturesDatabase",
13 | "ImageDatabase",
14 | "SceneGraphDatabase",
15 | ]
16 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/textcaps/eval_pretrained_coco_model.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./beam_search.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | processors:
7 | text_processor:
8 | params:
9 | vocab:
10 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
11 | caption_processor:
12 | params:
13 | vocab:
14 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/textcaps/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | zoo_requirements:
7 | - textvqa.defaults
8 | - textcaps.defaults
9 | annotations:
10 | val:
11 | - textcaps/defaults/annotations/imdb_val_filtered_by_image_id.npy
12 |
13 | model_config:
14 | butd: &butd
15 | inference:
16 | type: beam_search
17 | params:
18 | beam_length: 5
19 |
20 | training:
21 | batch_size: 1
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/run/gqa_test.bash:
--------------------------------------------------------------------------------
# The name of this experiment.
name=$2

# Save logs and models under snap/gqa; make backup.
# Quote path expansions so an experiment name with spaces does not
# word-split (shellcheck SC2086).
output=snap/gqa/$name
mkdir -p "$output/src"
cp -r src/* "$output/src/"
cp "$0" "$output/run.bash"

# See Readme.md for option details.
# "${@:3}" keeps each extra CLI argument as a single word (unquoted ${@:3}
# would re-split arguments containing spaces).
CUDA_VISIBLE_DEVICES=$1 PYTHONPATH=$PYTHONPATH:./src \
    python src/tasks/gqa.py \
    --tiny --train train --valid "" \
    --llayers 9 --xlayers 5 --rlayers 5 \
    --tqdm --output "$output" "${@:3}"
16 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/textcaps/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | zoo_requirements:
7 | - textvqa.defaults
8 | - textcaps.defaults
9 | annotations:
10 | val:
11 | - textcaps/defaults/annotations/imdb_val_filtered_by_image_id.npy
12 |
13 | model_config:
14 | butd: &butd
15 | inference:
16 | type: beam_search
17 | params:
18 | beam_length: 5
19 |
20 | training:
21 | batch_size: 1
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c_captioner/configs/butd/textcaps/eval_pretrained_coco_model.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./beam_search.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | processors:
7 | text_processor:
8 | params:
9 | vocab:
10 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
11 | caption_processor:
12 | params:
13 | vocab:
14 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/vqa2/train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c_captioner/configs/butd/textcaps/eval_pretrained_coco_model.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./beam_search.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | processors:
7 | text_processor:
8 | params:
9 | vocab:
10 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
11 | caption_processor:
12 | params:
13 | vocab:
14 | vocab_file: textcaps/defaults/extras/vocabs/coco_vocabulary_captioning_thresh5.txt
15 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/vqa2/train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c_captioner/configs/butd/textcaps/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | zoo_requirements:
7 | - textvqa.defaults
8 | - textcaps.defaults
9 | annotations:
10 | val:
11 | - textcaps/defaults/annotations/imdb_val_filtered_by_image_id.npy
12 |
13 | model_config:
14 | butd: &butd
15 | inference:
16 | type: beam_search
17 | params:
18 | beam_length: 5
19 |
20 | training:
21 | batch_size: 1
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_coco:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/tests/utils/test_distributed.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | import unittest
3 |
4 | import mmf.utils.distributed as distributed
5 |
6 |
# Unit test for the object <-> byte-tensor serialization helpers in
# mmf.utils.distributed (used to broadcast arbitrary Python objects
# between distributed workers).
7 | class TestUtilsDistributed(unittest.TestCase):
# Encode a heterogeneous object (ints, str, dict, nested list) to a byte
# tensor and decode it back; the round trip must be lossless.
8 | def test_object_byte_tensor_conversion(self):
9 | test_obj = [1, "2", {3: 4}, [5]]
10 | test_obj_bytes = distributed.object_to_byte_tensor(test_obj)
11 | test_obj_dec = distributed.byte_tensor_to_object(test_obj_bytes)
12 | self.assertEqual(test_obj_dec, test_obj)
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/conceptual_captions/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
# Public API of the conceptual_captions dataset builder package.
# __all__ is declared before the imports it re-exports; the file-level
# isort/flake8 suppressions (seen in sibling __init__ files) permit this
# ordering.
2 | __all__ = [
3 | "ConceptualCaptionsBuilder",
4 | "ConceptualCaptionsDataset",
5 | "MaskedConceptualCaptionsBuilder",
6 | "MaskedConceptualCaptionsDataset",
7 | ]
8 |
# Re-export the plain and masked builder/dataset pairs from their modules.
9 | from .builder import ConceptualCaptionsBuilder
10 | from .dataset import ConceptualCaptionsDataset
11 | from .masked_builder import MaskedConceptualCaptionsBuilder
12 | from .masked_dataset import MaskedConceptualCaptionsDataset
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c_captioner/configs/butd/textcaps/beam_search.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | zoo_requirements:
7 | - textvqa.defaults
8 | - textcaps.defaults
9 | annotations:
10 | val:
11 | - textcaps/defaults/annotations/imdb_val_filtered_by_image_id.npy
12 |
13 | model_config:
14 | butd: &butd
15 | inference:
16 | type: beam_search
17 | params:
18 | beam_length: 5
19 |
20 | training:
21 | batch_size: 1
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_coco:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/mmf_bert/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_coco:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/mmf_bert/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_coco:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/scripts/visualBert/gqa/gqa_finetune.sh:
--------------------------------------------------------------------------------
1 | # Since a pretrained VisualBERT-GQA checkpoint is not provided, we first need to fine-tune the pretrained model on GQA.
2 |
# gpu: comma-separated CUDA device ids; task: mmf task/dataset name.
3 | gpu=2,3
4 | task=gqa
# Run mmf training (run_type=train_val) resuming from the pretrained
# checkpoint; env.data_dir points at the local data directory.
5 | CUDA_VISIBLE_DEVICES=$gpu PYTHONPATH=`pwd` python VisualBERT/run.py \
6 | --task=$task \
7 | config=projects/visual_bert/configs/gqa/defaults.yaml \
8 | model=visual_bert \
9 | dataset=gqa \
10 | run_type=train_val \
11 | checkpoint.resume=True \
12 | env.data_dir=./env/data_dir \
13 | training.num_workers=2 \
14 | training.batch_size=8 \
15 | training.trainer=mmf \
16 | training.seed=1234
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/lxmert/configs/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/lxmert/defaults.yaml
3 |
4 | optimizer:
5 | type: adam_w
6 | params:
7 | lr: 1e-4
8 | eps: 1e-8
9 |
10 | training:
11 | seed: 9595
12 | batch_size: 4
13 | lr_scheduler: false
14 | find_unused_parameters: true
15 | use_warmup: true
16 | warmup_factor: 0.05
17 | warmup_iterations: 1000
18 | max_epochs: 20
19 | max_updates: null
20 | pin_memory: true
21 |
22 |
23 | evaluation:
24 | metrics:
25 | - vqa_accuracy
26 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/lxmert/configs/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/lxmert/defaults.yaml
3 |
4 | optimizer:
5 | type: adam_w
6 | params:
7 | lr: 1e-4
8 | eps: 1e-8
9 |
10 | training:
11 | seed: 9595
12 | batch_size: 4
13 | lr_scheduler: false
14 | find_unused_parameters: true
15 | use_warmup: true
16 | warmup_factor: 0.05
17 | warmup_iterations: 1000
18 | max_epochs: 20
19 | max_updates: null
20 | pin_memory: true
21 |
22 |
23 | evaluation:
24 | metrics:
25 | - vqa_accuracy
26 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/lxmert/configs/visual_genome/masked.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_visual_genome:
3 | use_features: true
4 | add_answer: true
5 | max_features: 36
6 | features:
7 | train:
8 | - visual_genome/detectron_fix_100/fc6/,visual_genome/resnet152/
9 | - visual_genome/detectron_fix_100/fc6/,visual_genome/resnet152/
10 | annotations:
11 | train:
12 | - imdb/visual_genome/vg_question_answers.jsonl
13 | - imdb/visual_genome/vg_question_answers_placeholder.jsonl
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/lxmert/configs/visual_genome/masked.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_visual_genome:
3 | use_features: true
4 | add_answer: true
5 | max_features: 36
6 | features:
7 | train:
8 | - visual_genome/detectron_fix_100/fc6/,visual_genome/resnet152/
9 | - visual_genome/detectron_fix_100/fc6/,visual_genome/resnet152/
10 | annotations:
11 | train:
12 | - imdb/visual_genome/vg_question_answers.jsonl
13 | - imdb/visual_genome/vg_question_answers_placeholder.jsonl
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_coco:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_coco:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_vqa2/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | # isort:skip_file
3 | # flake8: noqa: F401
4 |
# Top-level package init for the vendored mmf library: pull in the main
# subpackages and the commonly used module groups, then pin the public API.
5 | from VisualBERT.mmf import utils, common, modules, datasets, models
6 | from VisualBERT.mmf.modules import losses, schedulers, optimizers, metrics
# Package version string (defined in mmf/version.py).
7 | from VisualBERT.mmf.version import __version__
8 |
9 |
# Explicit public API; note __version__ is intentionally not listed here
# but is still importable.
10 | __all__ = [
11 | "utils",
12 | "common",
13 | "modules",
14 | "datasets",
15 | "models",
16 | "losses",
17 | "schedulers",
18 | "optimizers",
19 | "metrics",
20 | ]
21 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_vqa2/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/full_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/full_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_coco/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/trainval2014.lmdb
12 | - coco/defaults/features/trainval2014.lmdb
13 | annotations:
14 | train:
15 | - coco/defaults/annotations/imdb_karpathy_train_by_image.npy
16 | - coco/defaults/annotations/imdb_karpathy_val_by_image.npy
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/ocrvqa/dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.datasets.builders.textvqa.dataset import TextVQADataset
3 |
4 |
# OCR-VQA dataset: reuses the TextVQA dataset pipeline wholesale, only
# renaming the dataset and dropping TextVQA-specific sample preprocessing.
5 | class OCRVQADataset(TextVQADataset):
6 | def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
7 | super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
# Override after super().__init__ — presumably the parent set its own
# dataset_name ("textvqa"); verify against TextVQADataset.
8 | self.dataset_name = "ocrvqa"
9 |
# OCR-VQA annotations need no adjustment, unlike TextVQA's.
10 | def preprocess_sample_info(self, sample_info):
11 | # Do nothing in this case
12 | return sample_info
13 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_coco/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/trainval2014.lmdb
12 | - coco/defaults/features/trainval2014.lmdb
13 | annotations:
14 | train:
15 | - coco/defaults/annotations/imdb_karpathy_train_by_image.npy
16 | - coco/defaults/annotations/imdb_karpathy_val_by_image.npy
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/mmbt/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/mmbt/pretrain.yaml
3 | - configs/models/mmbt/with_features.yaml
4 |
5 | scheduler:
6 | type: warmup_linear
7 | params:
8 | num_warmup_steps: 2000
9 | num_training_steps: ${training.max_updates}
10 |
11 | optimizer:
12 | type: adam_w
13 | params:
14 | lr: 5e-5
15 | eps: 1e-8
16 |
17 | training:
18 | batch_size: 128
19 | lr_scheduler: true
20 | max_updates: 22000
21 |
22 | checkpoint:
23 | pretrained_state_mapping:
24 | bert: bert
25 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_coco/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | - coco/defaults/features/coco_trainval2014.lmdb
13 | annotations:
14 | train:
15 | - coco/defaults/annotations/imdb_karpathy_train_by_image.npy
16 | - coco/defaults/annotations/imdb_karpathy_val_by_image.npy
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/unimodal/bert.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | unimodal_text:
3 | bert_model_name: bert-base-uncased
4 | text_hidden_size: 768
5 | num_labels: 2
6 | text_encoder:
7 | type: transformer
8 | params:
9 | bert_model_name: ${model_config.unimodal_text.bert_model_name}
10 | hidden_size: 768
11 | num_hidden_layers: 12
12 | num_attention_heads: 12
13 | output_attentions: false
14 | output_hidden_states: false
15 |
16 | classifier:
17 | params:
18 | in_dim: 768
19 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/mmbt/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/mmbt/pretrain.yaml
3 | - configs/models/mmbt/with_features.yaml
4 |
5 | scheduler:
6 | type: warmup_linear
7 | params:
8 | num_warmup_steps: 2000
9 | num_training_steps: ${training.max_updates}
10 |
11 | optimizer:
12 | type: adam_w
13 | params:
14 | lr: 5e-5
15 | eps: 1e-8
16 |
17 | training:
18 | batch_size: 128
19 | lr_scheduler: true
20 | max_updates: 22000
21 |
22 | checkpoint:
23 | pretrained_state_mapping:
24 | bert: bert
25 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_coco/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | - coco/defaults/features/coco_trainval2014.lmdb
13 | annotations:
14 | train:
15 | - coco/defaults/annotations/imdb_karpathy_train_by_image.npy
16 | - coco/defaults/annotations/imdb_karpathy_val_by_image.npy
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/run/vqa_test.bash:
--------------------------------------------------------------------------------
# Usage: bash run/vqa_test.bash <gpu-ids> <experiment-name> [extra args...]
# $1 = CUDA device ids, $2 = experiment name; args from $3 onward are
# forwarded verbatim to vqa.py via ${@:3}.
1 | # The name of this experiment.
2 | name=$2
3 |
4 | # Save logs and models under snap/vqa; make backup.
5 | output=snap/vqa/$name
6 | mkdir -p $output/src
7 | cp -r src/* $output/src/
# Keep a copy of this launcher script alongside the run for reproducibility.
8 | cp $0 $output/run.bash
9 |
10 | # See Readme.md for option details.
# NOTE(review): despite the _test name, this invokes training flags
# (--train train, --epochs 4) on the --tiny subset — confirm intent.
11 | CUDA_VISIBLE_DEVICES=$1 PYTHONPATH=$PYTHONPATH:./src \
12 | python src/tasks/vqa.py \
13 | --tiny --train train --valid "" \
14 | --llayers 9 --xlayers 5 --rlayers 5 \
15 | --batchSize 32 --optim bert --lr 5e-5 --epochs 4 \
16 | --tqdm --output $output ${@:3}
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
# Package init for mmf.datasets: re-export the core dataset base classes,
# builders, and loaders.
2 | from .base_dataset import BaseDataset
3 | from .base_dataset_builder import BaseDatasetBuilder
4 | from .concat_dataset import ConcatDataset
5 | from .mmf_dataset import MMFDataset
6 | from .mmf_dataset_builder import MMFDatasetBuilder
7 | from .multi_dataset_loader import MultiDatasetLoader
8 |
9 |
# Public API; ordering here differs from the import order above
# (MultiDatasetLoader is listed before MMFDataset) — cosmetic only.
10 | __all__ = [
11 | "BaseDataset",
12 | "BaseDatasetBuilder",
13 | "ConcatDataset",
14 | "MultiDatasetLoader",
15 | "MMFDataset",
16 | "MMFDatasetBuilder",
17 | ]
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_coco/full_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/trainval2014.lmdb
12 | - coco/defaults/features/trainval2014.lmdb
13 | annotations:
14 | train:
15 | - coco/defaults/annotations/imdb_karpathy_train_by_image.npy
16 | - coco/defaults/annotations/imdb_karpathy_val_by_image.npy
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_coco/full_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_coco:
6 | return_features_info: true
7 | use_images: false
8 | use_features: true
9 | features:
10 | train:
11 | - coco/defaults/features/trainval2014.lmdb
12 | - coco/defaults/features/trainval2014.lmdb
13 | annotations:
14 | train:
15 | - coco/defaults/annotations/imdb_karpathy_train_by_image.npy
16 | - coco/defaults/annotations/imdb_karpathy_val_by_image.npy
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_vqa2/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 | use_image_feature_masks: true
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_vqa2/pretrain_train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | masked_vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/defaults/features/coco_trainval2014.lmdb
11 | - coco/defaults/features/coco_trainval2014.lmdb
12 | annotations:
13 | train:
14 | - vqa2/defaults/annotations/imdb_train2014.npy
15 | - vqa2/defaults/annotations/imdb_val2014.npy
16 | return_features_info: true
17 | use_image_feature_masks: true
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_conceptual_captions:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 | find_unused_parameters: true
23 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_conceptual_captions:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 | find_unused_parameters: true
23 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/cnn_lstm/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | cnn_lstm:
3 | losses:
4 | - type: logit_bce
5 | text_embedding:
6 | embedding_dim: 20
7 | lstm:
8 | input_size: 20
9 | hidden_size: 50
10 | bidirectional: true
11 | batch_first: true
12 | cnn:
13 | layers:
14 | input_dims: [3, 64, 128, 128, 64, 64]
15 | output_dims: [64, 128, 128, 64, 64, 10]
16 | kernel_sizes: [7, 5, 5, 5, 5, 1]
17 | classifier:
18 | type: mlp
19 | params:
20 | in_dim: 450
21 | out_dim: 2
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/databases/scene_graph_database.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.datasets.databases.annotation_database import AnnotationDatabase
3 |
4 |
# Scene-graph annotation store: loads entries via AnnotationDatabase and
# indexes them by image_id for O(1) lookup.
5 | class SceneGraphDatabase(AnnotationDatabase):
6 | def __init__(self, config, scene_graph_path, *args, **kwargs):
7 | super().__init__(config, scene_graph_path, *args, **kwargs)
# Build an image_id -> item map; later duplicates of an image_id
# overwrite earlier ones.
8 | self.data_dict = {}
9 | for item in self.data:
10 | self.data_dict[item["image_id"]] = item
11 |
# NOTE(review): idx is an image_id key, not a positional index — this
# changes the parent's indexing contract; raises KeyError for unknown ids.
12 | def __getitem__(self, idx):
13 | return self.data_dict[idx]
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pythia/configs/vqa2/resnet_only.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | zoo_requirements:
9 | - coco.resnet152
10 | - vqa2.defaults
11 | features:
12 | train:
13 | - coco/resnet152/features/trainval2014.lmdb
14 | val:
15 | - coco/resnet152/features/trainval2014.lmdb
16 | test:
17 | - coco/resnet152/features/test2015.lmdb
18 | model_config:
19 | pythia:
20 | image_feature_encodings:
21 | - type: default
22 | params: {}
23 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pythia/configs/vqa2/resnet_only.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | zoo_requirements:
9 | - coco.resnet152
10 | - vqa2.defaults
11 | features:
12 | train:
13 | - coco/resnet152/features/trainval2014.lmdb
14 | val:
15 | - coco/resnet152/features/trainval2014.lmdb
16 | test:
17 | - coco/resnet152/features/test2015.lmdb
18 | model_config:
19 | pythia:
20 | image_feature_encodings:
21 | - type: default
22 | params: {}
23 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_conceptual_captions:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 | find_unused_parameters: true
23 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_conceptual_captions:
3 | return_features_info: true
4 |
5 | optimizer:
6 | type: adam_w
7 | params:
8 | lr: 5e-5
9 | eps: 1e-8
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 1000
15 | num_training_steps: 11000
16 |
17 | training:
18 | batch_size: 480
19 | lr_scheduler: true
20 | # Don't forget to update schedule_attributes if you update this
21 | max_updates: 11000
22 | find_unused_parameters: true
23 |
--------------------------------------------------------------------------------
/Transparency/README.md:
--------------------------------------------------------------------------------
1 | ## Reproduction of Results
2 |
3 | 1. Please follow the original [repo](https://github.com/akashkm99/Interpretable-Attention) to prepare the required data and environments. Note that all datasets should first be preprocessed by running the .ipynb files in the *preprocess folder*.
4 |
5 | 2. Run the shell script in [scripts folder](https://github.com/BierOne/Attention-Faithfulness/tree/master/Transparency/scripts) to **train the model**. For example,
6 | ```
7 | bash ./scripts/bc_all.sh
8 | ```
9 |
10 | 3. Run the notebook (*nlp_baseline_check_bc.ipynb* or *nlp_baseline_check_qa.ipynb*) to evaluate explanations and check results.
11 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/trainers/core/profiling.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import logging
4 | from abc import ABC
5 | from typing import Type
6 |
7 | from VisualBERT.mmf.utils.timer import Timer
8 |
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
class TrainerProfilingMixin(ABC):
    """Mixin adding lightweight wall-clock profiling to a trainer.

    Expects the host class to provide ``self.training_config`` with a
    ``logger_level`` attribute; timing output is emitted only when that
    level is ``"debug"``.
    """

    # Shared timer measuring the interval between successive profile() calls.
    # Annotated as an instance (it holds Timer(), not the Timer class).
    profiler: Timer = Timer()

    def profile(self, text: str) -> None:
        """Log time elapsed since the previous call, labeled with ``text``.

        No-op unless ``training_config.logger_level`` is ``"debug"``.
        """
        if self.training_config.logger_level != "debug":
            return
        # Use the module-level logger; the original called logging.debug,
        # which bypasses this module's named logger entirely.
        logger.debug(f"{text}: {self.profiler.get_time_since_start()}")
        self.profiler.reset()
21 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/visual_bert/configs/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_vqa2:
3 | annotations:
4 | train:
5 | - vqa2/defaults/annotations/imdb_train2014.npy
6 | return_features_info: true
7 |
8 | optimizer:
9 | type: adam_w
10 | params:
11 | lr: 5e-5
12 | eps: 1e-8
13 |
14 | scheduler:
15 | type: warmup_linear
16 | params:
17 | num_warmup_steps: 1000
18 | num_training_steps: 11000
19 |
20 | training:
21 | batch_size: 480
22 | lr_scheduler: true
23 | # Don't forget to update schedule_attributes if you update this
24 | max_updates: 11000
25 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/visual_bert/configs/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_vqa2:
3 | annotations:
4 | train:
5 | - vqa2/defaults/annotations/imdb_train2014.npy
6 | return_features_info: true
7 |
8 | optimizer:
9 | type: adam_w
10 | params:
11 | lr: 5e-5
12 | eps: 1e-8
13 |
14 | scheduler:
15 | type: warmup_linear
16 | params:
17 | num_warmup_steps: 1000
18 | num_training_steps: 11000
19 |
20 | training:
21 | batch_size: 480
22 | lr_scheduler: true
23 | # Don't forget to update schedule_attributes if you update this
24 | max_updates: 11000
25 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/run/gqa_finetune.bash:
--------------------------------------------------------------------------------
# The name of this experiment (arg 2; arg 1 selects the GPU).
name=$2

# Save logs and models under snap/gqa; make backup.
# Expansions are quoted so experiment names with spaces don't word-split.
output=snap/gqa/$name
mkdir -p "$output/src"
cp -r src/* "$output/src/"
cp "$0" "$output/run.bash"

# See Readme.md for option details.
# "${@:3}" forwards any extra arguments verbatim (quoting preserves
# arguments that contain whitespace).
CUDA_VISIBLE_DEVICES=$1 PYTHONPATH=$PYTHONPATH:./src \
    python src/tasks/gqa.py \
    --train train,valid --valid testdev \
    --llayers 9 --xlayers 5 --rlayers 5 \
    --loadLXMERTQA snap/pretrained/model \
    --batchSize 32 --optim bert --lr 1e-5 --epochs 4 \
    --tqdm --output "$output" "${@:3}"
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/run/vqa_finetune.bash:
--------------------------------------------------------------------------------
# The name of this experiment (arg 2; arg 1 selects the GPU).
name=$2

# Save logs and models under snap/vqa; make backup.
# Expansions are quoted so experiment names with spaces don't word-split.
output=snap/vqa/$name
mkdir -p "$output/src"
cp -r src/* "$output/src/"
cp "$0" "$output/run.bash"

# See Readme.md for option details.
# "${@:3}" forwards any extra arguments verbatim (quoting preserves
# arguments that contain whitespace).
CUDA_VISIBLE_DEVICES=$1 PYTHONPATH=$PYTHONPATH:./src \
    python src/tasks/vqa.py \
    --train train,nominival --valid minival \
    --llayers 9 --xlayers 5 --rlayers 5 \
    --loadLXMERTQA snap/pretrained/model \
    --batchSize 32 --optim bert --lr 5e-5 --epochs 4 \
    --tqdm --output "$output" "${@:3}"
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/run/nlvr2_finetune.bash:
--------------------------------------------------------------------------------
# The name of this experiment (arg 2; arg 1 selects the GPU).
name=$2

# Save logs and models under snap/nlvr2; make backup.
# Expansions are quoted so experiment names with spaces don't word-split.
output=snap/nlvr2/$name
mkdir -p "$output/src"
cp -r src/* "$output/src/"
cp "$0" "$output/run.bash"

# See run/Readme.md for option details.
# "${@:3}" forwards any extra arguments verbatim (quoting preserves
# arguments that contain whitespace).
CUDA_VISIBLE_DEVICES=$1 PYTHONPATH=$PYTHONPATH:./src \
    python src/tasks/nlvr2.py \
    --train train --valid valid \
    --llayers 9 --xlayers 5 --rlayers 5 \
    --loadLXMERT snap/pretrained/model \
    --batchSize 32 --optim bert --lr 5e-5 --epochs 4 \
    --tqdm --output "$output" "${@:3}"
18 |
19 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/vqa2/ocr_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.common.registry import Registry
3 | from VisualBERT.mmf.datasets.builders.vizwiz import VizWizBuilder
4 | from VisualBERT.mmf.datasets.builders.vqa2.ocr_dataset import VQA2OCRDataset
5 |
6 |
@Registry.register_builder("vqa2_ocr")
class TextVQABuilder(VizWizBuilder):
    """Builder for the OCR variant of VQA2, registered as "vqa2_ocr"."""

    def __init__(self):
        super().__init__()
        self.dataset_name = "VQA2_OCR"
        self.set_dataset_class(VQA2OCRDataset)

    @classmethod
    def config_path(cls):
        # Parameter renamed self -> cls: this is a classmethod, so the first
        # argument is the class (matches every sibling builder in the repo).
        # No standalone config; this builder reuses the VizWiz setup.
        return None
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_vqa2:
3 | annotations:
4 | train:
5 | - vqa2/defaults/annotations/imdb_train2014.npy
6 | return_features_info: true
7 |
8 | optimizer:
9 | type: adam_w
10 | params:
11 | lr: 5e-5
12 | eps: 1e-8
13 |
14 | scheduler:
15 | type: warmup_linear
16 | params:
17 | num_warmup_steps: 1000
18 | num_training_steps: 11000
19 |
20 | training:
21 | batch_size: 480
22 | lr_scheduler: true
23 | # Don't forget to update schedule_attributes if you update this
24 | max_updates: 11000
25 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/visual_bert/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | masked_vqa2:
3 | annotations:
4 | train:
5 | - vqa2/defaults/annotations/imdb_train2014.npy
6 | return_features_info: true
7 |
8 | optimizer:
9 | type: adam_w
10 | params:
11 | lr: 5e-5
12 | eps: 1e-8
13 |
14 | scheduler:
15 | type: warmup_linear
16 | params:
17 | num_warmup_steps: 1000
18 | num_training_steps: 11000
19 |
20 | training:
21 | batch_size: 480
22 | lr_scheduler: true
23 | # Don't forget to update schedule_attributes if you update this
24 | max_updates: 11000
25 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/visual_bert/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | visual_bert:
3 | bert_model_name: bert-base-uncased
4 | training_head_type: pretraining
5 | visual_embedding_dim: 2048
6 | special_visual_initialize: true
7 | embedding_strategy: plain
8 | bypass_transformer: false
9 | output_attentions: false
10 | output_hidden_states: false
11 | random_initialize: false
12 | freeze_base: false
13 | finetune_lr_multiplier: 1
14 | # Default points to BERT pooler strategy which is to take
15 | # representation of CLS token after passing it through a dense layer
16 | pooler_strategy: default
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/utils/transform.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from torch import Tensor
4 |
5 |
def transform_to_batch_sequence(tensor: Tensor) -> Tensor:
    """Collapse a 3-D tensor to 2-D by merging its leading two dimensions.

    A 2-D tensor passes through unchanged; any other rank than 2 or 3 is
    rejected by the assertion.
    """
    if tensor.dim() == 2:
        return tensor
    assert tensor.dim() == 3
    last_dim = tensor.size(-1)
    return tensor.contiguous().view(-1, last_dim)
12 |
13 |
def transform_to_batch_sequence_dim(tensor: Tensor) -> Tensor:
    """Collapse a 4-D tensor to 3-D by merging its leading two dimensions.

    A 3-D tensor passes through unchanged; any other rank than 3 or 4 is
    rejected by the assertion.
    """
    if tensor.dim() == 3:
        return tensor
    assert tensor.dim() == 4
    trailing = (tensor.size(-2), tensor.size(-1))
    return tensor.contiguous().view(-1, *trailing)
20 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/scripts/lxmert/vqa/save_exp.sh:
--------------------------------------------------------------------------------
# Sweep explanation methods on VQA and cache their outputs via save_exp.py.
# All exp methods: ours_no_lrp transformer_att partial_lrp raw_attn rollout attn_grad attn_norm inputGrad ig rand

samples=10000
for text in False
do
    for method_name in ours_no_lrp transformer_att partial_lrp raw_attn rollout attn_grad attn_norm inputGrad ig
    do
        # $(pwd) replaces the legacy backtick substitution; loop variables
        # are quoted so values can never word-split.
        CUDA_VISIBLE_DEVICES=2 PYTHONPATH=$(pwd) python lxmert/lxmert/save_exp.py \
            --COCO_path /home/lyb/vqa_data/mscoco/val2014/ \
            --method "$method_name" \
            --is-text-pert "$text" \
            --is-positive-pert True \
            --load_cached_exp=False \
            --num-samples="$samples" \
            --load_raw_img False
    done
done
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/stvqa/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.common.registry import Registry
3 | from VisualBERT.mmf.datasets.builders.stvqa.dataset import STVQADataset
4 | from VisualBERT.mmf.datasets.builders.textvqa.builder import TextVQABuilder
5 |
6 |
@Registry.register_builder("stvqa")
class STVQABuilder(TextVQABuilder):
    """Dataset builder for ST-VQA, registered under the "stvqa" key."""

    def __init__(self):
        super().__init__()
        # Override the TextVQA parent's name and dataset class.
        self.dataset_name = "stvqa"
        self.set_dataset_class(STVQADataset)

    @classmethod
    def config_path(cls):
        # Default dataset config, resolved relative to the mmf config root.
        return "configs/datasets/stvqa/defaults.yaml"
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c_captioner/configs/m4c_captioner/textcaps/with_caffe2_feat.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | zoo_requirements:
7 | - textvqa.caffe2
8 | - textvqa.ocr_en
9 | - textcaps.defaults
10 | features:
11 | train:
12 | - textvqa/caffe2/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
13 | val:
14 | - textvqa/caffe2/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
15 | test:
16 | - textvqa/caffe2/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/ocrvqa/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.common.registry import Registry
3 | from VisualBERT.mmf.datasets.builders.ocrvqa.dataset import OCRVQADataset
4 | from VisualBERT.mmf.datasets.builders.textvqa.builder import TextVQABuilder
5 |
6 |
7 | @Registry.register_builder("ocrvqa")
8 | class OCRVQABuilder(TextVQABuilder):
9 | def __init__(self):
10 | super().__init__()
11 | self.dataset_name = "ocrvqa"
12 | self.set_dataset_class(OCRVQADataset)
13 |
14 | @classmethod
15 | def config_path(cls):
16 | return "configs/datasets/ocrvqa/defaults.yaml"
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c_captioner/configs/m4c_captioner/textcaps/with_caffe2_feat.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textcaps:
6 | zoo_requirements:
7 | - textvqa.caffe2
8 | - textvqa.ocr_en
9 | - textcaps.defaults
10 | features:
11 | train:
12 | - textvqa/caffe2/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
13 | val:
14 | - textvqa/caffe2/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
15 | test:
16 | - textvqa/caffe2/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/scripts/lxmert/gqa/gqa_save_exp.sh:
--------------------------------------------------------------------------------
# Sweep explanation methods on GQA and cache their outputs via save_exp.py.
# All exp methods: ours_no_lrp transformer_att partial_lrp raw_attn rollout attn_grad attn_norm inputGrad ig rand

samples=10000
for text in False
do
    for method_name in ours_no_lrp transformer_att partial_lrp raw_attn rollout attn_grad attn_norm inputGrad ig
    do
        # $(pwd) replaces the legacy backtick substitution; loop variables
        # are quoted so values can never word-split.
        CUDA_VISIBLE_DEVICES=2 PYTHONPATH=$(pwd) python lxmert/lxmert/save_exp.py \
            --COCO_path /home/lyb/vqa_data/gqa/images/ \
            --method "$method_name" \
            --is-text-pert "$text" \
            --is-positive-pert True \
            --load_cached_exp=False \
            --num-samples="$samples" \
            --load_raw_img False \
            --task gqa
    done
done
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_coco:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/gqa/masked_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.common.registry import registry
4 | from VisualBERT.mmf.datasets.builders.gqa.builder import GQABuilder
5 | from VisualBERT.mmf.datasets.builders.gqa.masked_dataset import MaskedGQADataset
6 |
7 |
@registry.register_builder("masked_gqa")
class MaskedGQABuilder(GQABuilder):
    """Builder for the masked-pretraining variant of GQA."""

    def __init__(self):
        super().__init__()
        self.dataset_name = "masked_gqa"
        # Use the setter for consistency with the other masked builders
        # (e.g. MaskedSBUBuilder); the original assigned dataset_class
        # directly, bypassing it.
        self.set_dataset_class(MaskedGQADataset)

    @classmethod
    def config_path(cls):
        # Config for the masked variant, relative to the mmf config root.
        return "configs/datasets/gqa/masked.yaml"
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/sbu_captions/masked_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.common.registry import registry
4 | from VisualBERT.mmf.datasets.builders.coco import MaskedCOCOBuilder
5 |
6 | from .masked_dataset import MaskedSBUDataset
7 |
8 |
@registry.register_builder("masked_sbu")
class MaskedSBUBuilder(MaskedCOCOBuilder):
    """Builder for the masked-pretraining variant of SBU Captions."""

    def __init__(self):
        super().__init__()
        # Override the masked-COCO parent's name and dataset class.
        self.dataset_name = "masked_sbu"
        self.set_dataset_class(MaskedSBUDataset)

    @classmethod
    def config_path(cls):
        # Config for the masked variant, relative to the mmf config root.
        return "configs/datasets/sbu_captions/masked.yaml"
18 | return "configs/datasets/sbu_captions/masked.yaml"
19 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_coco:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/subset_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from torch.utils.data.dataset import Subset
4 |
5 |
class MMFSubset(Subset):
    """torch Subset that transparently forwards unknown attribute
    lookups to the wrapped dataset.
    """

    def __init__(self, dataset, indices):
        super().__init__(dataset, indices)
        # Snapshot of this object's attribute names at construction time,
        # used by __getattr__ to tell own attributes from forwarded ones.
        self._dir_representation = dir(self)

    def __getattr__(self, name):
        # __getattr__ only fires after normal attribute lookup has failed.
        if "_dir_representation" in self.__dict__ and name in self._dir_representation:
            # NOTE(review): normal lookup already failed for `name`, so this
            # getattr re-enters __getattr__ and can recurse indefinitely for
            # snapshot names that no longer resolve — confirm upstream intent.
            return getattr(self, name)
        elif "dataset" in self.__dict__ and hasattr(self.dataset, name):
            # Delegate to the wrapped dataset (e.g. dataset-specific helpers).
            return getattr(self.dataset, name)
        else:
            raise AttributeError(name)
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_coco:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_coco:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/okvqa/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.common.registry import registry
3 | from VisualBERT.mmf.datasets.builders.okvqa.dataset import OKVQADataset
4 | from VisualBERT.mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
5 |
6 |
@registry.register_builder("okvqa")
class OKVQABuilder(MMFDatasetBuilder):
    """Dataset builder for OK-VQA, registered under the "okvqa" key."""

    def __init__(
        self, dataset_name="okvqa", dataset_class=OKVQADataset, *args, **kwargs
    ):
        # Name and dataset class are forwarded to the generic MMF builder;
        # defaults make the no-argument construction path work for registry use.
        super().__init__(dataset_name, dataset_class, *args, **kwargs)

    @classmethod
    def config_path(cls):
        # Default dataset config, resolved relative to the mmf config root.
        return "configs/datasets/okvqa/defaults.yaml"
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_conceptual_captions:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_conceptual_captions:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pythia/configs/vqa2/train_val_resnet_only.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./resnet_only.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/resnet152/features/trainval2014.lmdb
11 | - coco/resnet152/features/trainval2014.lmdb
12 | val:
13 | - coco/resnet152/features/trainval2014.lmdb
14 | test:
15 | - coco/resnet152/features/test2015.lmdb
16 | annotations:
17 | train:
18 | - vqa2/defaults/annotations/imdb_train2014.npy
19 | - vqa2/defaults/annotations/imdb_valminusminival2014.npy
20 | val:
21 | - vqa2/defaults/annotations/imdb_minival2014.npy
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/conceptual_captions/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.common.registry import registry
4 | from VisualBERT.mmf.datasets.builders.coco import COCOBuilder
5 |
6 | from .dataset import ConceptualCaptionsDataset
7 |
8 |
@registry.register_builder("conceptual_captions")
class ConceptualCaptionsBuilder(COCOBuilder):
    """Dataset builder for Conceptual Captions, reusing the COCO pipeline."""

    def __init__(self):
        super().__init__()
        # Override the COCO parent's name and dataset class.
        self.dataset_name = "conceptual_captions"
        self.set_dataset_class(ConceptualCaptionsDataset)

    @classmethod
    def config_path(cls):
        # Default dataset config, resolved relative to the mmf config root.
        return "configs/datasets/conceptual_captions/defaults.yaml"
19 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pythia/configs/vqa2/train_val_resnet_only.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./resnet_only.yaml
3 |
4 | dataset_config:
5 | vqa2:
6 | use_images: false
7 | use_features: true
8 | features:
9 | train:
10 | - coco/resnet152/features/trainval2014.lmdb
11 | - coco/resnet152/features/trainval2014.lmdb
12 | val:
13 | - coco/resnet152/features/trainval2014.lmdb
14 | test:
15 | - coco/resnet152/features/test2015.lmdb
16 | annotations:
17 | train:
18 | - vqa2/defaults/annotations/imdb_train2014.npy
19 | - vqa2/defaults/annotations/imdb_valminusminival2014.npy
20 | val:
21 | - vqa2/defaults/annotations/imdb_minival2014.npy
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_conceptual_captions:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/vqacp_v2/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.common.registry import registry
3 | from VisualBERT.mmf.datasets.builders.vqacp_v2.dataset import VQACPv2Dataset
4 | from VisualBERT.mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
5 |
6 |
@registry.register_builder("vqacp_v2")
class VQACPv2Builder(MMFDatasetBuilder):
    """Dataset builder for VQA-CP v2, registered under the "vqacp_v2" key."""

    def __init__(
        self, dataset_name="vqacp_v2", dataset_class=VQACPv2Dataset, *args, **kwargs
    ):
        # Name and dataset class are forwarded to the generic MMF builder;
        # defaults make the no-argument construction path work for registry use.
        super().__init__(dataset_name, dataset_class, *args, **kwargs)

    @classmethod
    def config_path(cls):
        # Default dataset config, resolved relative to the mmf config root.
        return "configs/datasets/vqacp_v2/defaults.yaml"
17 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c/configs/textvqa/joint_with_stvqa.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textvqa:
6 | use_images: false
7 | use_features: true
8 | zoo_requirements:
9 | - textvqa.defaults
10 | - textvqa.ocr_en
11 | - stvqa.defaults
12 | - stvqa.ocr_en
13 | features:
14 | train:
15 | - textvqa/defaults/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
16 | - stvqa/defaults/features/detectron.lmdb,stvqa/ocr_en/features/ocr_en_frcn_features.lmdb
17 | annotations:
18 | train:
19 | - textvqa/defaults/annotations/imdb_train_ocr_en.npy
20 | - stvqa/defaults/annotations/imdb_subtrain.npy
21 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_conceptual_captions:
7 | return_features_info: true
8 | use_image_feature_masks: true
9 |
10 | optimizer:
11 | type: adam_w
12 | params:
13 | lr: 5e-5
14 | eps: 1e-8
15 |
16 | scheduler:
17 | type: warmup_linear
18 | params:
19 | num_warmup_steps: 1000
20 | num_training_steps: 11000
21 |
22 | training:
23 | batch_size: 480
24 | lr_scheduler: true
25 | # Don't forget to update schedule_attributes if you update this
26 | max_updates: 11000
27 | find_unused_parameters: true
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c/configs/textvqa/joint_with_stvqa.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | dataset_config:
5 | textvqa:
6 | use_images: false
7 | use_features: true
8 | zoo_requirements:
9 | - textvqa.defaults
10 | - textvqa.ocr_en
11 | - stvqa.defaults
12 | - stvqa.ocr_en
13 | features:
14 | train:
15 | - textvqa/defaults/features/open_images/detectron.lmdb,textvqa/ocr_en/features/ocr_en_frcn_features.lmdb
16 | - stvqa/defaults/features/detectron.lmdb,stvqa/ocr_en/features/ocr_en_frcn_features.lmdb
17 | annotations:
18 | train:
19 | - textvqa/defaults/annotations/imdb_train_ocr_en.npy
20 | - stvqa/defaults/annotations/imdb_subtrain.npy
21 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: Adamax
3 | params:
4 | eps: 1.0e-08
5 | lr: 0.01
6 | weight_decay: 0
7 |
8 | evaluation:
9 | metrics:
10 | - caption_bleu4
11 |
12 | training:
13 | clip_norm_mode: all
14 | clip_gradients: true
15 | lr_ratio: 0.1
16 | lr_scheduler: true
17 | lr_steps:
18 | - 15000
19 | - 25000
20 | - 35000
21 | - 45000
22 | max_grad_l2_norm: 0.25
23 | max_updates: 50000
24 | use_warmup: true
25 | warmup_factor: 0.2
26 | warmup_iterations: 1000
27 | batch_size: 256
28 | num_workers: 7
29 | task_size_proportional_sampling: true
30 | early_stop:
31 | criteria: coco/caption_bleu4
32 | minimize: false
33 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/datasets/vqa2/with_raw_images.yaml:
--------------------------------------------------------------------------------
1 | dataset_config:
2 | vqa2:
3 | use_images: true
4 | use_features: false
5 | processors:
6 | image_processor:
7 | type: torchvision_transforms
8 | params:
9 | transforms:
10 | - type: Resize
11 | params:
12 | size: [256, 256]
13 | - type: CenterCrop
14 | params:
15 | size: [224, 224]
16 | - ToTensor
17 | - GrayScaleTo3Channels
18 | - type: Normalize
19 | params:
20 | mean: [0.46777044, 0.44531429, 0.40661017]
21 | std: [0.12221994, 0.12145835, 0.14380469]
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/ban/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | ban:
3 | losses:
4 | - type: logit_bce
5 | text_embedding:
6 | num_hidden: 1280
7 | vocab_size: 1280
8 | emb_size: 300
9 | num_layers: 1
10 | dropout: 0.0
11 | bidirectional: False
12 | rnn_type: 'GRU'
13 | bilinear_attention:
14 | bc_net:
15 | k: 1
16 | dropout: [0.2, 0.5]
17 | h_out:
18 | fc_net:
19 | dims: 600
20 | activation:
21 | dropout: 0.2
22 | gamma: 4
23 | visual_feat_dim: 2048
24 | classifier:
25 | # out dim will be taken from registry as set by dataset builder
26 | hidden_size: 600
27 | dropout: 0.5
28 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: Adamax
3 | params:
4 | eps: 1.0e-08
5 | lr: 0.01
6 | weight_decay: 0
7 |
8 | evaluation:
9 | metrics:
10 | - caption_bleu4
11 |
12 | training:
13 | clip_norm_mode: all
14 | clip_gradients: true
15 | lr_ratio: 0.1
16 | lr_scheduler: true
17 | lr_steps:
18 | - 15000
19 | - 25000
20 | - 35000
21 | - 45000
22 | max_grad_l2_norm: 0.25
23 | max_updates: 50000
24 | use_warmup: true
25 | warmup_factor: 0.2
26 | warmup_iterations: 1000
27 | batch_size: 256
28 | num_workers: 7
29 | task_size_proportional_sampling: true
30 | early_stop:
31 | criteria: coco/caption_bleu4
32 | minimize: false
33 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/website/src/pages/api_redirect/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Facebook, Inc. and its affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | *
7 | * @format
8 | */
9 | import React from 'react';
10 | import BrowserOnly from '@docusaurus/BrowserOnly';
11 | import {useHistory} from 'react-router-dom';
12 |
13 | const API = () => {
14 | const history = useHistory();
15 | history.push('/');
16 | return (
17 | Some Fallback Content}>
18 | {() => {
19 | window.location.href = '/api';
20 | }}
21 |
22 | );
23 | };
24 |
25 | export default API;
26 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/configs/models/unimodal/image.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | unimodal_image:
3 | # Either pretraining or classification
4 | direct_features_input: false
5 | freeze_base: false
6 | finetune_lr_multiplier: 1
7 | # Dimension of the embedding finally returned by the modal encoder
8 | modal_hidden_size: 2048
9 | # Used when classification head is activated
10 | num_labels: 2
11 | modal_encoder:
12 | type: resnet152
13 | params:
14 | pretrained: true
15 | pool_type: avg
16 | num_output_features: 1
17 |
18 | classifier:
19 | type: mlp
20 | params:
21 | in_dim: 2048
22 | out_dim: 2
23 | hidden_dim: 768
24 | num_layers: 0
25 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/butd/configs/conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: Adamax
3 | params:
4 | eps: 1.0e-08
5 | lr: 0.01
6 | weight_decay: 0
7 |
8 | evaluation:
9 | metrics:
10 | - caption_bleu4
11 |
12 | training:
13 | clip_norm_mode: all
14 | clip_gradients: true
15 | lr_ratio: 0.1
16 | lr_scheduler: true
17 | lr_steps:
18 | - 15000
19 | - 25000
20 | - 35000
21 | - 45000
22 | max_grad_l2_norm: 0.25
23 | max_updates: 50000
24 | use_warmup: true
25 | warmup_factor: 0.2
26 | warmup_iterations: 1000
27 | batch_size: 256
28 | num_workers: 7
29 | task_size_proportional_sampling: true
30 | early_stop:
31 | criteria: conceptual_captions/caption_bleu4
32 | minimize: false
33 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/conceptual_captions/masked_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.common.registry import registry
3 | from VisualBERT.mmf.datasets.builders.coco import MaskedCOCOBuilder
4 |
5 | from .masked_dataset import MaskedConceptualCaptionsDataset
6 |
7 |
@registry.register_builder("masked_conceptual_captions")
class MaskedConceptualCaptionsBuilder(MaskedCOCOBuilder):
    """Builder for the masked Conceptual Captions pretraining dataset.

    Reuses the masked-COCO builder machinery wholesale, swapping in the
    Conceptual Captions dataset class and its default config path.
    """

    def __init__(self):
        super().__init__()
        # Register under the CC-specific name and point at the CC dataset.
        self.dataset_name = "masked_conceptual_captions"
        self.set_dataset_class(MaskedConceptualCaptionsDataset)

    @classmethod
    def config_path(cls):
        # Default configuration shipped with the repository.
        return "configs/datasets/conceptual_captions/masked.yaml"
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/sbu_captions/masked_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.datasets.builders.coco import MaskedCOCODataset
4 |
5 |
class MaskedSBUDataset(MaskedCOCODataset):
    """Masked-language-modeling dataset over SBU Captions.

    All loading logic is inherited from ``MaskedCOCODataset``; only the
    dataset name and a handful of caption-sampling knobs are set here.
    """

    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "masked_sbu"
        # Caption-pair sampling options; defaults mirror the COCO setup.
        self._two_sentence = config.get("two_sentence", True)
        self._two_sentence_probability = config.get("two_sentence_probability", 0.5)
        self._false_caption = config.get("false_caption", True)
        self._false_caption_probability = config.get("false_caption_probability", 0.5)
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/butd/configs/conceptual_captions/defaults.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | type: Adamax
3 | params:
4 | eps: 1.0e-08
5 | lr: 0.01
6 | weight_decay: 0
7 |
8 | evaluation:
9 | metrics:
10 | - caption_bleu4
11 |
12 | training:
13 | clip_norm_mode: all
14 | clip_gradients: true
15 | lr_ratio: 0.1
16 | lr_scheduler: true
17 | lr_steps:
18 | - 15000
19 | - 25000
20 | - 35000
21 | - 45000
22 | max_grad_l2_norm: 0.25
23 | max_updates: 50000
24 | use_warmup: true
25 | warmup_factor: 0.2
26 | warmup_iterations: 1000
27 | batch_size: 256
28 | num_workers: 7
29 | task_size_proportional_sampling: true
30 | early_stop:
31 | criteria: conceptual_captions/caption_bleu4
32 | minimize: false
33 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/hateful_memes/configs/mmbt/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/mmbt/classification.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 |
5 | scheduler:
6 | type: warmup_linear
7 | params:
8 | num_warmup_steps: 2000
9 | num_training_steps: ${training.max_updates}
10 |
11 | optimizer:
12 | type: adam_w
13 | params:
14 | lr: 1e-5
15 | eps: 1e-8
16 |
17 | evaluation:
18 | metrics:
19 | - accuracy
20 | - binary_f1
21 | - roc_auc
22 |
23 | training:
24 | batch_size: 32
25 | lr_scheduler: true
26 | max_updates: 22000
27 | early_stop:
28 | criteria: hateful_memes/roc_auc
29 | minimize: false
30 |
31 | checkpoint:
32 | pretrained_state_mapping:
33 | bert: bert
34 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/mmbt/configs/hateful_memes/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/mmbt/classification.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 |
5 | scheduler:
6 | type: warmup_linear
7 | params:
8 | num_warmup_steps: 2000
9 | num_training_steps: ${training.max_updates}
10 |
11 | optimizer:
12 | type: adam_w
13 | params:
14 | lr: 1e-5
15 | eps: 1e-8
16 |
17 | evaluation:
18 | metrics:
19 | - accuracy
20 | - binary_f1
21 | - roc_auc
22 |
23 | training:
24 | batch_size: 32
25 | lr_scheduler: true
26 | max_updates: 22000
27 | early_stop:
28 | criteria: hateful_memes/roc_auc
29 | minimize: false
30 |
31 | checkpoint:
32 | pretrained_state_mapping:
33 | bert: bert
34 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/mmbt/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/mmbt/classification.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 |
5 | scheduler:
6 | type: warmup_linear
7 | params:
8 | num_warmup_steps: 2000
9 | num_training_steps: ${training.max_updates}
10 |
11 | optimizer:
12 | type: adam_w
13 | params:
14 | lr: 1e-5
15 | eps: 1e-8
16 |
17 | evaluation:
18 | metrics:
19 | - accuracy
20 | - binary_f1
21 | - roc_auc
22 |
23 | training:
24 | batch_size: 32
25 | lr_scheduler: true
26 | max_updates: 22000
27 | early_stop:
28 | criteria: hateful_memes/roc_auc
29 | minimize: false
30 |
31 | checkpoint:
32 | pretrained_state_mapping:
33 | bert: bert
34 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/mmbt/configs/hateful_memes/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/models/mmbt/classification.yaml
3 | - configs/datasets/hateful_memes/bert.yaml
4 |
5 | scheduler:
6 | type: warmup_linear
7 | params:
8 | num_warmup_steps: 2000
9 | num_training_steps: ${training.max_updates}
10 |
11 | optimizer:
12 | type: adam_w
13 | params:
14 | lr: 1e-5
15 | eps: 1e-8
16 |
17 | evaluation:
18 | metrics:
19 | - accuracy
20 | - binary_f1
21 | - roc_auc
22 |
23 | training:
24 | batch_size: 32
25 | lr_scheduler: true
26 | max_updates: 22000
27 | early_stop:
28 | criteria: hateful_memes/roc_auc
29 | minimize: false
30 |
31 | checkpoint:
32 | pretrained_state_mapping:
33 | bert: bert
34 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/vizwiz/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from VisualBERT.mmf.common.registry import registry
3 | from VisualBERT.mmf.datasets.builders.vizwiz.dataset import VizWizDataset
4 | from VisualBERT.mmf.datasets.builders.vqa2 import VQA2Builder
5 |
6 |
@registry.register_builder("vizwiz")
class VizWizBuilder(VQA2Builder):
    """Dataset builder for VizWiz, layered on top of the VQA2 builder."""

    def __init__(self):
        super().__init__()
        self.dataset_name = "vizwiz"
        self.set_dataset_class(VizWizDataset)

    @classmethod
    def config_path(cls):
        return "configs/datasets/vizwiz/defaults.yaml"

    def update_registry_for_model(self, config):
        # No VizWiz-specific registry entries today; defer entirely to VQA2.
        super().update_registry_for_model(config)
20 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/m4c_captioner/configs/m4c_captioner/coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/datasets/coco/ocr_en.yaml
3 | optimizer:
4 | params:
5 | eps: 1.0e-08
6 | lr: 1e-4
7 | weight_decay: 0
8 | type: Adam
9 |
10 | evaluation:
11 | metrics:
12 | - textcaps_bleu4
13 |
14 | training:
15 | clip_norm_mode: all
16 | clip_gradients: true
17 | max_grad_l2_norm: 0.25
18 | lr_scheduler: true
19 | lr_steps:
20 | - 14000
21 | - 19000
22 | lr_ratio: 0.1
23 | use_warmup: true
24 | warmup_factor: 0.2
25 | warmup_iterations: 1000
26 | max_iterations: 24000
27 | batch_size: 128
28 | num_workers: 8
29 | early_stop:
30 | criteria: coco/textcaps_bleu4
31 | minimize: false
32 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/others/cnn_lstm/hateful_memes/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | cnn_lstm:
3 | losses:
4 | - type: cross_entropy
5 | classifier:
6 | type: mlp
7 | params:
8 | in_dim: 190
9 | out_dim: 2
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 2000
15 | num_training_steps: ${training.max_updates}
16 |
17 | optimizer:
18 | type: adam_w
19 | params:
20 | lr: 5e-5
21 | eps: 1e-8
22 |
23 | evaluation:
24 | metrics:
25 | - accuracy
26 | - binary_f1
27 | - roc_auc
28 |
29 | training:
30 | batch_size: 480
31 | lr_scheduler: true
32 | max_updates: 60000
33 | early_stop:
34 | criteria: hateful_memes/roc_auc
35 | minimize: false
36 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/m4c_captioner/configs/m4c_captioner/coco/defaults.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - configs/datasets/coco/ocr_en.yaml
3 | optimizer:
4 | params:
5 | eps: 1.0e-08
6 | lr: 1e-4
7 | weight_decay: 0
8 | type: Adam
9 |
10 | evaluation:
11 | metrics:
12 | - textcaps_bleu4
13 |
14 | training:
15 | clip_norm_mode: all
16 | clip_gradients: true
17 | max_grad_l2_norm: 0.25
18 | lr_scheduler: true
19 | lr_steps:
20 | - 14000
21 | - 19000
22 | lr_ratio: 0.1
23 | use_warmup: true
24 | warmup_factor: 0.2
25 | warmup_iterations: 1000
26 | max_iterations: 24000
27 | batch_size: 128
28 | num_workers: 8
29 | early_stop:
30 | criteria: coco/textcaps_bleu4
31 | minimize: false
32 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/others/cnn_lstm/hateful_memes/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | cnn_lstm:
3 | losses:
4 | - type: cross_entropy
5 | classifier:
6 | type: mlp
7 | params:
8 | in_dim: 190
9 | out_dim: 2
10 |
11 | scheduler:
12 | type: warmup_linear
13 | params:
14 | num_warmup_steps: 2000
15 | num_training_steps: ${training.max_updates}
16 |
17 | optimizer:
18 | type: adam_w
19 | params:
20 | lr: 5e-5
21 | eps: 1e-8
22 |
23 | evaluation:
24 | metrics:
25 | - accuracy
26 | - binary_f1
27 | - roc_auc
28 |
29 | training:
30 | batch_size: 480
31 | lr_scheduler: true
32 | max_updates: 60000
33 | early_stop:
34 | criteria: hateful_memes/roc_auc
35 | minimize: false
36 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/vilbert/configs/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_vqa2:
7 | annotations:
8 | train:
9 | - vqa2/defaults/annotations/imdb_train2014.npy
10 | return_features_info: true
11 | use_image_feature_masks: true
12 |
13 | optimizer:
14 | type: adam_w
15 | params:
16 | lr: 5e-5
17 | eps: 1e-8
18 |
19 | scheduler:
20 | type: warmup_linear
21 | params:
22 | num_warmup_steps: 1000
23 | num_training_steps: 11000
24 |
25 | training:
26 | batch_size: 480
27 | lr_scheduler: true
28 | # Don't forget to update schedule_attributes if you update this
29 | max_updates: 11000
30 | find_unused_parameters: true
31 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/lxmert/lxmert/run/lxmert_pretrain.bash:
--------------------------------------------------------------------------------
# The name of experiment
name=lxmert

# Create dirs and back up the sources plus this launch script so the run
# is reproducible. Expansions are quoted so paths with spaces don't
# word-split or glob.
output=snap/pretrain/$name
mkdir -p "$output/src"
cp -r src/* "$output/src/"
cp "$0" "$output/run.bash"

# Pre-training. First CLI arg selects the visible GPUs; any further args
# ("${@:2}", quoted so each forwarded argument stays intact) are passed
# verbatim to the trainer.
CUDA_VISIBLE_DEVICES=$1 PYTHONPATH=$PYTHONPATH:./src \
    python src/pretrain/lxmert_pretrain.py \
    --taskMaskLM --taskObjPredict --taskMatched --taskQA \
    --visualLosses obj,attr,feat \
    --wordMaskRate 0.15 --objMaskRate 0.15 \
    --train mscoco_train,mscoco_nominival,vgnococo --valid mscoco_minival \
    --llayers 9 --xlayers 5 --rlayers 5 \
    --fromScratch \
    --batchSize 256 --optim bert --lr 1e-4 --epochs 20 \
    --tqdm --output "$output" "${@:2}"
21 |
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/vilbert/configs/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_vqa2:
7 | annotations:
8 | train:
9 | - vqa2/defaults/annotations/imdb_train2014.npy
10 | return_features_info: true
11 | use_image_feature_masks: true
12 |
13 | optimizer:
14 | type: adam_w
15 | params:
16 | lr: 5e-5
17 | eps: 1e-8
18 |
19 | scheduler:
20 | type: warmup_linear
21 | params:
22 | num_warmup_steps: 1000
23 | num_training_steps: 11000
24 |
25 | training:
26 | batch_size: 480
27 | lr_scheduler: true
28 | # Don't forget to update schedule_attributes if you update this
29 | max_updates: 11000
30 | find_unused_parameters: true
31 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/visual_genome/masked_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.common.registry import registry
4 | from VisualBERT.mmf.datasets.builders.visual_genome.builder import VisualGenomeBuilder
5 | from VisualBERT.mmf.datasets.builders.visual_genome.masked_dataset import MaskedVisualGenomeDataset
6 |
7 |
@registry.register_builder("masked_visual_genome")
class MaskedVisualGenomeBuilder(VisualGenomeBuilder):
    """Builder registering the masked variant of Visual Genome."""

    def __init__(self):
        super().__init__()
        self.dataset_name = "masked_visual_genome"
        # NOTE(review): sibling builders call ``set_dataset_class`` here;
        # the direct attribute assignment is kept to preserve behavior —
        # confirm the two are equivalent before unifying.
        self.dataset_class = MaskedVisualGenomeDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/visual_genome/masked.yaml"
18 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/lorra/configs/vqa2/train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | # Use soft copy
5 | dataset_config:
6 | vqa2_train_val:
7 | use_ocr: true
8 | processors:
9 | context_processor:
10 | type: fasttext
11 | params:
12 | download_initially: true
13 | max_length: 50
14 | model_file: wiki.en.bin
15 | answer_processor:
16 | type: soft_copy_answer
17 | params:
18 | vocab_file: vqa2/defaults/extras/vocabs/answers_vqa.txt
19 | preprocessor:
20 | type: simple_word
21 | params: {}
22 | context_preprocessor:
23 | type: simple_word
24 | params: {}
25 | max_length: 50
26 | num_answers: 10
27 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/lorra/configs/vqa2/train_val.yaml:
--------------------------------------------------------------------------------
1 | includes:
2 | - ./defaults.yaml
3 |
4 | # Use soft copy
5 | dataset_config:
6 | vqa2_train_val:
7 | use_ocr: true
8 | processors:
9 | context_processor:
10 | type: fasttext
11 | params:
12 | download_initially: true
13 | max_length: 50
14 | model_file: wiki.en.bin
15 | answer_processor:
16 | type: soft_copy_answer
17 | params:
18 | vocab_file: vqa2/defaults/extras/vocabs/answers_vqa.txt
19 | preprocessor:
20 | type: simple_word
21 | params: {}
22 | context_preprocessor:
23 | type: simple_word
24 | params: {}
25 | max_length: 50
26 | num_answers: 10
27 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_vqa2:
7 | annotations:
8 | train:
9 | - vqa2/defaults/annotations/imdb_train2014.npy
10 | return_features_info: true
11 | use_image_feature_masks: true
12 |
13 | optimizer:
14 | type: adam_w
15 | params:
16 | lr: 5e-5
17 | eps: 1e-8
18 |
19 | scheduler:
20 | type: warmup_linear
21 | params:
22 | num_warmup_steps: 1000
23 | num_training_steps: 11000
24 |
25 | training:
26 | batch_size: 480
27 | lr_scheduler: true
28 | # Don't forget to update schedule_attributes if you update this
29 | max_updates: 11000
30 | find_unused_parameters: true
31 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/pretrain_vl_right/configs/vilbert/masked_vqa2/defaults.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | vilbert:
3 | training_head_type: pretraining
4 |
5 | dataset_config:
6 | masked_vqa2:
7 | annotations:
8 | train:
9 | - vqa2/defaults/annotations/imdb_train2014.npy
10 | return_features_info: true
11 | use_image_feature_masks: true
12 |
13 | optimizer:
14 | type: adam_w
15 | params:
16 | lr: 5e-5
17 | eps: 1e-8
18 |
19 | scheduler:
20 | type: warmup_linear
21 | params:
22 | num_warmup_steps: 1000
23 | num_training_steps: 11000
24 |
25 | training:
26 | batch_size: 480
27 | lr_scheduler: true
28 | # Don't forget to update schedule_attributes if you update this
29 | max_updates: 11000
30 | find_unused_parameters: true
31 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/coco2017/masked_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.common.typings import MMFDatasetConfigType
4 | from VisualBERT.mmf.datasets.builders.localized_narratives.masked_dataset import (
5 | MaskedLocalizedNarrativesDatasetMixin,
6 | )
7 | from VisualBERT.mmf.datasets.mmf_dataset import MMFDataset
8 |
9 |
class MaskedCoco2017Dataset(MaskedLocalizedNarrativesDatasetMixin, MMFDataset):
    """Masked COCO-2017 dataset assembled from the Localized Narratives mixin."""

    def __init__(
        self,
        config: MMFDatasetConfigType,
        dataset_type: str,
        index: int,
        *args,
        **kwargs,
    ):
        # Forward the registered dataset name plus the standard
        # (config, split, file-index) triple to the MMFDataset chain.
        super().__init__("masked_coco2017", config, dataset_type, index, *args, **kwargs)
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/flickr30k/masked_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.common.typings import MMFDatasetConfigType
4 | from VisualBERT.mmf.datasets.builders.localized_narratives.masked_dataset import (
5 | MaskedLocalizedNarrativesDatasetMixin,
6 | )
7 | from VisualBERT.mmf.datasets.mmf_dataset import MMFDataset
8 |
9 |
class MaskedFlickr30kDataset(MaskedLocalizedNarrativesDatasetMixin, MMFDataset):
    """Masked Flickr30k dataset assembled from the Localized Narratives mixin."""

    def __init__(
        self,
        config: MMFDatasetConfigType,
        dataset_type: str,
        index: int,
        *args,
        **kwargs,
    ):
        # Forward the registered dataset name plus the standard
        # (config, split, file-index) triple to the MMFDataset chain.
        super().__init__("masked_flickr30k", config, dataset_type, index, *args, **kwargs)
22 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/mmf/datasets/builders/conceptual_captions/masked_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | from VisualBERT.mmf.datasets.builders.coco import MaskedCOCODataset
4 |
5 |
class MaskedConceptualCaptionsDataset(MaskedCOCODataset):
    """Masked-language-modeling dataset over Conceptual Captions.

    All loading logic is inherited from ``MaskedCOCODataset``; only the
    dataset name and a handful of caption-sampling knobs are set here.
    """

    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "masked_conceptual_captions"
        # Caption-pair sampling options; defaults mirror the COCO setup.
        self._two_sentence = config.get("two_sentence", True)
        self._two_sentence_probability = config.get("two_sentence_probability", 0.5)
        self._false_caption = config.get("false_caption", True)
        self._false_caption_probability = config.get("false_caption_probability", 0.5)
14 |
--------------------------------------------------------------------------------
/Transformer-MM-Explainability/VisualBERT/projects/hateful_memes/configs/unimodal/text.yaml:
--------------------------------------------------------------------------------
1 | model_config:
2 | unimodal_text:
3 | classifier:
4 | type: mlp
5 | params:
6 | num_layers: 2
7 | losses:
8 | - type: cross_entropy
9 |
10 | scheduler:
11 | type: warmup_linear
12 | params:
13 | num_warmup_steps: 2000
14 | num_training_steps: ${training.max_updates}
15 |
16 | optimizer:
17 | type: adam_w
18 | params:
19 | lr: 5e-5
20 | eps: 1e-8
21 |
22 | evaluation:
23 | metrics:
24 | - accuracy
25 | - binary_f1
26 | - roc_auc
27 |
28 | training:
29 | batch_size: 32
30 | lr_scheduler: true
31 | max_updates: 22000
32 | early_stop:
33 | criteria: hateful_memes/roc_auc
34 | minimize: false
35 |
36 | checkpoint:
37 | pretrained_state_mapping:
38 | base: base
39 |
--------------------------------------------------------------------------------