├── Two-branch Transformer ├── molecule │ ├── __init__.py │ ├── dict_label.txt │ ├── shuffle_head.py │ ├── merge_state_dict.py │ └── detokenize_re.py ├── scripts │ ├── __init__.py │ ├── spm_train.py │ ├── compound_split_bleu.sh │ ├── sacrebleu.sh │ ├── convert_dictionary.lua │ ├── constraints │ │ └── validate.py │ └── compare_namespaces.py ├── tests │ ├── __init__.py │ ├── gpu │ │ ├── __init__.py │ │ └── transformer_quantization_config.yaml │ ├── distributed │ │ └── __init__.py │ ├── speech_recognition │ │ └── __init__.py │ └── test_iopath.py ├── fairseq_cli │ └── __init__.py ├── fairseq │ ├── data │ │ ├── audio │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── audio_utils.cpython-36.pyc │ │ │ │ ├── raw_audio_dataset.cpython-36.pyc │ │ │ │ └── speech_to_text_dataset.cpython-36.pyc │ │ │ └── feature_transforms │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── global_cmvn.cpython-36.pyc │ │ │ │ ├── specaugment.cpython-36.pyc │ │ │ │ └── utterance_cmvn.cpython-36.pyc │ │ │ │ └── global_cmvn.py │ │ ├── molecule │ │ │ ├── __init__.py │ │ │ └── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── molecule.cpython-36.pyc │ │ │ │ ├── indexed_dataset.cpython-36.pyc │ │ │ │ └── graphseq_pair_dataset.cpython-36.pyc │ │ ├── __pycache__ │ │ │ ├── noising.cpython-36.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── iterators.cpython-36.pyc │ │ │ ├── data_utils.cpython-36.pyc │ │ │ ├── dictionary.cpython-36.pyc │ │ │ ├── id_dataset.cpython-36.pyc │ │ │ ├── list_dataset.cpython-36.pyc │ │ │ ├── pad_dataset.cpython-36.pyc │ │ │ ├── plasma_utils.cpython-36.pyc │ │ │ ├── roll_dataset.cpython-36.pyc │ │ │ ├── sort_dataset.cpython-36.pyc │ │ │ ├── concat_dataset.cpython-36.pyc │ │ │ ├── fasta_dataset.cpython-36.pyc │ │ │ ├── numel_dataset.cpython-36.pyc │ │ │ ├── colorize_dataset.cpython-36.pyc │ │ │ ├── denoising_dataset.cpython-36.pyc │ │ │ ├── fairseq_dataset.cpython-36.pyc │ │ │ ├── indexed_dataset.cpython-36.pyc │ │ │ ├── lru_cache_dataset.cpython-36.pyc │ │ │ ├── prepend_dataset.cpython-36.pyc │ │ │ ├── raw_label_dataset.cpython-36.pyc │ │ │ ├── replace_dataset.cpython-36.pyc │ │ │ ├── shorten_dataset.cpython-36.pyc │ │ │ ├── subsample_dataset.cpython-36.pyc │ │ │ ├── add_target_dataset.cpython-36.pyc │ │ │ ├── mask_tokens_dataset.cpython-36.pyc │ │ │ ├── monolingual_dataset.cpython-36.pyc │ │ │ ├── num_samples_dataset.cpython-36.pyc │ │ │ ├── resampling_dataset.cpython-36.pyc │ │ │ ├── strip_token_dataset.cpython-36.pyc │ │ │ ├── token_block_dataset.cpython-36.pyc │ │ │ ├── append_token_dataset.cpython-36.pyc │ │ │ ├── base_wrapper_dataset.cpython-36.pyc │ │ │ ├── language_pair_dataset.cpython-36.pyc │ │ │ ├── offset_tokens_dataset.cpython-36.pyc │ │ │ ├── prepend_token_dataset.cpython-36.pyc │ │ │ ├── transform_eos_dataset.cpython-36.pyc │ │ │ ├── backtranslation_dataset.cpython-36.pyc │ │ │ ├── concat_sentences_dataset.cpython-36.pyc │ │ │ ├── round_robin_zip_datasets.cpython-36.pyc │ │ │ ├── bucket_pad_length_dataset.cpython-36.pyc │ │ │ ├── lm_context_window_dataset.cpython-36.pyc │ │ │ ├── nested_dictionary_dataset.cpython-36.pyc │ │ │ ├── multi_corpus_sampled_dataset.cpython-36.pyc │ │ │ └── transform_eos_lang_pair_dataset.cpython-36.pyc │ │ ├── encoders │ │ │ ├── __pycache__ │ │ │ │ ├── bytes.cpython-36.pyc │ │ │ │ ├── utils.cpython-36.pyc │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── byte_bpe.cpython-36.pyc │ │ │ │ ├── fastbpe.cpython-36.pyc │ │ │ │ ├── gpt2_bpe.cpython-36.pyc │ │ │ │ ├── byte_utils.cpython-36.pyc │ │ │ │ ├── 
characters.cpython-36.pyc │ │ │ │ ├── hf_bert_bpe.cpython-36.pyc │ │ │ │ ├── hf_byte_bpe.cpython-36.pyc │ │ │ │ ├── gpt2_bpe_utils.cpython-36.pyc │ │ │ │ ├── moses_tokenizer.cpython-36.pyc │ │ │ │ ├── nltk_tokenizer.cpython-36.pyc │ │ │ │ ├── space_tokenizer.cpython-36.pyc │ │ │ │ ├── subword_nmt_bpe.cpython-36.pyc │ │ │ │ └── sentencepiece_bpe.cpython-36.pyc │ │ │ ├── space_tokenizer.py │ │ │ ├── characters.py │ │ │ ├── nltk_tokenizer.py │ │ │ ├── __init__.py │ │ │ ├── bytes.py │ │ │ ├── utils.py │ │ │ └── fastbpe.py │ │ ├── legacy │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── masked_lm_dataset.cpython-36.pyc │ │ │ │ ├── block_pair_dataset.cpython-36.pyc │ │ │ │ └── masked_lm_dictionary.cpython-36.pyc │ │ │ └── __init__.py │ │ ├── data_utils_fast.cpython-36m-x86_64-linux-gnu.so │ │ ├── multilingual │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── sampling_method.cpython-36.pyc │ │ │ │ ├── multilingual_utils.cpython-36.pyc │ │ │ │ ├── sampled_multi_dataset.cpython-36.pyc │ │ │ │ ├── multilingual_data_manager.cpython-36.pyc │ │ │ │ └── sampled_multi_epoch_dataset.cpython-36.pyc │ │ │ └── __init__.py │ │ ├── token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so │ │ ├── num_samples_dataset.py │ │ ├── id_dataset.py │ │ ├── offset_tokens_dataset.py │ │ ├── roll_dataset.py │ │ ├── raw_label_dataset.py │ │ ├── lru_cache_dataset.py │ │ ├── sort_dataset.py │ │ ├── strip_token_dataset.py │ │ ├── list_dataset.py │ │ ├── colorize_dataset.py │ │ ├── pad_dataset.py │ │ ├── numel_dataset.py │ │ ├── prepend_dataset.py │ │ ├── append_token_dataset.py │ │ └── prepend_token_dataset.py │ ├── logging │ │ └── __init__.py │ ├── models │ │ ├── gnn_kdd │ │ │ └── __init__.py │ │ ├── bart │ │ │ └── __init__.py │ │ ├── wav2vec │ │ │ └── __init__.py │ │ ├── speech_to_text │ │ │ └── __init__.py │ │ ├── roberta │ │ │ └── __init__.py │ │ ├── nat │ │ │ └── __init__.py │ │ └── huggingface │ │ │ └── __init__.py │ ├── version.txt │ ├── modules │ │ ├── quantization │ │ │ ├── __init__.py │ │ │ ├── scalar │ │ │ │ ├── __init__.py │ │ │ │ └── modules │ │ │ │ │ └── __init__.py │ │ │ └── pq │ │ │ │ ├── __init__.py │ │ │ │ └── modules │ │ │ │ └── __init__.py │ │ ├── lightconv_layer │ │ │ ├── __init__.py │ │ │ └── setup.py │ │ ├── dynamicconv_layer │ │ │ ├── __init__.py │ │ │ ├── setup.py │ │ │ └── dynamiconv_cpu.cpp │ │ ├── grad_multiply.py │ │ ├── same_pad.py │ │ ├── transpose_last.py │ │ ├── unfold.py │ │ ├── gelu.py │ │ ├── fp32_group_norm.py │ │ └── scalar_bias.py │ ├── config │ │ ├── model │ │ │ ├── wav2vec │ │ │ │ └── vq_wav2vec_gumbel.yaml │ │ │ ├── wav2vec2 │ │ │ │ ├── wav2vec2_base.yaml │ │ │ │ └── wav2vec2_large.yaml │ │ │ └── transformer_lm │ │ │ │ ├── transformer_lm_big.yaml │ │ │ │ ├── transformer_lm_gbw.yaml │ │ │ │ ├── transformer_lm_gpt.yaml │ │ │ │ ├── transformer_lm_baevski_gbw.yaml │ │ │ │ ├── transformer_lm_gpt2_big.yaml │ │ │ │ ├── transformer_lm_gpt2_medium.yaml │ │ │ │ ├── transformer_lm_gpt2_small.yaml │ │ │ │ ├── transformer_lm_wiki103.yaml │ │ │ │ └── transformer_lm_baevski_wiki103.yaml │ │ ├── __init__.py │ │ └── config.yaml │ ├── model_parallel │ │ ├── models │ │ │ ├── roberta │ │ │ │ └── __init__.py │ │ │ ├── pipeline_parallel_transformer │ │ │ │ └── __init__.py │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── criterions │ │ │ └── __init__.py │ │ └── modules │ │ │ └── __init__.py │ ├── benchmark │ │ └── __init__.py │ ├── dataclass │ │ └── __init__.py │ ├── tokenizer.py │ ├── clib │ │ ├── libnat_cuda │ │ │ └── edit_dist.h │ │ └── libbleu │ │ │ └── module.cpp 
│ ├── distributed │ │ └── __init__.py │ ├── scoring │ │ └── chrf.py │ ├── criterions │ │ └── __init__.py │ ├── optim │ │ └── lr_scheduler │ │ │ └── __init__.py │ └── pdb.py ├── examples │ ├── .gitignore │ ├── latent_depth │ │ └── latent_depth_src │ │ │ ├── loss │ │ │ └── __init__.py │ │ │ ├── models │ │ │ └── __init__.py │ │ │ ├── modules │ │ │ └── __init__.py │ │ │ └── __init__.py │ ├── linformer │ │ ├── linformer_src │ │ │ ├── models │ │ │ │ └── __init__.py │ │ │ ├── modules │ │ │ │ └── __init__.py │ │ │ └── __init__.py │ │ └── README.md │ ├── multilingual │ │ ├── data_scripts │ │ │ ├── requirement.txt │ │ │ ├── utils │ │ │ │ └── strip_sgm.sh │ │ │ ├── README.md │ │ │ ├── preprocess_ML50_v1.sh │ │ │ ├── download_ML50_v1.sh │ │ │ └── download_iitb.sh │ │ ├── ML50_langs.txt │ │ └── multilingual_fairseq_gen.sh │ ├── adaptive_span │ │ ├── truncated_bptt_lm_task.py │ │ └── __init__.py │ ├── speech_recognition │ │ ├── __init__.py │ │ ├── tasks │ │ │ └── __init__.py │ │ ├── models │ │ │ └── __init__.py │ │ ├── hydra │ │ │ └── conf │ │ │ │ ├── infer.yaml │ │ │ │ └── hydra │ │ │ │ └── sweeper │ │ │ │ └── ax.yaml │ │ └── criterions │ │ │ └── __init__.py │ ├── rxf │ │ ├── __init__.py │ │ └── rxf_src │ │ │ └── __init__.py │ ├── noisychannel │ │ └── __init__.py │ ├── simultaneous_translation │ │ ├── __init__.py │ │ ├── README.md │ │ ├── models │ │ │ └── __init__.py │ │ ├── utils │ │ │ └── __init__.py │ │ └── modules │ │ │ └── __init__.py │ ├── roberta │ │ ├── commonsense_qa │ │ │ ├── __init__.py │ │ │ └── download_cqa_data.sh │ │ └── wsc │ │ │ └── __init__.py │ ├── pointer_generator │ │ └── pointer_generator_src │ │ │ └── __init__.py │ ├── translation_moe │ │ └── translation_moe_src │ │ │ ├── __init__.py │ │ │ └── logsumexp_moe.py │ ├── truncated_bptt │ │ └── __init__.py │ ├── __init__.py │ ├── m2m_100 │ │ └── tokenizers │ │ │ ├── thirdparty │ │ │ └── .gitignore │ │ │ ├── tokenize_zh.py │ │ │ ├── tokenize_thai.py │ │ │ ├── seg_ja.sh │ │ │ ├── seg_ko.sh │ │ │ ├── README.md │ │ │ ├── tokenize_indic.py │ │ │ └── tokenizer_ar.sh │ ├── laser │ │ └── laser_src │ │ │ └── __init__.py │ ├── fast_noisy_channel │ │ └── __init__.py │ ├── constrained_decoding │ │ ├── normalize.py │ │ └── tok.py │ ├── megatron_11b │ │ └── detok.py │ ├── unsupervised_quality_estimation │ │ └── repeat_lines.py │ ├── language_model │ │ └── prepare-wikitext-103.sh │ ├── backtranslation │ │ └── sacrebleu.sh │ ├── quant_noise │ │ └── transformer_quantization_config.yaml │ └── wav2vec │ │ └── config │ │ ├── finetuning │ │ ├── base_960h.yaml │ │ ├── vox_960h.yaml │ │ ├── base_100h.yaml │ │ └── vox_100h.yaml │ │ └── pretraining │ │ └── wav2vec2_base_librispeech.yaml ├── docs │ ├── docutils.conf │ ├── requirements.txt │ ├── fairseq.gif │ ├── fairseq_logo.png │ ├── _static │ │ └── theme_overrides.css │ ├── modules.rst │ ├── Makefile │ ├── criterions.rst │ ├── make.bat │ ├── optim.rst │ ├── lr_scheduler.rst │ └── index.rst ├── pyproject.toml ├── .gitmodules ├── .github │ ├── ISSUE_TEMPLATE.md │ ├── ISSUE_TEMPLATE │ │ ├── documentation.md │ │ ├── feature_request.md │ │ ├── how-to-question.md │ │ └── bug_report.md │ ├── PULL_REQUEST_TEMPLATE.md │ └── workflows │ │ └── build_wheels.yml ├── train.py ├── data_splitter.py ├── LICENSE └── CONTRIBUTING.md ├── GIN ├── examples │ ├── lsc │ │ ├── mag240m │ │ │ └── root.py │ │ ├── wikikg90m │ │ │ └── dgl-ke-ogb-lsc │ │ │ │ ├── conda │ │ │ │ └── README.md │ │ │ │ ├── docker │ │ │ │ └── README.md │ │ │ │ ├── examples │ │ │ │ ├── wn18 │ │ │ │ │ ├── rel.list │ │ │ │ │ ├── raw_rel.list │ │ │ │ │ ├── head.list 
│ │ │ │ │ ├── tail.list │ │ │ │ │ ├── raw_head.list │ │ │ │ │ └── raw_tail.list │ │ │ │ └── wn18_weighted │ │ │ │ │ └── README.md │ │ │ │ ├── python │ │ │ │ ├── dglke │ │ │ │ │ ├── VERSION.txt │ │ │ │ │ ├── models │ │ │ │ │ │ ├── mxnet │ │ │ │ │ │ │ └── __init__.py │ │ │ │ │ │ ├── pytorch │ │ │ │ │ │ │ └── __init__.py │ │ │ │ │ │ └── __init__.py │ │ │ │ │ ├── dataloader │ │ │ │ │ │ └── __init__.py │ │ │ │ │ └── __init__.py │ │ │ │ └── setup.py │ │ │ │ ├── NOTICE │ │ │ │ ├── img │ │ │ │ ├── logo.png │ │ │ │ ├── vs-gv-wn18.png │ │ │ │ ├── vs-pbg-fb.png │ │ │ │ ├── dgl_ke_arch.PNG │ │ │ │ └── vs-gv-fb15k.png │ │ │ │ ├── CONTRIBUTORS.md │ │ │ │ ├── docs │ │ │ │ ├── images │ │ │ │ │ ├── metis.png │ │ │ │ │ ├── rescal.png │ │ │ │ │ ├── rescal2.png │ │ │ │ │ ├── rescal3.png │ │ │ │ │ ├── rotate.png │ │ │ │ │ ├── transe.png │ │ │ │ │ ├── transr.png │ │ │ │ │ ├── distmult.png │ │ │ │ │ ├── multi-gpu.png │ │ │ │ │ ├── vs-pbg-fb.png │ │ │ │ │ ├── dgl_ke_arch.png │ │ │ │ │ ├── dist_train.png │ │ │ │ │ ├── kg_example.png │ │ │ │ │ ├── kge_scores.png │ │ │ │ │ ├── multi-core.png │ │ │ │ │ └── vs-gv-fb15k.png │ │ │ │ ├── .gitignore │ │ │ │ ├── source │ │ │ │ │ └── profile.rst │ │ │ │ ├── Makefile │ │ │ │ └── make.bat │ │ │ │ ├── CODE_OF_CONDUCT.md │ │ │ │ └── README.md │ │ └── pcqm4m │ │ │ └── deeper_gcn.py │ ├── linkproppred │ │ ├── biokg │ │ │ ├── README.md │ │ │ └── examples.sh │ │ ├── wikikg2 │ │ │ └── README.md │ │ ├── ddi │ │ │ └── README.md │ │ ├── ppa │ │ │ └── README.md │ │ └── collab │ │ │ └── README.md │ ├── nodeproppred │ │ ├── proteins │ │ │ └── README.md │ │ └── arxiv │ │ │ └── README.md │ └── README.md ├── ogb │ ├── __init__.py │ ├── io │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ └── checkpoint_utils.py │ ├── linkproppred │ │ ├── __init__.py │ │ └── master.csv │ ├── nodeproppred │ │ ├── __init__.py │ │ └── master.csv │ ├── graphproppred │ │ └── __init__.py │ ├── lsc │ │ └── __init__.py │ └── version.py ├── MANIFEST.in ├── .gitignore ├── scripts │ └── train │ │ └── Dockerfile └── LICENSE ├── paper.pdf ├── Standard Transformer ├── data │ └── input0 │ │ ├── test.bin │ │ ├── test.idx │ │ ├── valid.bin │ │ └── valid.idx ├── pylib │ └── ext │ │ └── __init__.py ├── average_transformer.py └── reproduce.sh └── DataPreprocessing └── obtain_data.py /Two-branch Transformer/molecule/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /GIN/examples/lsc/mag240m/root.py: -------------------------------------------------------------------------------- 1 | ROOT = '.' 
2 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq_cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/tests/gpu/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /GIN/ogb/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/logging/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/tests/distributed/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/conda/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docker/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/molecule/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/models/gnn_kdd/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/version.txt: -------------------------------------------------------------------------------- 1 | 1.0.0a0 2 | -------------------------------------------------------------------------------- /GIN/ogb/io/__init__.py: -------------------------------------------------------------------------------- 1 | from .save_dataset import DatasetSaver -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/quantization/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/tests/speech_recognition/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/.gitignore: -------------------------------------------------------------------------------- 1 | !*/*.sh 2 | !*/*.md 3 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/examples/wn18/rel.list: 
-------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/docutils.conf: -------------------------------------------------------------------------------- 1 | [writers] 2 | option-limit=0 3 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/latent_depth/latent_depth_src/loss/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/linformer/linformer_src/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/linformer/linformer_src/modules/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/VERSION.txt: -------------------------------------------------------------------------------- 1 | 0.1.0.dev 2 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx<2.0 2 | sphinx-argparse 3 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/latent_depth/latent_depth_src/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/latent_depth/latent_depth_src/modules/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/data_scripts/requirement.txt: -------------------------------------------------------------------------------- 1 | wget 2 | pandas -------------------------------------------------------------------------------- /paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/paper.pdf -------------------------------------------------------------------------------- /GIN/ogb/utils/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | from .mol import smiles2graph 3 | except ImportError: 4 | pass -------------------------------------------------------------------------------- /Two-branch Transformer/molecule/dict_label.txt: -------------------------------------------------------------------------------- 1 | 0 1076 2 | 1 95 3 | madeupword0000 0 4 | madeupword0001 0 5 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/examples/wn18/raw_rel.list: -------------------------------------------------------------------------------- 1 | _derivationally_related_form 2 | _hyponym 3 | -------------------------------------------------------------------------------- /Two-branch 
Transformer/examples/adaptive_span/truncated_bptt_lm_task.py: -------------------------------------------------------------------------------- 1 | ../truncated_bptt/truncated_bptt_lm_task.py -------------------------------------------------------------------------------- /Two-branch Transformer/examples/speech_recognition/__init__.py: -------------------------------------------------------------------------------- 1 | from . import criterions, models, tasks # noqa 2 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /GIN/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include ogb/graphproppred/master.csv 2 | include ogb/nodeproppred/master.csv 3 | include ogb/linkproppred/master.csv -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/examples/wn18/head.list: -------------------------------------------------------------------------------- 1 | 0 2 | 1 3 | 2 4 | 3 5 | 4 6 | 5 7 | 6 8 | 7 9 | 8 10 | 9 11 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/examples/wn18/tail.list: -------------------------------------------------------------------------------- 1 | 11 2 | 12 3 | 13 4 | 14 5 | 15 6 | 16 7 | 17 8 | 18 9 | 19 10 | 20 11 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/fairseq.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/docs/fairseq.gif -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/data_scripts/utils/strip_sgm.sh: -------------------------------------------------------------------------------- 1 | grep "seg id" | sed 's/<seg id="[0-9]\+">//g' | sed 's/<\/seg>//g' 2 | -------------------------------------------------------------------------------- /Standard Transformer/data/input0/test.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Standard Transformer/data/input0/test.bin -------------------------------------------------------------------------------- /Standard Transformer/data/input0/test.idx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Standard Transformer/data/input0/test.idx -------------------------------------------------------------------------------- /Standard Transformer/data/input0/valid.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Standard Transformer/data/input0/valid.bin -------------------------------------------------------------------------------- /Standard Transformer/data/input0/valid.idx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Standard 
Transformer/data/input0/valid.idx -------------------------------------------------------------------------------- /Two-branch Transformer/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "wheel", "cython"] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/fairseq_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/docs/fairseq_logo.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/logo.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors of DGL-KE 2 | 3 | * [Zhichen Jiang](https://github.com/sherry-1001): Add a profiler to MXNet KGE models. 4 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/wav2vec/vq_wav2vec_gumbel.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation: gelu 3 | vq_type: gumbel 4 | vq_depth: 2 5 | combine_groups: true 6 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/vs-gv-wn18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/vs-gv-wn18.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/vs-pbg-fb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/vs-pbg-fb.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/dgl_ke_arch.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/dgl_ke_arch.PNG -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/vs-gv-fb15k.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/img/vs-gv-fb15k.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/metis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/metis.png 
-------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rescal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rescal.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rescal2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rescal2.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rescal3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rescal3.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rotate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/rotate.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/transe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/transe.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/transr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/transr.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/distmult.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/distmult.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/multi-gpu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/multi-gpu.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/vs-pbg-fb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/vs-pbg-fb.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/dgl_ke_arch.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/dgl_ke_arch.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/dist_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/dist_train.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/kg_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/kg_example.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/kge_scores.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/kge_scores.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/multi-core.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/multi-core.png -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/vs-gv-fb15k.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/images/vs-gv-fb15k.png -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/noising.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/noising.cpython-36.pyc -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/examples/wn18/raw_head.list: -------------------------------------------------------------------------------- 1 | 02553196 2 | 13068917 3 | 00083809 4 | 02757462 5 | 02321009 6 | 03976960 7 | 08847694 8 | 02537319 9 | 12927354 10 | 01685439 11 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/examples/wn18/raw_tail.list: -------------------------------------------------------------------------------- 1 | 05940414 2 | 01999186 3 | 05494365 4 | 01490112 5 | 08221897 6 | 00719705 7 | 07747951 8 | 07362386 9 | 09440400 10 | 02482139 11 | -------------------------------------------------------------------------------- /Two-branch Transformer/.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "fairseq/model_parallel/megatron"] 2 | path = fairseq/model_parallel/megatron 3 | url = https://github.com/ngoyal2707/Megatron-LM 4 | branch = fairseq 5 | 
-------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/iterators.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/iterators.cpython-36.pyc -------------------------------------------------------------------------------- /GIN/examples/lsc/pcqm4m/deeper_gcn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class DeeperGCN(nn.Module): 6 | def __init__(self, num_tasks=1, num_layers=3, *, args=None): 7 | super().__init__() -------------------------------------------------------------------------------- /Standard Transformer/pylib/ext/__init__.py: -------------------------------------------------------------------------------- 1 | from .kdd_roberta import * 2 | from .kdd_roberta_mb import * 3 | from .sentence_prediction_custom_criterion import * 4 | from .sentence_prediction_custom_task import * 5 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/data_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/data_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/dictionary.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/dictionary.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/id_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/id_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/list_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/list_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/pad_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/pad_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch 
Transformer/fairseq/data/__pycache__/plasma_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/plasma_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/roll_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/roll_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/sort_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/sort_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/concat_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/concat_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/fasta_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/fasta_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/numel_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/numel_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/bytes.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/bytes.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch 
Transformer/fairseq/data/__pycache__/colorize_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/colorize_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/denoising_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/denoising_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/fairseq_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/fairseq_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/indexed_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/indexed_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/lru_cache_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/lru_cache_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/prepend_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/prepend_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/raw_label_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/raw_label_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/replace_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/replace_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/shorten_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/shorten_dataset.cpython-36.pyc -------------------------------------------------------------------------------- 
/Two-branch Transformer/fairseq/data/__pycache__/subsample_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/subsample_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/__pycache__/audio_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/__pycache__/audio_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/byte_bpe.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/byte_bpe.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/fastbpe.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/fastbpe.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/gpt2_bpe.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/gpt2_bpe.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/legacy/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/legacy/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/molecule/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/molecule/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/molecule/__pycache__/molecule.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/molecule/__pycache__/molecule.cpython-36.pyc 
-------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/add_target_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/add_target_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/mask_tokens_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/mask_tokens_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/monolingual_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/monolingual_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/num_samples_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/num_samples_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/resampling_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/resampling_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/strip_token_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/strip_token_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/token_block_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/token_block_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/byte_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/byte_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/characters.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch 
Transformer/fairseq/data/encoders/__pycache__/characters.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/append_token_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/append_token_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/base_wrapper_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/base_wrapper_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/language_pair_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/language_pair_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/offset_tokens_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/offset_tokens_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/prepend_token_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/prepend_token_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/transform_eos_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/transform_eos_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/data_utils_fast.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/data_utils_fast.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/hf_bert_bpe.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/hf_bert_bpe.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/hf_byte_bpe.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/hf_byte_bpe.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/multilingual/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/multilingual/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/backtranslation_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/backtranslation_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/concat_sentences_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/concat_sentences_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/round_robin_zip_datasets.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/round_robin_zip_datasets.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/__pycache__/raw_audio_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/__pycache__/raw_audio_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/gpt2_bpe_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/gpt2_bpe_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/moses_tokenizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/moses_tokenizer.cpython-36.pyc 
-------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/nltk_tokenizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/nltk_tokenizer.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/space_tokenizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/space_tokenizer.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/subword_nmt_bpe.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/subword_nmt_bpe.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/legacy/__pycache__/masked_lm_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/legacy/__pycache__/masked_lm_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/molecule/__pycache__/indexed_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/molecule/__pycache__/indexed_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/bucket_pad_length_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/bucket_pad_length_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/lm_context_window_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/lm_context_window_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/nested_dictionary_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/nested_dictionary_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__pycache__/sentencepiece_bpe.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/encoders/__pycache__/sentencepiece_bpe.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/legacy/__pycache__/block_pair_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/legacy/__pycache__/block_pair_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/legacy/__pycache__/masked_lm_dictionary.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/legacy/__pycache__/masked_lm_dictionary.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/token_block_utils_fast.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/wav2vec2/wav2vec2_base.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | quantize_targets: true 4 | final_dim: 256 5 | encoder_layerdrop: 0.05 6 | dropout_input: 0.1 7 | dropout_features: 0.1 8 | feature_grad_mult: 0.1 9 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/multi_corpus_sampled_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/multi_corpus_sampled_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/__pycache__/speech_to_text_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/__pycache__/speech_to_text_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/multilingual/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/multilingual/__pycache__/sampling_method.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/multilingual/__pycache__/sampling_method.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/__pycache__/transform_eos_lang_pair_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/__pycache__/transform_eos_lang_pair_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/molecule/__pycache__/graphseq_pair_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/molecule/__pycache__/graphseq_pair_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/multilingual/__pycache__/multilingual_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/multilingual/__pycache__/multilingual_utils.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/feature_transforms/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/feature_transforms/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/multilingual/__pycache__/sampled_multi_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/multilingual/__pycache__/sampled_multi_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/feature_transforms/__pycache__/global_cmvn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/feature_transforms/__pycache__/global_cmvn.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/feature_transforms/__pycache__/specaugment.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/feature_transforms/__pycache__/specaugment.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch 
Transformer/fairseq/data/audio/feature_transforms/__pycache__/utterance_cmvn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/audio/feature_transforms/__pycache__/utterance_cmvn.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/multilingual/__pycache__/multilingual_data_manager.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/multilingual/__pycache__/multilingual_data_manager.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/multilingual/__pycache__/sampled_multi_epoch_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TransfromerMeetsGraph/GNNLearner/HEAD/Two-branch Transformer/fairseq/data/multilingual/__pycache__/sampled_multi_epoch_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /Two-branch Transformer/examples/rxf/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import rxf_src # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/_static/theme_overrides.css: -------------------------------------------------------------------------------- 1 | .wy-table-responsive table td kbd { 2 | white-space: nowrap; 3 | } 4 | .wy-table-responsive table td { 5 | white-space: normal !important; 6 | } 7 | .wy-table-responsive { 8 | overflow: visible !important; 9 | } 10 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/noisychannel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .rerank_options import * # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/simultaneous_translation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import models # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/model_parallel/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .model import * # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## 👉 [Please follow one of these issue templates](https://github.com/pytorch/fairseq/issues/new/choose) 👈 2 | 3 | Note: to keep the backlog clean and actionable, issues may be immediately closed if they do not follow one of the above issue templates. 4 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/roberta/commonsense_qa/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import commonsense_qa_task # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/model_parallel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import criterions, models, modules # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/linformer/linformer_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .models import linformer_roberta # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/quantization/scalar/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .utils import quantize_model_ # NOQA 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/pointer_generator/pointer_generator_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import transformer_pg # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/translation_moe/translation_moe_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import translation_moe # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/lightconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .lightconv_layer import LightconvLayer # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/modules.rst: -------------------------------------------------------------------------------- 1 | Modules 2 | ======= 3 | 4 | Fairseq provides several stand-alone :class:`torch.nn.Module` classes that may 5 | be helpful when implementing a new :class:`~fairseq.models.BaseFairseqModel`. 6 | 7 | .. automodule:: fairseq.modules 8 | :members: 9 | :undoc-members: 10 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/roberta/wsc/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import wsc_criterion # noqa 7 | from . import wsc_task # noqa 8 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/truncated_bptt/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import transformer_xl_model, truncated_bptt_lm_task # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .model import * # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/models/bart/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/dynamicconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .dynamicconv_layer import DynamicconvLayer # noqa 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/quantization/pq/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .utils import SizeTracker, quantize_model_ # NOQA 7 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/rxf/rxf_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa 7 | -------------------------------------------------------------------------------- /GIN/ogb/linkproppred/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluate import Evaluator 2 | from .dataset import LinkPropPredDataset 3 | 4 | try: 5 | from .dataset_pyg import PygLinkPropPredDataset 6 | except ImportError: 7 | pass 8 | 9 | try: 10 | from .dataset_dgl import DglLinkPropPredDataset 11 | except (ImportError, OSError): 12 | pass 13 | -------------------------------------------------------------------------------- /GIN/ogb/nodeproppred/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluate import Evaluator 2 | from .dataset import NodePropPredDataset 3 | 4 | try: 5 | from .dataset_pyg import PygNodePropPredDataset 6 | except ImportError: 7 | pass 8 | 9 | try: 10 | from .dataset_dgl import DglNodePropPredDataset 11 | except (ImportError, OSError): 12 | pass 13 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | try: 7 | from fairseq.version import __version__ # noqa 8 | except ImportError: 9 | pass 10 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/thirdparty/.gitignore: -------------------------------------------------------------------------------- 1 | seg_my.py 2 | indic_nlp_library/ 3 | indic_nlp_resources/ 4 | kytea/ 5 | mecab-0.996-ko-0.9.2.tar.gz 6 | mecab-0.996-ko-0.9.2/ 7 | mosesdecoder/ 8 | wat2020.my-en.zip 9 | wat2020.my-en/ 10 | wmt16-scripts/ 11 | mecab-ko-dic-2.1.1-20180720/ 12 | mecab-ko-dic-2.1.1-20180720.tar.gz -------------------------------------------------------------------------------- /GIN/examples/linkproppred/biokg/README.md: -------------------------------------------------------------------------------- 1 | # ogbl-biokg 2 | 3 | This code includes implementation of TransE, DistMult, ComplEx and RotatE with OGB evaluator. It is based on this [repository](https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding). 
4 | 5 | ## Training & Evaluation 6 | 7 | ``` 8 | # Run with default config 9 | bash examples.sh 10 | ``` -------------------------------------------------------------------------------- /Two-branch Transformer/examples/speech_recognition/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | task_name = file[: file.find(".py")] 8 | importlib.import_module("examples.speech_recognition.tasks." + task_name) 9 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | # import models/tasks to register them 7 | from . import dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa 8 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/models/wav2vec/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .wav2vec import * # noqa 7 | from .wav2vec2 import * # noqa 8 | from .wav2vec2_asr import * # noqa 9 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/speech_recognition/models/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | for file in os.listdir(os.path.dirname(__file__)): 6 | if file.endswith(".py") and not file.startswith("_"): 7 | model_name = file[: file.find(".py")] 8 | importlib.import_module("examples.speech_recognition.models." + model_name) 9 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/laser/laser_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .laser_task import * # noqa 7 | from .laser_lstm import * # noqa 8 | from .laser_transformer import * # noqa 9 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/models/speech_to_text/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .berard import * # noqa 7 | from .convtransformer import * # noqa 8 | from .s2t_transformer import * # noqa 9 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/quantization/pq/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .qconv import PQConv2d # NOQA 7 | from .qemb import PQEmbedding # NOQA 8 | from .qlinear import PQLinear # NOQA 9 | -------------------------------------------------------------------------------- /GIN/ogb/graphproppred/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluate import Evaluator 2 | from .dataset import GraphPropPredDataset 3 | 4 | try: 5 | from .dataset_pyg import PygGraphPropPredDataset 6 | except ImportError: 7 | pass 8 | 9 | try: 10 | from .dataset_dgl import DglGraphPropPredDataset 11 | from .dataset_dgl import collate_dgl 12 | except (ImportError, OSError): 13 | pass 14 | -------------------------------------------------------------------------------- /Two-branch Transformer/.github/ISSUE_TEMPLATE/documentation.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 📚 Documentation/Typos 3 | about: Report an issue related to documentation or a typo 4 | labels: 'documentation, needs triage' 5 | --- 6 | 7 | ## 📚 Documentation 8 | 9 | For typos and doc fixes, please go ahead and: 10 | 11 | 1. Create an issue. 12 | 2. Fix the typo. 13 | 3. Submit a PR. 14 | 15 | Thanks! 16 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/fast_noisy_channel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import noisy_channel_translation # noqa 7 | from . import noisy_channel_sequence_generator # noqa 8 | from . import noisy_channel_beam_search # noqa 9 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/dataclass/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .configs import FairseqDataclass 7 | from .constants import ChoiceEnum 8 | 9 | 10 | __all__ = [ 11 | "FairseqDataclass", 12 | "ChoiceEnum", 13 | ] 14 | -------------------------------------------------------------------------------- /GIN/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Distribution / packaging 7 | .Python 8 | build/ 9 | develop-eggs/ 10 | dist/ 11 | downloads/ 12 | eggs/ 13 | .eggs/ 14 | lib/ 15 | lib64/ 16 | parts/ 17 | sdist/ 18 | var/ 19 | wheels/ 20 | pip-wheel-metadata/ 21 | share/python-wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/simultaneous_translation/README.md: -------------------------------------------------------------------------------- 1 | # Simultaneous Translation 2 | Examples of simultaneous translation in fairseq 3 | - [English-to-Japanese text-to-text wait-k model](docs/enja-waitk.md) 4 | - [English-to-German text-to-text monotonic multihead attention model](docs/ende-mma.md) 5 | - [English-to-German speech-to-text simultaneous translation model](../speech_to_text/docs/simulst_mustc_example.md) 6 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/config.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | hydra: 4 | run: 5 | dir: . 6 | 7 | defaults: 8 | - task: null 9 | - model: null 10 | - criterion: cross_entropy 11 | - optimizer: null 12 | - lr_scheduler: fixed 13 | - bpe: null 14 | - tokenizer: null 15 | - scoring: null 16 | - generation: null 17 | - common_eval: null 18 | - eval_lm: null 19 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/tokenize_zh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | 8 | import fileinput 9 | 10 | import sacrebleu 11 | 12 | 13 | for line in fileinput.input(): 14 | print(sacrebleu.tokenize_zh(line)) 15 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/tokenize_thai.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import sys 8 | 9 | from pythainlp import word_tokenize 10 | 11 | 12 | for line in sys.stdin: 13 | print(" ".join(word_tokenize(line.strip()))) 14 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/quantization/scalar/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | from .qact import ActivationQuantizer # NOQA 7 | from .qconv import IntConv2d # NOQA 8 | from .qemb import IntEmbedding # NOQA 9 | from .qlinear import IntLinear # NOQA 10 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | 9 | SPACE_NORMALIZER = re.compile(r"\s+") 10 | 11 | 12 | def tokenize_line(line): 13 | line = SPACE_NORMALIZER.sub(" ", line) 14 | line = line.strip() 15 | return line.split() 16 | -------------------------------------------------------------------------------- /Two-branch Transformer/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | """ 7 | Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead. 8 | """ 9 | 10 | from fairseq_cli.train import cli_main 11 | 12 | 13 | if __name__ == "__main__": 14 | cli_main() 15 | -------------------------------------------------------------------------------- /GIN/ogb/lsc/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | from .pcqm4m import PCQM4MDataset, PCQM4MEvaluator 3 | except ImportError: 4 | pass 5 | 6 | try: 7 | from .pcqm4m_pyg import PygPCQM4MDataset 8 | except ImportError: 9 | pass 10 | 11 | try: 12 | from .pcqm4m_dgl import DglPCQM4MDataset 13 | except (ImportError, OSError): 14 | pass 15 | 16 | from .mag240m import MAG240MDataset, MAG240MEvaluator 17 | from .wikikg90m import WikiKG90MDataset, WikiKG90MEvaluator 18 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/latent_depth/latent_depth_src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import multilingual_translation_latent_depth # noqa 7 | from .loss import latent_depth # noqa 8 | from .models import latent_multilingual_transformer # noqa 9 | from .modules import latent_layers # noqa 10 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | from .enc_dec import * # noqa 9 | from .model_camembert import * # noqa 10 | from .model_gottbert import * # noqa 11 | from .model_xlmr import * # noqa 12 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/seg_ja.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | SCRIPT=`realpath $0` 7 | KYTEA=`dirname $SCRIPT`/thirdparty/kytea 8 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib 9 | export PATH=$PATH:"$KYTEA/bin" 10 | 11 | cat - | tr -d "[:blank:]" | kytea -notags 12 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/seg_ko.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | SCRIPT=`realpath $0` 7 | MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2 8 | 9 | export PATH=$PATH:"$MECAB/bin":"$MECAB/lib" 10 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib" 11 | 12 | cat - | mecab -O wakati 13 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | *.a 27 | *.lib 28 | 29 | # Executables 30 | *.exe 31 | *.out 32 | *.app 33 | 34 | *.DS_Store 35 | *.un~ 36 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/num_samples_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import FairseqDataset 7 | 8 | 9 | class NumSamplesDataset(FairseqDataset): 10 | def __getitem__(self, index): 11 | return 1 12 | 13 | def __len__(self): 14 | return 0 15 | 16 | def collater(self, samples): 17 | return sum(samples) 18 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/wav2vec2/wav2vec2_large.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | quantize_targets: true 4 | extractor_mode: layer_norm 5 | layer_norm_first: true 6 | final_dim: 768 7 | latent_temp: [2.0,0.1,0.999995] 8 | encoder_layerdrop: 0.0 9 | dropout_input: 0.0 10 | dropout_features: 0.0 11 | dropout: 0.0 12 | attention_dropout: 0.0 13 | conv_bias: true 14 | 15 | encoder_layers: 24 16 | encoder_embed_dim: 1024 17 | encoder_ffn_embed_dim: 4096 18 | encoder_attention_heads: 16 19 | 20 | feature_grad_mult: 1.0 21 | -------------------------------------------------------------------------------- /Two-branch Transformer/scripts/spm_train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | from __future__ import absolute_import, division, print_function, unicode_literals 9 | 10 | import sys 11 | 12 | import sentencepiece as spm 13 | 14 | 15 | if __name__ == "__main__": 16 | spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:])) 17 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/id_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class IdDataset(FairseqDataset): 12 | def __getitem__(self, index): 13 | return index 14 | 15 | def __len__(self): 16 | return 0 17 | 18 | def collater(self, samples): 19 | return torch.tensor(samples) 20 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/offset_tokens_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class OffsetTokensDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, offset): 11 | super().__init__(dataset) 12 | self.offset = offset 13 | 14 | def __getitem__(self, idx): 15 | return self.dataset[idx] + self.offset 16 | -------------------------------------------------------------------------------- /Standard Transformer/average_transformer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | predictions = [ 4 | "l1_loss_reg_comb_seed4444.checkpoint119.pt.npz", 5 | "l1_loss_reg_comb_seed88888888.checkpoint119.pt.npz", 6 | "l1_loss_reg_comb_seed22.checkpoint119.pt.npz", 7 | "l1_loss_reg_comb_seed666666.checkpoint119.pt.npz" 8 | ] 9 | 10 | arr = 0 11 | 12 | for p in predictions: 13 | X = np.load("predictions/" + p) 14 | arr += X['y_pred'] 15 | 16 | arr /= len(predictions) 17 | 18 | np.savez_compressed("predictions/y_pred_pcqm4m_comb_test.npz", y_pred = arr.astype(np.float32)) 19 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/grad_multiply.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class GradMultiply(torch.autograd.Function): 10 | @staticmethod 11 | def forward(ctx, x, scale): 12 | ctx.scale = scale 13 | res = x.new(x) 14 | return res 15 | 16 | @staticmethod 17 | def backward(ctx, grad): 18 | return grad * ctx.scale, None 19 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .block_pair_dataset import BlockPairDataset 7 | from .masked_lm_dataset import MaskedLMDataset 8 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary 9 | 10 | 11 | __all__ = [ 12 | "BertDictionary", 13 | "BlockPairDataset", 14 | "MaskedLMDataset", 15 | "MaskedLMDictionary", 16 | ] 17 | -------------------------------------------------------------------------------- /Two-branch Transformer/scripts/compound_split_bleu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ]; then 4 | echo "usage: $0 GENERATE_PY_OUTPUT" 5 | exit 1 6 | fi 7 | 8 | GEN=$1 9 | 10 | SYS=$GEN.sys 11 | REF=$GEN.ref 12 | 13 | if [ $(tail -n 1 $GEN | grep BLEU | wc -l) -ne 1 ]; then 14 | echo "not done generating" 15 | exit 16 | fi 17 | 18 | grep ^H $GEN | awk -F '\t' '{print $NF}' | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $SYS 19 | grep ^T $GEN | cut -f2- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $REF 20 | fairseq-score --sys $SYS --ref $REF 21 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/models/nat/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | from .fairseq_nat_model import * 8 | from .nonautoregressive_transformer import * 9 | from .nat_crf_transformer import * 10 | from .iterative_nonautoregressive_transformer import * 11 | from .cmlm_transformer import * 12 | from .levenshtein_transformer import * 13 | from .insertion_transformer import * 14 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/ML50_langs.txt: -------------------------------------------------------------------------------- 1 | ar_AR 2 | cs_CZ 3 | de_DE 4 | en_XX 5 | es_XX 6 | et_EE 7 | fi_FI 8 | fr_XX 9 | gu_IN 10 | hi_IN 11 | it_IT 12 | ja_XX 13 | kk_KZ 14 | ko_KR 15 | lt_LT 16 | lv_LV 17 | my_MM 18 | ne_NP 19 | nl_XX 20 | ro_RO 21 | ru_RU 22 | si_LK 23 | tr_TR 24 | vi_VN 25 | zh_CN 26 | af_ZA 27 | az_AZ 28 | bn_IN 29 | fa_IR 30 | he_IL 31 | hr_HR 32 | id_ID 33 | ka_GE 34 | km_KH 35 | mk_MK 36 | ml_IN 37 | mn_MN 38 | mr_IN 39 | pl_PL 40 | ps_AF 41 | pt_XX 42 | sv_SE 43 | sw_KE 44 | ta_IN 45 | te_IN 46 | th_TH 47 | tl_XX 48 | uk_UA 49 | ur_PK 50 | xh_ZA 51 | gl_ES 52 | sl_SI -------------------------------------------------------------------------------- /Two-branch Transformer/examples/simultaneous_translation/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | for file in os.listdir(os.path.dirname(__file__)): 11 | if file.endswith(".py") and not file.startswith("_"): 12 | model_name = file[: file.find(".py")] 13 | importlib.import_module( 14 | "examples.simultaneous_translation.models." + model_name 15 | ) 16 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/roll_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class RollDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, shifts): 13 | super().__init__(dataset) 14 | self.shifts = shifts 15 | 16 | def __getitem__(self, index): 17 | item = self.dataset[index] 18 | return torch.roll(item, self.shifts) 19 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/speech_recognition/hydra/conf/infer.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | defaults: 4 | - task: null 5 | - model: null 6 | 7 | hydra: 8 | run: 9 | dir: ${common_eval.results_path}/${dataset.gen_subset} 10 | sweep: 11 | dir: ${common_eval.results_path} 12 | subdir: ${dataset.gen_subset} 13 | common_eval: 14 | results_path: ${decoding.exp_dir}/decode/${decoding.decoder.name} 15 | path: ${decoding.exp_dir}/checkpoint_best.pt 16 | post_process: letter 17 | generation: 18 | nbest: 1 19 | beam: 500 20 | dataset: 21 | max_tokens: 1000000 22 | gen_subset: test 23 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/model_parallel/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the criterions/ directory 11 | for file in os.listdir(os.path.dirname(__file__)): 12 | if file.endswith(".py") and not file.startswith("_"): 13 | module = file[: file.find(".py")] 14 | importlib.import_module("fairseq.model_parallel.criterions." + module) 15 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/simultaneous_translation/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the criterions/ directory 11 | for file in os.listdir(os.path.dirname(__file__)): 12 | if file.endswith(".py") and not file.startswith("_"): 13 | module = file[: file.find(".py")] 14 | importlib.import_module("examples.simultaneous_translation.utils." + module) 15 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/speech_recognition/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | 5 | # ASG loss requires flashlight bindings 6 | files_to_skip = set() 7 | try: 8 | import flashlight.lib.sequence.criterion 9 | except ImportError: 10 | files_to_skip.add("ASG_loss.py") 11 | 12 | for file in os.listdir(os.path.dirname(__file__)): 13 | if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip: 14 | criterion_name = file[: file.find(".py")] 15 | importlib.import_module( 16 | "examples.speech_recognition.criterions." 
+ criterion_name 17 | ) 18 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/source/profile.rst: -------------------------------------------------------------------------------- 1 | Profile DGL-KE 2 | -------------- 3 | 4 | This document is mainly for developing the DGL-KE models and accelerating their training. 5 | 6 | To analyze the MXNet version of the KE models, please enable the `MXNET_PROFILER` environment variable when running the training job:: 7 | 8 | MXNET_PROFILER=1 dglke_train --model_name TransE_l2 --dataset FB15k --batch_size 1000 --neg_sample_size 200 --hidden_dim 400 \ 9 | --gamma 19.9 --lr 0.25 --max_step 3000 --log_interval 100 --batch_size_eval 16 --test -adv \ 10 | --regularization_coef 1.00E-09 --num_thread 1 --num_proc 8 11 | 12 | 13 | -------------------------------------------------------------------------------- /Two-branch Transformer/scripts/sacrebleu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 4 ]; then 4 | echo "usage: $0 TESTSET SRCLANG TGTLANG GEN" 5 | exit 1 6 | fi 7 | 8 | TESTSET=$1 9 | SRCLANG=$2 10 | TGTLANG=$3 11 | 12 | GEN=$4 13 | 14 | if ! command -v sacremoses &> /dev/null 15 | then 16 | echo "sacremoses could not be found, please install with: pip install sacremoses" 17 | exit 18 | fi 19 | 20 | grep ^H $GEN \ 21 | | sed 's/^H\-//' \ 22 | | sort -n -k 1 \ 23 | | cut -f 3 \ 24 | | sacremoses detokenize \ 25 | > $GEN.sorted.detok 26 | 27 | sacrebleu --test-set $TESTSET --language-pair "${SRCLANG}-${TGTLANG}" < $GEN.sorted.detok 28 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/model_parallel/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | from .multihead_attention import ModelParallelMultiheadAttention 8 | from .transformer_layer import ( 9 | ModelParallelTransformerEncoderLayer, 10 | ModelParallelTransformerDecoderLayer, 11 | ) 12 | 13 | __all__ = [ 14 | "ModelParallelMultiheadAttention", 15 | "ModelParallelTransformerEncoderLayer", 16 | "ModelParallelTransformerDecoderLayer", 17 | ] 18 | -------------------------------------------------------------------------------- /Standard Transformer/reproduce.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=0 2 | 3 | export PYTHONPATH=/tmp/standard_transformer/pylib 4 | 5 | MODEL=roberta 6 | DATA=/tmp/standard_transformer/data 7 | MODEL_FOLDER=/tmp/standard_transformer/checkpoints 8 | 9 | mkdir -p predictions 10 | 11 | TF_models=( 12 | "l1_loss_reg_comb_seed22.checkpoint119.pt" 13 | "l1_loss_reg_comb_seed4444.checkpoint119.pt" 14 | "l1_loss_reg_comb_seed666666.checkpoint119.pt" 15 | "l1_loss_reg_comb_seed88888888.checkpoint119.pt" 16 | ) 17 | 18 | for modelname in ${TF_models[@]}; do 19 | python infer_test.py $DATA $MODEL_FOLDER $modelname --bsz 64 20 | done 21 | 22 | python average_transformer.py 23 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/same_pad.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | from torch import nn 8 | 9 | 10 | class SamePad(nn.Module): 11 | def __init__(self, kernel_size, causal=False): 12 | super().__init__() 13 | if causal: 14 | self.remove = kernel_size - 1 15 | else: 16 | self.remove = 1 if kernel_size % 2 == 0 else 0 17 | 18 | def forward(self, x): 19 | if self.remove > 0: 20 | x = x[:, :, : -self.remove] 21 | return x 22 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/transpose_last.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | transpose last 2 dimensions of the input 7 | """ 8 | 9 | import torch.nn as nn 10 | 11 | 12 | class TransposeLast(nn.Module): 13 | def __init__(self, deconstruct_idx=None): 14 | super().__init__() 15 | self.deconstruct_idx = deconstruct_idx 16 | 17 | def forward(self, x): 18 | if self.deconstruct_idx is not None: 19 | x = x[self.deconstruct_idx] 20 | return x.transpose(-2, -1) 21 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/raw_label_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class RawLabelDataset(FairseqDataset): 12 | def __init__(self, labels): 13 | super().__init__() 14 | self.labels = labels 15 | 16 | def __getitem__(self, index): 17 | return self.labels[index] 18 | 19 | def __len__(self): 20 | return len(self.labels) 21 | 22 | def collater(self, samples): 23 | return torch.tensor(samples) 24 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = source 8 | BUILDDIR = build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/lru_cache_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from functools import lru_cache 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class LRUCacheDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, token=None): 13 | super().__init__(dataset) 14 | 15 | @lru_cache(maxsize=8) 16 | def __getitem__(self, index): 17 | return self.dataset[index] 18 | 19 | @lru_cache(maxsize=8) 20 | def collater(self, samples): 21 | return self.dataset.collater(samples) 22 | -------------------------------------------------------------------------------- /Two-branch Transformer/.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Before submitting 2 | 3 | - [ ] Was this discussed/approved via a Github issue? (no need for typos, doc improvements) 4 | - [ ] Did you read the [contributor guideline](https://github.com/pytorch/fairseq/blob/master/CONTRIBUTING.md)? 5 | - [ ] Did you make sure to update the docs? 6 | - [ ] Did you write any new necessary tests? 7 | 8 | ## What does this PR do? 9 | Fixes # (issue). 10 | 11 | ## PR review 12 | Anyone in the community is free to review the PR once the tests have passed. 13 | If we didn't discuss your PR in Github issues there's a high chance it will not be merged. 14 | 15 | ## Did you have fun? 16 | Make sure you had fun coding 🙃 17 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = fairseq 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /Two-branch Transformer/examples/roberta/commonsense_qa/download_cqa_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | OUTDIR=data/CommonsenseQA 8 | 9 | mkdir -p $OUTDIR 10 | 11 | wget -O $OUTDIR/train.jsonl https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl 12 | wget -O $OUTDIR/valid.jsonl https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl 13 | wget -O $OUTDIR/test.jsonl https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl 14 | wget -O $OUTDIR/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt 15 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/unfold.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import torch.nn.functional as F 7 | 8 | 9 | def unfold1d(x, kernel_size, padding_l, pad_value=0): 10 | """unfold T x B x C to T x B x C x K""" 11 | if kernel_size > 1: 12 | T, B, C = x.size() 13 | x = F.pad( 14 | x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value 15 | ) 16 | x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) 17 | else: 18 | x = x.unsqueeze(3) 19 | return x 20 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/space_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | from fairseq.data.encoders import register_tokenizer 9 | from fairseq.dataclass import FairseqDataclass 10 | 11 | 12 | @register_tokenizer("space", dataclass=FairseqDataclass) 13 | class SpaceTokenizer(object): 14 | def __init__(self, *unused): 15 | self.space_tok = re.compile(r"\s+") 16 | 17 | def encode(self, x: str) -> str: 18 | return self.space_tok.sub(" ", x) 19 | 20 | def decode(self, x: str) -> str: 21 | return x 22 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/lightconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="lightconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | "lightconv_cuda", 16 | [ 17 | "lightconv_cuda.cpp", 18 | "lightconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /GIN/ogb/version.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from threading import Thread 4 | 5 | __version__ = '1.3.1' 6 | 7 | try: 8 | os.environ['OUTDATED_IGNORE'] = '1' 9 | from outdated import check_outdated # noqa 10 | except ImportError: 11 | check_outdated = None 12 | 13 | 14 | def check(): 15 | try: 16 | is_outdated, latest = check_outdated('ogb', __version__) 17 | if is_outdated: 18 | logging.warning( 19 | f'The OGB package is out of date. Your version is ' 20 | f'{__version__}, while the latest version is {latest}.') 21 | except Exception: 22 | pass 23 | 24 | 25 | if check_outdated is not None: 26 | thread = Thread(target=check) 27 | thread.start() 28 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/sort_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class SortDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, sort_order): 13 | super().__init__(dataset) 14 | if not isinstance(sort_order, (list, tuple)): 15 | sort_order = [sort_order] 16 | self.sort_order = sort_order 17 | 18 | assert all(len(so) == len(dataset) for so in sort_order) 19 | 20 | def ordered_indices(self): 21 | return np.lexsort(self.sort_order) 22 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/dynamicconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 9 | 10 | 11 | setup( 12 | name="dynamicconv_layer", 13 | ext_modules=[ 14 | CUDAExtension( 15 | name="dynamicconv_cuda", 16 | sources=[ 17 | "dynamicconv_cuda.cpp", 18 | "dynamicconv_cuda_kernel.cu", 19 | ], 20 | ), 21 | ], 22 | cmdclass={"build_ext": BuildExtension}, 23 | ) 24 | -------------------------------------------------------------------------------- /GIN/examples/linkproppred/wikikg2/README.md: -------------------------------------------------------------------------------- 1 | # ogbl-wikikg2 2 | 3 | **Note (Dec 29, 2020)**: The older version `ogbl-wikikg` is deprecated because negative samples used in validation and test sets are found to be quite biased (i.e., half of the entity nodes are never sampled as negative examples). `ogbl-wikikg2` (available from `ogb>=1.2.4` ) fixes this issue while retaining everyelse the same. The leaderboard results of `ogbl-wikikg` and `ogbl-wikikg2` are *not* comparable. 4 | 5 | This code includes implementation of TransE, DistMult, ComplEx and RotatE with OGB evaluator. It is based on this [repository](https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding). 6 | 7 | ## Training & Evaluation 8 | 9 | ``` 10 | # Run with default config 11 | bash examples.sh 12 | ``` -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/strip_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class StripTokenDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, id_to_strip): 11 | super().__init__(dataset) 12 | self.id_to_strip = id_to_strip 13 | 14 | def __getitem__(self, index): 15 | item = self.dataset[index] 16 | while len(item) > 0 and item[-1] == self.id_to_strip: 17 | item = item[:-1] 18 | while len(item) > 0 and item[0] == self.id_to_strip: 19 | item = item[1:] 20 | return item 21 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/data_scripts/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Install dependency 3 | ```bash 4 | pip install -r requirement.txt 5 | ``` 6 | 7 | # Download the data set 8 | ```bash 9 | export WORKDIR_ROOT= 10 | 11 | ``` 12 | The downloaded data will be at $WORKDIR_ROOT/ML50 13 | 14 | # preprocess the data 15 | Install SPM [here](https://github.com/google/sentencepiece) 16 | ```bash 17 | export WORKDIR_ROOT= 18 | export SPM_PATH= 19 | ``` 20 | * $WORKDIR_ROOT/ML50/raw: extracted raw data 21 | * $WORKDIR_ROOT/ML50/dedup: dedup data 22 | * $WORKDIR_ROOT/ML50/clean: data with valid and test sentences removed from the dedup data 23 | 24 | 25 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/adaptive_span/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | # automatically import any Python files in the current directory 10 | cur_dir = os.path.dirname(__file__) 11 | for file in os.listdir(cur_dir): 12 | path = os.path.join(cur_dir, file) 13 | if ( 14 | not file.startswith("_") 15 | and not file.startswith(".") 16 | and (file.endswith(".py") or os.path.isdir(path)) 17 | ): 18 | mod_name = file[: file.find(".py")] if file.endswith(".py") else file 19 | module = importlib.import_module(__name__ + "." + mod_name) 20 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/models/mxnet/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # __init__.py 4 | # 5 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/models/pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # __init__.py 4 | # 5 | # Copyright 2020 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # -------------------------------------------------------------------------------- /Two-branch Transformer/examples/speech_recognition/hydra/conf/hydra/sweeper/ax.yaml: -------------------------------------------------------------------------------- 1 | # @package hydra.sweeper 2 | _target_: hydra_plugins.hydra_ax_sweeper.ax_sweeper.AxSweeper 3 | max_batch_size: null 4 | ax_config: 5 | max_trials: 100 6 | early_stop: 7 | minimize: true 8 | max_epochs_without_improvement: 10 9 | epsilon: 1.0e-05 10 | experiment: 11 | name: ${dataset.gen_subset} 12 | objective_name: wer 13 | minimize: true 14 | parameter_constraints: null 15 | outcome_constraints: null 16 | status_quo: null 17 | client: 18 | verbose_logging: false 19 | random_seed: null 20 | params: 21 | decoding.decoder.lmweight: 22 | type: range 23 | bounds: [0.0, 5.0] 24 | decoding.decoder.wordscore: 25 | type: range 26 | bounds: [-5.0, 5.0] 27 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/clib/libnat_cuda/edit_dist.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #pragma once 10 | 11 | #include <torch/extension.h> 12 | 13 | torch::Tensor LevenshteinDistanceCuda( 14 | torch::Tensor source, 15 | torch::Tensor target, 16 | torch::Tensor source_length, 17 | torch::Tensor target_length); 18 | 19 | torch::Tensor GenerateDeletionLabelCuda( 20 | torch::Tensor source, 21 | torch::Tensor operations); 22 | 23 | std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda( 24 | torch::Tensor source, 25 | torch::Tensor operations); 26 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/simultaneous_translation/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | 11 | 12 | ( 13 | build_monotonic_attention, 14 | register_monotonic_attention, 15 | MONOTONIC_ATTENTION_REGISTRY, 16 | _, 17 | ) = registry.setup_registry("--simul-type") 18 | 19 | for file in os.listdir(os.path.dirname(__file__)): 20 | if file.endswith(".py") and not file.startswith("_"): 21 | model_name = file[: file.find(".py")] 22 | importlib.import_module( 23 | "examples.simultaneous_translation.modules."
+ model_name 24 | ) 25 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/model_parallel/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.model_parallel.models." + model_name) 21 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/models/huggingface/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | 10 | # automatically import any Python files in the models/huggingface/ directory 11 | models_dir = os.path.dirname(__file__) 12 | for file in os.listdir(models_dir): 13 | path = os.path.join(models_dir, file) 14 | if ( 15 | not file.startswith("_") 16 | and not file.startswith(".") 17 | and (file.endswith(".py") or os.path.isdir(path)) 18 | ): 19 | model_name = file[: file.find(".py")] if file.endswith(".py") else file 20 | module = importlib.import_module("fairseq.models.huggingface." + model_name) 21 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/gelu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with 7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs 8 | """ 9 | 10 | import math 11 | 12 | import torch 13 | import torch.nn as nn 14 | 15 | 16 | def gelu_accurate(x): 17 | if not hasattr(gelu_accurate, "_a"): 18 | gelu_accurate._a = math.sqrt(2 / math.pi) 19 | return ( 20 | 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) 21 | ) 22 | 23 | 24 | def gelu(x: torch.Tensor) -> torch.Tensor: 25 | return torch.nn.functional.gelu(x.float()).type_as(x) 26 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/README.md: -------------------------------------------------------------------------------- 1 | # M2M-100 Tokenization 2 | 3 | We apply different tokenization strategies for different languages following the existing literature. Here we provide tok.sh a tokenizer that can be used to reproduce our results. 4 | 5 | To reproduce the results, follow these steps: 6 | 7 | ``` 8 | tgt_lang=... 9 | reference_translation=... 
10 | cat generation_output | grep -P "^H" | sort -V | cut -f 3- | sh tok.sh $tgt_lang > hyp 11 | cat $reference_translation |sh tok.sh $tgt_lang > ref 12 | sacrebleu -tok 'none' ref < hyp 13 | ``` 14 | 15 | ## Installation 16 | 17 | Tools needed for all the languages except Arabic can be installed by running install_dependencies.sh 18 | If you want to evaluate Arabic models, please follow the instructions provided here: http://alt.qcri.org/tools/arabic-normalizer/ to install 19 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/characters.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | SPACE = chr(32) 11 | SPACE_ESCAPE = chr(9601) 12 | 13 | 14 | @register_bpe("characters") 15 | class Characters(object): 16 | def __init__(self, *unused): 17 | pass 18 | 19 | @staticmethod 20 | def add_args(parser): 21 | pass 22 | 23 | @staticmethod 24 | def encode(x: str) -> str: 25 | escaped = x.replace(SPACE, SPACE_ESCAPE) 26 | return SPACE.join(list(escaped)) 27 | 28 | @staticmethod 29 | def decode(x: str) -> str: 30 | return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) 31 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # __init__.py 4 | # 5 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # 19 | 20 | from .KGDataset import * 21 | from .sampler import * 22 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/constrained_decoding/normalize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree. 
7 | 8 | import sys 9 | 10 | from sacremoses.normalize import MosesPunctNormalizer 11 | 12 | 13 | def main(args): 14 | normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn) 15 | for line in sys.stdin: 16 | print(normalizer.normalize(line.rstrip()), flush=True) 17 | 18 | 19 | if __name__ == "__main__": 20 | import argparse 21 | 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument("--lang", "-l", default="en") 24 | parser.add_argument("--penn", "-p", action="store_true") 25 | args = parser.parse_args() 26 | 27 | main(args) 28 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/tokenize_indic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | # Use: echo {text} | python tokenize_indic.py {language} 8 | 9 | import sys 10 | 11 | from indicnlp.normalize.indic_normalize import IndicNormalizerFactory 12 | from indicnlp.tokenize.indic_tokenize import trivial_tokenize 13 | 14 | 15 | factory = IndicNormalizerFactory() 16 | normalizer = factory.get_normalizer( 17 | sys.argv[1], remove_nuktas=False, nasals_mode="do_nothing" 18 | ) 19 | 20 | for line in sys.stdin: 21 | normalized_line = normalizer.normalize(line.strip()) 22 | tokenized_line = " ".join(trivial_tokenize(normalized_line, sys.argv[1])) 23 | print(tokenized_line) 24 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/fp32_group_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """ 6 | Layer norm done in fp32 (for fp16 training) 7 | """ 8 | 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | 12 | 13 | class Fp32GroupNorm(nn.GroupNorm): 14 | def __init__(self, *args, **kwargs): 15 | super().__init__(*args, **kwargs) 16 | 17 | def forward(self, input): 18 | output = F.group_norm( 19 | input.float(), 20 | self.num_groups, 21 | self.weight.float() if self.weight is not None else None, 22 | self.bias.float() if self.bias is not None else None, 23 | self.eps, 24 | ) 25 | return output.type_as(input) 26 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/distributed/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from .distributed_timeout_wrapper import DistributedTimeoutWrapper 7 | from .fully_sharded_data_parallel import fsdp_enable_wrap, fsdp_wrap, FullyShardedDataParallel 8 | from .legacy_distributed_data_parallel import LegacyDistributedDataParallel 9 | from .module_proxy_wrapper import ModuleProxyWrapper 10 | from .tpu_distributed_data_parallel import TPUDistributedDataParallel 11 | 12 | 13 | __all__ = [ 14 | "DistributedTimeoutWrapper", 15 | "fsdp_enable_wrap", 16 | "fsdp_wrap", 17 | "FullyShardedDataParallel", 18 | "LegacyDistributedDataParallel", 19 | "ModuleProxyWrapper", 20 | "TPUDistributedDataParallel", 21 | ] 22 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # -*- coding: utf-8 -*- 4 | # 5 | # setup.py 6 | # 7 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | # 21 | 22 | from setuptools import setup 23 | 24 | if __name__ == "__main__": 25 | setup() 26 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/nltk_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_tokenizer 7 | from fairseq.dataclass import FairseqDataclass 8 | 9 | 10 | @register_tokenizer("nltk", dataclass=FairseqDataclass) 11 | class NLTKTokenizer(object): 12 | def __init__(self, *unused): 13 | try: 14 | from nltk.tokenize import word_tokenize 15 | 16 | self.word_tokenize = word_tokenize 17 | except ImportError: 18 | raise ImportError("Please install nltk with: pip install nltk") 19 | 20 | def encode(self, x: str) -> str: 21 | return " ".join(self.word_tokenize(x)) 22 | 23 | def decode(self, x: str) -> str: 24 | return x 25 | -------------------------------------------------------------------------------- /Two-branch Transformer/.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🚀 Feature Request 3 | about: Submit a proposal/request for a new feature 4 | labels: 'enhancement, help wanted, needs triage' 5 | --- 6 | 7 | ## 🚀 Feature Request 8 | 9 | 10 | ### Motivation 11 | 12 | 13 | 14 | ### Pitch 15 | 16 | 17 | 18 | ### Alternatives 19 | 20 | 21 | 22 | ### Additional context 23 | 24 | 25 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/list_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. 
and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class ListDataset(BaseWrapperDataset): 10 | def __init__(self, dataset, sizes=None): 11 | super().__init__(dataset) 12 | self._sizes = sizes 13 | 14 | def __iter__(self): 15 | for x in self.dataset: 16 | yield x 17 | 18 | def collater(self, samples): 19 | return samples 20 | 21 | @property 22 | def sizes(self): 23 | return self._sizes 24 | 25 | def num_tokens(self, index): 26 | return self.sizes[index] 27 | 28 | def size(self, index): 29 | return self.sizes[index] 30 | 31 | def set_epoch(self, epoch): 32 | pass 33 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/linformer/README.md: -------------------------------------------------------------------------------- 1 | # Linformer: Self-Attention with Linear Complexity (Wang et al., 2020) 2 | 3 | This example contains code to train Linformer models as described in our paper 4 | [Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768). 5 | 6 | ## Training a new Linformer RoBERTa model 7 | 8 | You can mostly follow the [RoBERTa pretraining README](/examples/roberta/README.pretraining.md), 9 | updating your training command with `--user-dir examples/linformer/linformer_src --arch linformer_roberta_base`. 10 | 11 | ## Citation 12 | 13 | If you use our work, please cite: 14 | 15 | ```bibtex 16 | @article{wang2020linformer, 17 | title={Linformer: Self-Attention with Linear Complexity}, 18 | author={Wang, Sinong and Li, Belinda and Khabsa, Madian and Fang, Han and Ma, Hao}, 19 | journal={arXiv preprint arXiv:2006.04768}, 20 | year={2020} 21 | } 22 | ``` 23 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/criterions.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | .. _Criterions: 5 | 6 | Criterions 7 | ========== 8 | 9 | Criterions compute the loss function given the model and batch, roughly:: 10 | 11 | loss = criterion(model, batch) 12 | 13 | .. automodule:: fairseq.criterions 14 | :members: 15 | 16 | .. autoclass:: fairseq.criterions.FairseqCriterion 17 | :members: 18 | :undoc-members: 19 | 20 | .. autoclass:: fairseq.criterions.adaptive_loss.AdaptiveLoss 21 | :members: 22 | :undoc-members: 23 | .. autoclass:: fairseq.criterions.composite_loss.CompositeLoss 24 | :members: 25 | :undoc-members: 26 | .. autoclass:: fairseq.criterions.cross_entropy.CrossEntropyCriterion 27 | :members: 28 | :undoc-members: 29 | .. autoclass:: fairseq.criterions.label_smoothed_cross_entropy.LabelSmoothedCrossEntropyCriterion 30 | :members: 31 | :undoc-members: 32 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | 7 | import importlib 8 | import os 9 | 10 | from fairseq import registry 11 | 12 | 13 | build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry( 14 | "--tokenizer", 15 | default=None, 16 | ) 17 | 18 | 19 | build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry( 20 | "--bpe", 21 | default=None, 22 | ) 23 | 24 | 25 | # automatically import any Python files in the encoders/ directory 26 | for file in os.listdir(os.path.dirname(__file__)): 27 | if file.endswith(".py") and not file.startswith("_"): 28 | module = file[: file.find(".py")] 29 | importlib.import_module("fairseq.data.encoders." + module) 30 | -------------------------------------------------------------------------------- /Two-branch Transformer/molecule/shuffle_head.py: -------------------------------------------------------------------------------- 1 | import io 2 | import random 3 | import argparse 4 | import os 5 | 6 | 7 | def main(args): 8 | input_fn = args.fn 9 | lines = io.open(input_fn, 'r', encoding='utf8', newline='\n').readlines() 10 | random.shuffle(lines) 11 | assert args.head_num > 0 12 | if args.output_fn is None: 13 | prefix, ext = os.path.splitext(input_fn) 14 | output_fn = '{}.{}{}'.format(prefix, args.head_num, ext) 15 | else: 16 | output_fn = args.output_fn 17 | 18 | io.open(output_fn, 'w', encoding='utf8', newline='\n').writelines(lines[:args.head_num]) 19 | 20 | 21 | if __name__ == '__main__': 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument('fn', type=str) 24 | parser.add_argument('head_num', type=int) 25 | parser.add_argument('--output-fn', type=str, default=None) 26 | args = parser.parse_args() 27 | main(args) 28 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/m2m_100/tokenizers/tokenizer_ar.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | # 7 | # Please follow the instructions here http://alt.qcri.org/tools/arabic-normalizer/ 8 | # to install tools needed for Arabic 9 | 10 | echo "Please install Arabic tools: http://alt.qcri.org/tools/arabic-normalizer/" 11 | echo "Then update environment variables in tokenizer_ar.sh" 12 | exit 1 13 | 14 | SVMTOOL=... 15 | GOMOSESGO=... 16 | QCRI_ARABIC_NORMALIZER=... 17 | 18 | export PERL5LIB="$SVMTOOL/lib":"$GOMOSESGO/bin/MADA-3.2":$PERL5LIB 19 | 20 | 21 | tempfile=$(mktemp) 22 | cat - > $tempfile 23 | 24 | cd $QCRI_ARABIC_NORMALIZER 25 | 26 | bash qcri_normalizer_mada3.2_aramorph1.2.1.sh $tempfile 27 | cat $tempfile.mada_norm-aramorph.europarl_tok 28 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/scoring/chrf.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from fairseq.scoring import BaseScorer, register_scorer 7 | 8 | 9 | @register_scorer("chrf") 10 | class ChrFScorer(BaseScorer): 11 | def __init__(self, args): 12 | super(ChrFScorer, self).__init__(args) 13 | import sacrebleu 14 | 15 | self.sacrebleu = sacrebleu 16 | 17 | def add_string(self, ref, pred): 18 | self.ref.append(ref) 19 | self.pred.append(pred) 20 | 21 | def score(self, order=4): 22 | return self.result_string(order).score 23 | 24 | def result_string(self, order=4): 25 | if order != 4: 26 | raise NotImplementedError 27 | return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format() 28 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/examples/wn18_weighted/README.md: -------------------------------------------------------------------------------- 1 | # Weighted WN18 Example 2 | This example shows how to train a knowledge graph with weighted edges (each edge has an importance score) 3 | 4 | ## How to get data 5 | ``` 6 | >>> wget https://dgl-data.s3-us-west-2.amazonaws.com/dataset/KGE_Examples/wn18_weighted_edge/wn18_weighted.tgz 7 | >>> tar -zxf wn18_weighted.tgz 8 | >>> ls wn18_weighted 9 | README entities.dict relations.dict test_weight.txt train_weight.txt valid_weight.txt 10 | ``` 11 | 12 | ## How to train 13 | ``` 14 | dglke_train --model_name TransE_l1 --dataset wn18-weight --format raw_udd_hrt --data_files train_weight.txt valid_weight.txt test_weight.txt --data_path ./data/wn18_weighted/ --batch_size 2048 --log_interval 1000 --neg_sample_size 128 --regularization_coef 2e-07 --hidden_dim 512 --gamma 12.0 --lr 0.007 --batch_size_eval 16 --test -adv --gpu 0 --max_step 32000 --has_edge_importance 15 | ``` 16 | -------------------------------------------------------------------------------- /Two-branch Transformer/tests/gpu/transformer_quantization_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | # This file defines example configuration arguments for quantizing 7 | # a transformer model with product quantization 8 | 9 | n_centroids: 10 | Linear: 11 | key: in_features 12 | value: {"*": 8} 13 | Embedding: 14 | key: embedding_dim 15 | value: {"*": 8} 16 | 17 | block_sizes: 18 | Linear: 19 | key: fuzzy_name 20 | value: {fc: 8, attn: 4, emb: 4} 21 | Embedding: 22 | key: fuzzy_name 23 | value: {emb: 8} 24 | 25 | layers_to_quantize: 26 | - decoder\\.layers\\.\d+\\.fc[12] 27 | - decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01] 28 | - decoder\\.layers\\.\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj) 29 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. 
Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # __init__.py 4 | # 5 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # 19 | 20 | #!/usr/bin/env python3 21 | 22 | import pkg_resources 23 | 24 | __version__ = pkg_resources.resource_string("dglke", "VERSION.txt").decode("utf-8").strip() 25 | -------------------------------------------------------------------------------- /Two-branch Transformer/.github/ISSUE_TEMPLATE/how-to-question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ❓ Questions/Help 3 | about: If you have questions, please first search existing issues and docs 4 | labels: 'question, needs triage' 5 | --- 6 | 7 | ## ❓ Questions and Help 8 | 9 | ### Before asking: 10 | 1. search the issues. 11 | 2. search the docs. 12 | 13 | 14 | 15 | #### What is your question? 16 | 17 | #### Code 18 | 19 | 20 | 21 | #### What have you tried? 22 | 23 | #### What's your environment? 24 | 25 | - fairseq Version (e.g., 1.0 or master): 26 | - PyTorch Version (e.g., 1.0) 27 | - OS (e.g., Linux): 28 | - How you installed fairseq (`pip`, source): 29 | - Build command you used (if compiling from source): 30 | - Python version: 31 | - CUDA/cuDNN version: 32 | - GPU models and configuration: 33 | - Any other relevant information: 34 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/multilingual_fairseq_gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
7 | 8 | lang_pairs="en-fr,en-cs,fr-en,cs-en" 9 | path_2_data=$1 # 10 | lang_list=$2 # 11 | model=$3 # 12 | source_lang=cs 13 | target_lang=en 14 | 15 | fairseq-generate "$path_2_data" \ 16 | --path "$model" \ 17 | --task translation_multi_simple_epoch \ 18 | --gen-subset test \ 19 | --source-lang "$source_lang" \ 20 | --target-lang "$target_lang" \ 21 | --sacrebleu --remove-bpe 'sentencepiece'\ 22 | --batch-size 32 \ 23 | --encoder-langtok "src" \ 24 | --decoder-langtok \ 25 | --lang-dict "$lang_list" \ 26 | --lang-pairs "$lang_pairs" 27 | -------------------------------------------------------------------------------- /GIN/examples/linkproppred/ddi/README.md: -------------------------------------------------------------------------------- 1 | # ogbl-ddi 2 | 3 | This repository includes the following example scripts: 4 | 5 | * **[MLP](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/ddi/mlp.py)**: Full-batch MLP training based on Node2Vec features. This script requires node embeddings be saved in `embedding.pt`. To generate them, please run `python node2vec.py` [requires `torch-geometric>=1.5.0`]. 6 | * **[GNN](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/ddi/gnn.py)**: Full-batch GNN training using either the GCN or GraphSAGE operator (`--use_sage`) [requires `torch-geometric>=1.6.0`]. 7 | * **[Matrix Factorization](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/ddi/mf.py)**: Full-batch Matrix Factorization training. 8 | 9 | ## Training & Evaluation 10 | 11 | ``` 12 | # Run with default config 13 | python mlp.py 14 | 15 | # Run with custom config 16 | python mlp.py --hidden_channels=128 17 | ``` 18 | -------------------------------------------------------------------------------- /GIN/examples/linkproppred/ppa/README.md: -------------------------------------------------------------------------------- 1 | # ogbl-ppa 2 | 3 | This repository includes the following example scripts: 4 | 5 | * **[MLP](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/ppa/mlp.py)**: Full-batch MLP training based on Node2Vec features. This script requires node embeddings be saved in `embedding.pt`. To generate them, please run `python node2vec.py` [requires `torch-geometric>=1.5.0`]. 6 | * **[GNN](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/ppa/gnn.py)**: Full-batch GNN training using either the GCN or GraphSAGE operator (`--use_sage`) [requires `torch-geometric>=1.6.0`]. 7 | * **[Matrix Factorization](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/ppa/mf.py)**: Full-batch Matrix Factorization training. 8 | 9 | ## Training & Evaluation 10 | 11 | ``` 12 | # Run with default config 13 | python mlp.py 14 | 15 | # Run with custom config 16 | python mlp.py --hidden_channels=128 17 | ``` 18 | -------------------------------------------------------------------------------- /GIN/examples/nodeproppred/proteins/README.md: -------------------------------------------------------------------------------- 1 | # ogbn-proteins 2 | 3 | This repository includes the following example scripts: 4 | 5 | * **[MLP](https://github.com/snap-stanford/ogb/blob/master/examples/nodeproppred/proteins/mlp.py)**: Full-batch MLP training based on average incoming edge features and optional Node2Vec features (`--use_node_embedding`). For training with Node2Vec features, this script requires node embeddings be saved in `embedding.pt`. 
To generate them, please run `python node2vec.py` [requires `torch-geometric>=1.5.0`]. 6 | * **[GNN](https://github.com/snap-stanford/ogb/blob/master/examples/nodeproppred/proteins/gnn.py)**: Full-batch GNN training using either the GCN or GraphSAGE operator (`--use_sage`) [requires `torch-geometric>=1.6.0`]. 7 | 8 | ## Training & Evaluation 9 | 10 | ``` 11 | # Run with default config 12 | python gnn.py 13 | 14 | # Run with custom config (adding dropout may improve performance) 15 | python gnn.py --dropout 0.5 16 | ``` 17 | -------------------------------------------------------------------------------- /GIN/examples/README.md: -------------------------------------------------------------------------------- 1 | # OGB Examples 2 | 3 | We provide baseline experiments/example scripts for **[node property prediction tasks](https://github.com/snap-stanford/ogb/tree/master/examples/nodeproppred)**, **[link property prediction tasks](https://github.com/snap-stanford/ogb/tree/master/examples/linkproppred)** and **[graph property prediction tasks](https://github.com/snap-stanford/ogb/tree/master/examples/graphproppred)** on *all* datasets currently included in OGB. 4 | We additionally provide **[our baseline code](https://github.com/snap-stanford/ogb/tree/master/examples/lsc)** for our **[OGB-LSC @ KDD Cup 2021](https://ogb.stanford.edu/kddcup2021/)**. 5 | 6 | Each dataset is hosted in its own folder and provides separate examples for each individual model. 7 | Unless otherwise specified, the default hyper-parameters are what we used to generate the results in our paper and leaderboards. 8 | Please read their respective `README.md` files for further information on how to run them. 9 | -------------------------------------------------------------------------------- /Two-branch Transformer/data_splitter.py: -------------------------------------------------------------------------------- 1 | import os 2 | from string import ascii_lowercase 3 | 4 | def worker(idx): 5 | X = "train.x.bpe" 6 | Y = "train.y" 7 | Z = "train.17144.y" 8 | for ch in ascii_lowercase[:4]: 9 | if ch == idx: 10 | continue 11 | X += f" valid.x.bpea{ch}" 12 | Y += f" valid.ya{ch}" 13 | Z += f" valid.17144.ya{ch}" 14 | 15 | os.system(f"cat {X} > train.aug.{idx}.x") 16 | os.system(f"cat {Y} > train.aug.{idx}.yr") 17 | os.system(f"cat {Z} > train.aug.{idx}.yc") 18 | os.system(f"cat valid.x.bpea{idx} > valid.aug.{idx}.x") 19 | os.system(f"cat valid.ya{idx} > valid.aug.{idx}.yr") 20 | os.system(f"cat valid.17144.ya{idx} > valid.aug.{idx}.yc") 21 | 22 | 23 | os.system("split -l 95168 valid.y valid.y") 24 | os.system("split -l 95168 valid.x.bpe valid.x.bpe") 25 | os.system("split -l 95168 valid.17144.y valid.17144.y") 26 | 27 | worker("a") 28 | worker("b") 29 | worker("c") 30 | worker("d") -------------------------------------------------------------------------------- /Two-branch Transformer/scripts/convert_dictionary.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (c) Facebook, Inc. and its affiliates. 2 | -- 3 | -- This source code is licensed under the MIT license found in the 4 | -- LICENSE file in the root directory of this source tree. 5 | -- 6 | -- Usage: convert_dictionary.lua 7 | require 'fairseq' 8 | require 'torch' 9 | require 'paths' 10 | 11 | if #arg < 1 then 12 | print('usage: convert_dictionary.lua ') 13 | os.exit(1) 14 | end 15 | if not paths.filep(arg[1]) then 16 | print('error: file does not exit: ' .. 
arg[1]) 17 | os.exit(1) 18 | end 19 | 20 | dict = torch.load(arg[1]) 21 | dst = paths.basename(arg[1]):gsub('.th7', '.txt') 22 | assert(dst:match('.txt$')) 23 | 24 | f = io.open(dst, 'w') 25 | for idx, symbol in ipairs(dict.index_to_symbol) do 26 | if idx > dict.cutoff then 27 | break 28 | end 29 | f:write(symbol) 30 | f:write(' ') 31 | f:write(dict.index_to_freq[idx]) 32 | f:write('\n') 33 | end 34 | f:close() 35 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=fairseq 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/colorize_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class ColorizeDataset(BaseWrapperDataset): 12 | """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ 13 | 14 | def __init__(self, dataset, color_getter): 15 | super().__init__(dataset) 16 | self.color_getter = color_getter 17 | 18 | def collater(self, samples): 19 | base_collate = super().collater(samples) 20 | if len(base_collate) > 0: 21 | base_collate["net_input"]["colors"] = torch.tensor( 22 | list(self.color_getter(self.dataset, s["id"]) for s in samples), 23 | dtype=torch.long, 24 | ) 25 | return base_collate 26 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/pad_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import data_utils 7 | 8 | from . 
import BaseWrapperDataset 9 | 10 | 11 | class PadDataset(BaseWrapperDataset): 12 | def __init__(self, dataset, pad_idx, left_pad): 13 | super().__init__(dataset) 14 | self.pad_idx = pad_idx 15 | self.left_pad = left_pad 16 | 17 | def collater(self, samples): 18 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) 19 | 20 | 21 | class LeftPadDataset(PadDataset): 22 | def __init__(self, dataset, pad_idx): 23 | super().__init__(dataset, pad_idx, left_pad=True) 24 | 25 | 26 | class RightPadDataset(PadDataset): 27 | def __init__(self, dataset, pad_idx): 28 | super().__init__(dataset, pad_idx, left_pad=False) 29 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/clib/libbleu/module.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include <Python.h> 10 | 11 | 12 | static PyMethodDef method_def[] = { 13 | {NULL, NULL, 0, NULL} 14 | }; 15 | 16 | static struct PyModuleDef module_def = { 17 | PyModuleDef_HEAD_INIT, 18 | "libbleu", /* name of module */ 19 | NULL, /* module documentation, may be NULL */ 20 | -1, /* size of per-interpreter state of the module, 21 | or -1 if the module keeps state in global variables. */ 22 | method_def 23 | }; 24 | 25 | 26 | #if PY_MAJOR_VERSION == 2 27 | PyMODINIT_FUNC init_libbleu() 28 | #else 29 | PyMODINIT_FUNC PyInit_libbleu() 30 | #endif 31 | { 32 | PyObject *m = PyModule_Create(&module_def); 33 | if (!m) { 34 | return NULL; 35 | } 36 | return m; 37 | } 38 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/models/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # __init__.py 4 | # 5 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # 19 | 20 | from .general_models import KEModel, InferModel 21 | from .ke_model import TransEModel, TransE_l2Model, TransE_l1Model, DistMultModel, TransRModel, ComplExModel, RESCALModel, RotatEModel, GNNModel 22 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/megatron_11b/detok.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -u 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree.
6 | 7 | import argparse 8 | import fileinput 9 | 10 | import sacremoses 11 | 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser(description="") 15 | parser.add_argument("files", nargs="*", help="input files") 16 | args = parser.parse_args() 17 | 18 | detok = sacremoses.MosesDetokenizer() 19 | 20 | for line in fileinput.input(args.files, openhook=fileinput.hook_compressed): 21 | print( 22 | detok.detokenize(line.strip().split(" ")) 23 | .replace(" @", "") 24 | .replace("@ ", "") 25 | .replace(" =", "=") 26 | .replace("= ", "=") 27 | .replace(" – ", "–") 28 | ) 29 | 30 | 31 | if __name__ == "__main__": 32 | main() 33 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/unsupervised_quality_estimation/repeat_lines.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import argparse 7 | import sys 8 | 9 | 10 | def _normalize_spaces(line): 11 | return " ".join(line.split()) 12 | 13 | 14 | def main(): 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("-i", "--input_file", required=True, type=str) 17 | parser.add_argument("-n", "--repeat_times", required=True, type=int) 18 | parser.add_argument("-o", "--output_file", required=False, type=str) 19 | args = parser.parse_args() 20 | stream = open(args.output_file, "w") if args.output_file else sys.stdout 21 | 22 | for line in open(args.input_file): 23 | for _ in range(args.repeat_times): 24 | stream.write(_normalize_spaces(line) + "\n") 25 | 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/translation_moe/translation_moe_src/logsumexp_moe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class LogSumExpMoE(torch.autograd.Function): 10 | """Standard LogSumExp forward pass, but use *posterior* for the backward. 11 | 12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" 13 | (Shen et al., 2019) `_. 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, logp, posterior, dim=-1): 18 | ctx.save_for_backward(posterior) 19 | ctx.dim = dim 20 | return torch.logsumexp(logp, dim=dim) 21 | 22 | @staticmethod 23 | def backward(ctx, grad_output): 24 | (posterior,) = ctx.saved_tensors 25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior 26 | return grad_logp, None, None 27 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/data_scripts/preprocess_ML50_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | if [ -z $WORKDIR_ROOT ] ; 9 | then 10 | echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." 
11 | exit 12 | fi 13 | 14 | if [ -z $SPM_PATH ] ; 15 | then 16 | echo "Please install sentence piecence from https://github.com/google/sentencepiece and set SPM_PATH pointing to the installed spm_encode.py. Exitting..." 17 | exit 18 | fi 19 | 20 | ML50=${WORKDIR_ROOT}/ML50 21 | 22 | mkdir -p $ML50/dedup 23 | mkdir -p $ML50/cleaned_dedup 24 | 25 | python ./dedup_all.py --from-folder $ML50/raw --to-folder $ML50/dedup 26 | python ./remove_valid_test_in_train.py --from-folder $ML50/dedup --to-folder $ML50/clean 27 | python ./binarize.py --raw-folder $ML50/clean -------------------------------------------------------------------------------- /Two-branch Transformer/examples/language_model/prepare-wikitext-103.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh 3 | 4 | URLS=( 5 | "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip" 6 | ) 7 | FILES=( 8 | "wikitext-103-v1.zip" 9 | ) 10 | 11 | for ((i=0;i<${#URLS[@]};++i)); do 12 | file=${FILES[i]} 13 | if [ -f $file ]; then 14 | echo "$file already exists, skipping download" 15 | else 16 | url=${URLS[i]} 17 | wget "$url" 18 | if [ -f $file ]; then 19 | echo "$url successfully downloaded." 20 | else 21 | echo "$url not successfully downloaded." 22 | exit -1 23 | fi 24 | if [ ${file: -4} == ".tgz" ]; then 25 | tar zxvf $file 26 | elif [ ${file: -4} == ".tar" ]; then 27 | tar xvf $file 28 | elif [ ${file: -4} == ".zip" ]; then 29 | unzip $file 30 | fi 31 | fi 32 | done 33 | cd .. 34 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/bytes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
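# Usage sketch (illustrative; relies on byte_encode/smart_byte_decode imported
# below): Bytes.encode("abc") -> "a b c" (one space-separated symbol per byte),
# and Bytes.decode("a b c") -> "abc", so Bytes.decode(Bytes.encode(s)) is
# expected to round-trip well-formed text.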
5 | 6 | 7 | from fairseq.data.encoders import register_bpe 8 | from fairseq.data.encoders.byte_utils import ( 9 | SPACE, 10 | SPACE_ESCAPE, 11 | byte_encode, 12 | smart_byte_decode, 13 | ) 14 | 15 | 16 | @register_bpe("bytes") 17 | class Bytes(object): 18 | def __init__(self, *unused): 19 | pass 20 | 21 | @staticmethod 22 | def add_args(parser): 23 | pass 24 | 25 | @staticmethod 26 | def encode(x: str) -> str: 27 | encoded = byte_encode(x) 28 | escaped = encoded.replace(SPACE, SPACE_ESCAPE) 29 | return SPACE.join(list(escaped)) 30 | 31 | @staticmethod 32 | def decode(x: str) -> str: 33 | unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) 34 | return smart_byte_decode(unescaped) 35 | -------------------------------------------------------------------------------- /GIN/examples/linkproppred/biokg/examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | CUDA_VISIBLE_DEVICES=0 python3.5 run.py --do_train --cuda --do_valid --do_test --evaluate_train \ 5 | --model TransE -n 128 -b 512 -d 2000 -g 20 -a 1.0 -adv \ 6 | -lr 0.0001 --max_steps 300000 --cpu_num 2 --test_batch_size 32 7 | 8 | CUDA_VISIBLE_DEVICES=1 python3.5 run.py --do_train --cuda --do_valid --do_test --evaluate_train \ 9 | --model DistMult -n 128 -b 512 -d 2000 -g 500 -a 1.0 -adv \ 10 | -lr 0.001 --max_steps 300000 --cpu_num 2 --test_batch_size 32 -r 0.000002 11 | 12 | CUDA_VISIBLE_DEVICES=2 python3.5 run.py --do_train --cuda --do_valid --do_test --evaluate_train \ 13 | --model RotatE -n 128 -b 512 -d 1000 -g 20 -a 1.0 -adv \ 14 | -lr 0.0001 --max_steps 300000 --cpu_num 2 --test_batch_size 32 -de 15 | 16 | CUDA_VISIBLE_DEVICES=3 python3.5 run.py --do_train --cuda --do_valid --do_test --evaluate_train \ 17 | --model ComplEx -n 128 -b 512 -d 1000 -g 500 -a 1.0 -adv \ 18 | -lr 0.001 --max_steps 300000 --cpu_num 2 --test_batch_size 32 -de -dr -r 0.000002 19 | -------------------------------------------------------------------------------- /GIN/scripts/train/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y vim 5 | 6 | # Used for OGB 7 | RUN conda install -y -c dglteam dgl-cuda10.1 8 | 9 | RUN conda install -y numpy scipy matplotlib ipython 10 | RUN conda install -y scikit-learn 11 | RUN conda install -y tqdm 12 | 13 | # Used for RDKit 14 | RUN apt-get install -y libxrender1 libxext6 15 | RUN conda install -y rdkit>=2019.03.1 -c rdkit 16 | 17 | RUN conda install -y tensorboard==2.5.0 18 | 19 | RUN python -m pip install --no-index torch-scatter -f https://pytorch-geometric.com/whl/torch-1.6.0+cu101.html \ 20 | && python -m pip install --no-index torch-sparse -f https://pytorch-geometric.com/whl/torch-1.6.0+cu101.html \ 21 | && python -m pip install --no-index torch-cluster -f https://pytorch-geometric.com/whl/torch-1.6.0+cu101.html \ 22 | && python -m pip install --no-index torch-spline-conv -f https://pytorch-geometric.com/whl/torch-1.6.0+cu101.html \ 23 | && python -m pip install torch-geometric 24 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/optim.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | .. _optimizers: 5 | 6 | Optimizers 7 | ========== 8 | 9 | Optimizers update the Model parameters based on the gradients. 10 | 11 | .. 
automodule:: fairseq.optim 12 | :members: 13 | 14 | .. autoclass:: fairseq.optim.FairseqOptimizer 15 | :members: 16 | :undoc-members: 17 | 18 | .. autoclass:: fairseq.optim.adadelta.Adadelta 19 | :members: 20 | :undoc-members: 21 | .. autoclass:: fairseq.optim.adagrad.Adagrad 22 | :members: 23 | :undoc-members: 24 | .. autoclass:: fairseq.optim.adafactor.FairseqAdafactor 25 | :members: 26 | :undoc-members: 27 | .. autoclass:: fairseq.optim.adam.FairseqAdam 28 | :members: 29 | :undoc-members: 30 | .. autoclass:: fairseq.optim.fp16_optimizer.FP16Optimizer 31 | :members: 32 | :undoc-members: 33 | .. autoclass:: fairseq.optim.nag.FairseqNAG 34 | :members: 35 | :undoc-members: 36 | .. autoclass:: fairseq.optim.sgd.SGD 37 | :members: 38 | :undoc-members: 39 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/constrained_decoding/tok.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import sys 9 | 10 | import sacremoses 11 | 12 | 13 | def main(args): 14 | """Tokenizes, preserving tabs""" 15 | mt = sacremoses.MosesTokenizer(lang=args.lang) 16 | 17 | def tok(s): 18 | return mt.tokenize(s, return_str=True) 19 | 20 | for line in sys.stdin: 21 | parts = list(map(tok, line.split("\t"))) 22 | print(*parts, sep="\t", flush=True) 23 | 24 | 25 | if __name__ == "__main__": 26 | import argparse 27 | 28 | parser = argparse.ArgumentParser() 29 | parser.add_argument("--lang", "-l", default="en") 30 | parser.add_argument("--penn", "-p", action="store_true") 31 | parser.add_argument("--fields", "-f", help="fields to tokenize") 32 | args = parser.parse_args() 33 | 34 | main(args) 35 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | #include <vector> 3 | 4 | std::vector<float*> dynamicconv_cpu_forward( 5 | float* input, 6 | float* filters, 7 | int padding_l); 8 | 9 | std::vector<float*> dynamicconv_cpu_backward( 10 | float* gradOutput, 11 | int padding_l, 12 | float* input, 13 | float* filters); 14 | 15 | std::vector<float*> dynamicconv_forward( 16 | float* input, 17 | float* filters, 18 | int padding_l) { 19 | 20 | return dynamicconv_cpu_forward(input, filters, padding_l); 21 | } 22 | 23 | std::vector<float*> dynamicconv_backward( 24 | float* gradOutput, 25 | int padding_l, 26 | float* input, 27 | float* filters) { 28 | 29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); 30 | } 31 | 32 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); 34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); 35 | } 36 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/modules/scalar_bias.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
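# Shape note (derived from the forward/backward below): for an input of size n
# along `dim`, the output has size n + 1 along `dim`; slot 0 is filled with
# `bias_init` and slots 1..n copy the input, while backward() simply drops the
# gradient of the extra slot.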
5 | # 6 | 7 | import torch 8 | 9 | 10 | class ScalarBias(torch.autograd.Function): 11 | """ 12 | Adds a vector of scalars, used in self-attention mechanism to allow 13 | the model to optionally attend to this vector instead of the past 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, input, dim, bias_init): 18 | size = list(input.size()) 19 | size[dim] += 1 20 | output = input.new(*size).fill_(bias_init) 21 | output.narrow(dim, 1, size[dim] - 1).copy_(input) 22 | ctx.dim = dim 23 | return output 24 | 25 | @staticmethod 26 | def backward(ctx, grad): 27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None 28 | 29 | 30 | def scalar_bias(input, dim, bias_init=0): 31 | return ScalarBias.apply(input, dim, bias_init) 32 | -------------------------------------------------------------------------------- /DataPreprocessing/obtain_data.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | from ogb.lsc import PCQM4MDataset 5 | 6 | if len(sys.argv) <= 1: 7 | print('Usage: python obtain_data.py path_to_smiles_datasets') 8 | exit(1) 9 | 10 | output_path = sys.argv[1] 11 | os.makedirs(output_path, exist_ok=True) 12 | 13 | dataset = PCQM4MDataset(root="/tmp/ogb", only_smiles=True) 14 | 15 | split_dict = dataset.get_idx_split() 16 | 17 | train_dataset = [dataset[i] for i in split_dict['train']] 18 | valid_dataset = [dataset[i] for i in split_dict['valid']] 19 | test_dataset = [dataset[i] for i in split_dict['test']] 20 | 21 | def writer(prefix, dump_list): 22 | fw_x = open(output_path + '/' + prefix + ".x", "w", encoding="utf8") 23 | fw_y = open(output_path + '/' + prefix + ".y", "w", encoding="utf8") 24 | 25 | for (x,y) in dump_list: 26 | print(x.strip(), file=fw_x) 27 | print(y, file=fw_y) 28 | 29 | fw_x.close() 30 | fw_y.close() 31 | 32 | writer("train", train_dataset) 33 | writer("valid", valid_dataset) 34 | writer("test", test_dataset) 35 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/encoders/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from fairseq.data import encoders 8 | 9 | 10 | def get_whole_word_mask(args, dictionary): 11 | bpe = encoders.build_bpe(args) 12 | if bpe is not None: 13 | 14 | def is_beginning_of_word(i): 15 | if i < dictionary.nspecial: 16 | # special elements are always considered beginnings 17 | return True 18 | tok = dictionary[i] 19 | if tok.startswith("madeupword"): 20 | return True 21 | try: 22 | return bpe.is_beginning_of_word(tok) 23 | except ValueError: 24 | return True 25 | 26 | mask_whole_words = torch.ByteTensor( 27 | list(map(is_beginning_of_word, range(len(dictionary)))) 28 | ) 29 | return mask_whole_words 30 | return None 31 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/numel_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | from torch_geometric.data import Data 9 | from . 
import BaseWrapperDataset 10 | 11 | 12 | class NumelDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, reduce=False): 14 | super().__init__(dataset) 15 | self.reduce = reduce 16 | 17 | def __getitem__(self, index): 18 | item = self.dataset[index] 19 | if torch.is_tensor(item): 20 | return torch.numel(item) 21 | elif isinstance(item, Data): 22 | return item.num_nodes 23 | else: 24 | return np.size(item) 25 | 26 | def __len__(self): 27 | return len(self.dataset) 28 | 29 | def collater(self, samples): 30 | if self.reduce: 31 | return sum(samples) 32 | else: 33 | return torch.tensor(samples) 34 | -------------------------------------------------------------------------------- /GIN/ogb/nodeproppred/master.csv: -------------------------------------------------------------------------------- 1 | ,ogbn-proteins,ogbn-products,ogbn-arxiv,ogbn-mag,ogbn-papers100M 2 | num tasks,112,1,1,1,1 3 | num classes,2,47,40,349,172 4 | eval metric,rocauc,acc,acc,acc,acc 5 | task type,binary classification,multiclass classification,multiclass classification,multiclass classification,multiclass classification 6 | download_name,proteins,products,arxiv,mag,papers100M-bin 7 | version,1,1,1,2,1 8 | url,http://snap.stanford.edu/ogb/data/nodeproppred/proteins.zip,http://snap.stanford.edu/ogb/data/nodeproppred/products.zip,http://snap.stanford.edu/ogb/data/nodeproppred/arxiv.zip,http://snap.stanford.edu/ogb/data/nodeproppred/mag.zip,http://snap.stanford.edu/ogb/data/nodeproppred/papers100M-bin.zip 9 | add_inverse_edge,True,True,False,False,False 10 | has_node_attr,False,True,True,True,True 11 | has_edge_attr,True,False,False,False,False 12 | split,species,sales_ranking,time,time,time 13 | additional node files,node_species,None,node_year,node_year,node_year 14 | additional edge files,None,None,None,edge_reltype,None 15 | is hetero,False,False,False,True,False 16 | binary,False,False,False,False,True 17 | -------------------------------------------------------------------------------- /GIN/ogb/utils/checkpoint_utils.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import torch 4 | 5 | 6 | def restore_checkpoint(model: 'torch.nn.Module', optimizer: 'torch.optim.Optimizer', lr_scheduler, 7 | checkpoint_dir: str, 8 | checkpoint_name: str, reset_optimizer: bool = False, reset_lr_scheduler: bool = False): 9 | checkpoint_path = Path(checkpoint_dir) / checkpoint_name 10 | 11 | if not checkpoint_dir or not checkpoint_path.exists(): 12 | print('| No exist checkpoints, train from scratch') 13 | return 0 14 | checkpoint = torch.load(checkpoint_path) 15 | 16 | print(f'| Restore from {checkpoint_path} (epoch {checkpoint["epoch"]})') 17 | model.load_state_dict(checkpoint['model_state_dict']) 18 | 19 | if not reset_optimizer: 20 | optimizer.load_state_dict(checkpoint['optimizer_state_dict']) 21 | if not reset_lr_scheduler: 22 | lr_scheduler.load_state_dict(checkpoint['scheduler_state_dict']) 23 | start_epoch = checkpoint['epoch'] 24 | else: 25 | start_epoch = 0 26 | 27 | return start_epoch 28 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/prepend_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
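# Behavior note: despite its name, PrependDataset below does not insert an
# extra token; it overwrites position 0 of each item with the index returned
# by prepend_getter(dataset, idx), optionally asserting first that the
# original token at position 0 equals ensure_first_token_is.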
5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class PrependDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, prepend_getter, ensure_first_token_is=None): 14 | super().__init__(dataset) 15 | self.prepend_getter = prepend_getter 16 | self.ensure_first_token = ensure_first_token_is 17 | 18 | def __getitem__(self, idx): 19 | item = self.dataset[idx] 20 | is_tuple = isinstance(item, tuple) 21 | src = item[0] if is_tuple else item 22 | 23 | assert self.ensure_first_token is None or src[0] == self.ensure_first_token 24 | prepend_idx = self.prepend_getter(self.dataset, idx) 25 | assert isinstance(prepend_idx, int) 26 | src[0] = prepend_idx 27 | item = tuple((src,) + item[1:]) if is_tuple else src 28 | return item 29 | -------------------------------------------------------------------------------- /Two-branch Transformer/molecule/merge_state_dict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import os 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument( 7 | "--nmt", 8 | type=str, 9 | default="/blob/v-jinhzh/model/pretrainmol/checkpoints/retrosys/uspto50k/checkpoint100.pt", 10 | ) 11 | parser.add_argument( 12 | "--plm", 13 | type=str, 14 | default="/blob/v-jinhzh/model/pretrainmol/checkpoints/bntg-pubchem-10m-doublemodel-tu-125000-wu-10000-lr-0.0005-uf-16-mt-12288-usemlm-usecontrastive-usebottleneckhead-bottleneckratio-4/checkpoint41.pt", 15 | ) 16 | args = parser.parse_args() 17 | 18 | 19 | def load_state_from_ckt(fn): 20 | assert os.path.exists(fn), "{} does not exist!".format(fn) 21 | return torch.load(fn, map_location=torch.device("cpu")) 22 | 23 | 24 | nmt_state = load_state_from_ckt(args.nmt) 25 | plm_state = load_state_from_ckt(args.plm) 26 | 27 | for k, v in plm_state['model'].items(): 28 | nmt_state['model']["encoder.plm_encoder.{}".format(k)] = v 29 | 30 | save_fn = "{}.plm".format(args.nmt) 31 | torch.save(nmt_state, save_fn) 32 | print("save to {}".format(save_fn)) -------------------------------------------------------------------------------- /Two-branch Transformer/scripts/constraints/validate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # 5 | # This source code is licensed under the MIT license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import sys 9 | 10 | 11 | """Reads in a fairseq output file, and verifies that the constraints 12 | (C- lines) are present in the output (the first H- line). Assumes that 13 | constraints are listed prior to the first hypothesis. 
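Illustrative input fragment (fields are tab-separated; layout inferred from
the parsing below):

    C-0     <constraint phrase>
    H-0     -0.52   <hypothesis text, expected to contain the phrase>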
14 | """ 15 | 16 | constraints = [] 17 | found = 0 18 | total = 0 19 | for line in sys.stdin: 20 | if line.startswith("C-"): 21 | constraints.append(line.rstrip().split("\t")[1]) 22 | elif line.startswith("H-"): 23 | text = line.split("\t")[2] 24 | 25 | for constraint in constraints: 26 | total += 1 27 | if constraint in text: 28 | found += 1 29 | else: 30 | print(f"No {constraint} in {text}", file=sys.stderr) 31 | 32 | constraints = [] 33 | 34 | print(f"Found {found} / {total} = {100 * found / total:.1f}%") 35 | -------------------------------------------------------------------------------- /GIN/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 OGB Team 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Two-branch Transformer/tests/test_iopath.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import unittest 7 | from unittest import mock 8 | 9 | 10 | class TestIOPath(unittest.TestCase): 11 | 12 | def test_no_iopath(self): 13 | from .test_reproducibility import TestReproducibility 14 | 15 | with mock.patch.dict("sys.modules", {"iopath": None}): 16 | # reuse reproducibility tests, which are e2e tests that should cover 17 | # most checkpoint related functionality 18 | TestReproducibility._test_reproducibility(self, "test_reproducibility") 19 | 20 | def test_no_supports_rename(self): 21 | from .test_reproducibility import TestReproducibility 22 | 23 | with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn: 24 | mock_fn.return_value = False 25 | TestReproducibility._test_reproducibility(self, "test_reproducibility") 26 | 27 | 28 | if __name__ == "__main__": 29 | unittest.main() 30 | -------------------------------------------------------------------------------- /GIN/ogb/linkproppred/master.csv: -------------------------------------------------------------------------------- 1 | ,ogbl-ppa,ogbl-collab,ogbl-citation2,ogbl-wikikg2,ogbl-ddi,ogbl-biokg 2 | eval metric,hits@100,hits@50,mrr,mrr,hits@20,mrr 3 | task type,link prediction,link prediction,link prediction,KG completion,link prediction,KG completion 4 | download_name,ppassoc,collab,citation-v2,wikikg-v2,ddi,biokg 5 | version,1,1,1,1,1,1 6 | url,http://snap.stanford.edu/ogb/data/linkproppred/ppassoc.zip,http://snap.stanford.edu/ogb/data/linkproppred/collab.zip,http://snap.stanford.edu/ogb/data/linkproppred/citation-v2.zip,http://snap.stanford.edu/ogb/data/linkproppred/wikikg-v2.zip,http://snap.stanford.edu/ogb/data/linkproppred/ddi.zip,http://snap.stanford.edu/ogb/data/linkproppred/biokg.zip 7 | add_inverse_edge,True,True,False,False,True,False 8 | has_node_attr,True,True,True,False,False,False 9 | has_edge_attr,False,False,False,False,False,False 10 | split,throughput,time,time,time,target,random 11 | additional node files,None,None,node_year,None,None,None 12 | additional edge files,None,"edge_weight,edge_year",None,edge_reltype,None,edge_reltype 13 | is hetero,False,False,False,False,False,True 14 | binary,False,False,False,False,False,False 15 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/data_scripts/download_ML50_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | if [ -z $WORKDIR_ROOT ] ; 9 | then 10 | echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." 
11 | exit 12 | fi 13 | 14 | # first run download_wmt20.sh; it will install a few useful tools for other scripts 15 | # TODO: need to print out instructions on downloading a few files which requires manually authentication from the websites 16 | bash ./download_wmt20.sh 17 | 18 | python ./download_wmt19_and_before.py 19 | bash ./download_wat19_my.sh 20 | python ./download_ted_and_extract.py 21 | bash ./download_lotus.sh 22 | bash ./download_iitb.sh 23 | bash ./download_af_xh.sh 24 | 25 | 26 | # IWSLT downloading URLs have changed in between; TODO: fix them: 27 | bash ./download_iwslt_and_extract.sh 28 | 29 | # TODO: globalvoices URLs changed; need to be fixed 30 | bash ./download_flores_data.sh 31 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/data/audio/feature_transforms/global_cmvn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from fairseq.data.audio.feature_transforms import ( 3 | AudioFeatureTransform, 4 | register_audio_feature_transform, 5 | ) 6 | 7 | 8 | @register_audio_feature_transform("global_cmvn") 9 | class GlobalCMVN(AudioFeatureTransform): 10 | """Global CMVN (cepstral mean and variance normalization). The global mean 11 | and variance need to be pre-computed and stored in NumPy format (.npz).""" 12 | 13 | @classmethod 14 | def from_config_dict(cls, config=None): 15 | _config = {} if config is None else config 16 | return GlobalCMVN(_config.get("stats_npz_path")) 17 | 18 | def __init__(self, stats_npz_path): 19 | self.stats_npz_path = stats_npz_path 20 | stats = np.load(stats_npz_path) 21 | self.mean, self.std = stats["mean"], stats["std"] 22 | 23 | def __repr__(self): 24 | return self.__class__.__name__ + f'(stats_npz_path="{self.stats_npz_path}")' 25 | 26 | def __call__(self, x): 27 | x = np.subtract(x, self.mean) 28 | x = np.divide(x, self.std) 29 | return x 30 | -------------------------------------------------------------------------------- /Two-branch Transformer/.github/workflows/build_wheels.yml: -------------------------------------------------------------------------------- 1 | name: build_wheels 2 | 3 | on: 4 | push: 5 | branches: 6 | - v[0-9]+.[0-9]+.[x0-9]+ 7 | tags: 8 | - v* 9 | 10 | jobs: 11 | build_wheels: 12 | name: Build wheels on ${{ matrix.os }} 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | matrix: 16 | os: [ubuntu-latest, macos-latest] 17 | 18 | steps: 19 | - uses: actions/checkout@v2 20 | 21 | - name: Install Python 22 | uses: actions/setup-python@v2 23 | with: 24 | python-version: '3.7' 25 | 26 | - name: Install cibuildwheel 27 | run: | 28 | python -m pip install cibuildwheel 29 | 30 | - name: Build wheels for CPython 31 | run: | 32 | python -m cibuildwheel --output-dir dist 33 | env: 34 | CIBW_BUILD: "cp36-*64 cp37-*64 cp38-*64" 35 | CIBW_MANYLINUX_X86_64_IMAGE: manylinux1 36 | CIBW_BEFORE_BUILD: git submodule update --init --recursive && pip install . 
37 | 38 | - uses: actions/upload-artifact@v2 39 | with: 40 | name: wheels 41 | path: ./dist/*.whl 42 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/backtranslation/sacrebleu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 5 ]; then 4 | echo "usage: $0 [dataset=wmt14/full] [langpair=en-de] [databin] [bpecode] [model]" 5 | exit 6 | fi 7 | 8 | 9 | DATASET=$1 10 | LANGPAIR=$2 11 | DATABIN=$3 12 | BPECODE=$4 13 | MODEL=$5 14 | 15 | SRCLANG=$(echo $LANGPAIR | cut -d '-' -f 1) 16 | TGTLANG=$(echo $LANGPAIR | cut -d '-' -f 2) 17 | 18 | 19 | BPEROOT=examples/backtranslation/subword-nmt/subword_nmt 20 | if [ ! -e $BPEROOT ]; then 21 | BPEROOT=subword-nmt/subword_nmt 22 | if [ ! -e $BPEROOT ]; then 23 | echo 'Cloning Subword NMT repository (for BPE pre-processing)...' 24 | git clone https://github.com/rsennrich/subword-nmt.git 25 | fi 26 | fi 27 | 28 | 29 | sacrebleu -t $DATASET -l $LANGPAIR --echo src \ 30 | | sacremoses tokenize -a -l $SRCLANG -q \ 31 | | python $BPEROOT/apply_bpe.py -c $BPECODE \ 32 | | fairseq-interactive $DATABIN --path $MODEL \ 33 | -s $SRCLANG -t $TGTLANG \ 34 | --beam 5 --remove-bpe --buffer-size 1024 --max-tokens 8000 \ 35 | | grep ^H- | cut -f 3- \ 36 | | sacremoses detokenize -l $TGTLANG -q \ 37 | | sacrebleu -t $DATASET -l $LANGPAIR 38 | -------------------------------------------------------------------------------- /Two-branch Transformer/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Facebook, Inc. and its affiliates. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | """isort:skip_file""" 6 | 7 | import importlib 8 | import os 9 | 10 | from fairseq import registry 11 | from fairseq.criterions.fairseq_criterion import ( # noqa 12 | FairseqCriterion, 13 | LegacyFairseqCriterion, 14 | ) 15 | from omegaconf import DictConfig 16 | 17 | 18 | ( 19 | build_criterion_, 20 | register_criterion, 21 | CRITERION_REGISTRY, 22 | CRITERION_DATACLASS_REGISTRY, 23 | ) = registry.setup_registry( 24 | "--criterion", base_class=FairseqCriterion, default="cross_entropy" 25 | ) 26 | 27 | 28 | def build_criterion(cfg: DictConfig, task): 29 | return build_criterion_(cfg, task) 30 | 31 | 32 | # automatically import any Python files in the criterions/ directory 33 | for file in os.listdir(os.path.dirname(__file__)): 34 | if file.endswith(".py") and not file.startswith("_"): 35 | file_name = file[: file.find(".py")] 36 | importlib.import_module("fairseq.criterions." + file_name) 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_big.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "relu" 3 | dropout: 0.1 4 | attention_dropout: 0.0 5 | activation_dropout: 0.0 6 | relu_dropout: 0.0 7 | decoder_embed_dim: 1024 8 | decoder_output_dim: 1024 9 | decoder_input_dim: 1024 10 | decoder_ffn_embed_dim: 4096 11 | decoder_layers: 12 12 | decoder_attention_heads: 16 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: false 15 | adaptive_softmax_cutoff: null 16 | adaptive_softmax_dropout: 0 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: false 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: null 27 | tie_adaptive_weights: false 28 | tie_adaptive_proj: false 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_gbw.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "relu" 3 | dropout: 0.1 4 | attention_dropout: 0.1 5 | activation_dropout: 0.0 6 | relu_dropout: 0.0 7 | decoder_embed_dim: 512 8 | decoder_output_dim: 512 9 | decoder_input_dim: 512 10 | decoder_ffn_embed_dim: 4096 11 | decoder_layers: 12 12 | decoder_attention_heads: 16 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: true 15 | adaptive_softmax_cutoff: null 16 | adaptive_softmax_dropout: 0 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: false 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: null 27 | tie_adaptive_weights: false 28 | tie_adaptive_proj: false 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 
0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_gpt.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "gelu" 3 | dropout: 0.1 4 | attention_dropout: 0.1 5 | activation_dropout: 0.0 6 | relu_dropout: 0.0 7 | decoder_embed_dim: 768 8 | decoder_output_dim: 768 9 | decoder_input_dim: 768 10 | decoder_ffn_embed_dim: 3072 11 | decoder_layers: 12 12 | decoder_attention_heads: 12 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: false 15 | adaptive_softmax_cutoff: null 16 | adaptive_softmax_dropout: 0 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: false 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: null 27 | tie_adaptive_weights: false 28 | tie_adaptive_proj: false 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_baevski_gbw.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "relu" 3 | dropout: 0.1 4 | attention_dropout: 0.1 5 | activation_dropout: 0.0 6 | relu_dropout: 0.0 7 | decoder_embed_dim: 512 8 | decoder_output_dim: 512 9 | decoder_input_dim: 512 10 | decoder_ffn_embed_dim: 4096 11 | decoder_layers: 12 12 | decoder_attention_heads: 16 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: true 15 | adaptive_softmax_cutoff: null 16 | adaptive_softmax_dropout: 0 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: false 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: null 27 | tie_adaptive_weights: false 28 | tie_adaptive_proj: false 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_gpt2_big.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "gelu" 3 | dropout: 0.1 4 | attention_dropout: 0.1 5 | activation_dropout: 0.0 6 | relu_dropout: 0.0 7 | decoder_embed_dim: 1600 8 | decoder_output_dim: 1600 9 | decoder_input_dim: 1600 10 | decoder_ffn_embed_dim: 6400 11 | 
decoder_layers: 48 12 | decoder_attention_heads: 25 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: false 15 | adaptive_softmax_cutoff: null 16 | adaptive_softmax_dropout: 0 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: false 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: null 27 | tie_adaptive_weights: false 28 | tie_adaptive_proj: false 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_gpt2_medium.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "gelu" 3 | dropout: 0.1 4 | attention_dropout: 0.1 5 | activation_dropout: 0.0 6 | relu_dropout: 0.0 7 | decoder_embed_dim: 1280 8 | decoder_output_dim: 1280 9 | decoder_input_dim: 1280 10 | decoder_ffn_embed_dim: 5120 11 | decoder_layers: 36 12 | decoder_attention_heads: 20 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: false 15 | adaptive_softmax_cutoff: null 16 | adaptive_softmax_dropout: 0 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: false 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: null 27 | tie_adaptive_weights: false 28 | tie_adaptive_proj: false 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_gpt2_small.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "gelu" 3 | dropout: 0.1 4 | attention_dropout: 0.1 5 | activation_dropout: 0.0 6 | relu_dropout: 0.0 7 | decoder_embed_dim: 1024 8 | decoder_output_dim: 1024 9 | decoder_input_dim: 1024 10 | decoder_ffn_embed_dim: 4096 11 | decoder_layers: 24 12 | decoder_attention_heads: 16 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: false 15 | adaptive_softmax_cutoff: null 16 | adaptive_softmax_dropout: 0 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: false 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: null 27 | tie_adaptive_weights: false 28 | tie_adaptive_proj: false 29 | decoder_learned_pos: false 30 | 
decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /GIN/examples/lsc/wikikg90m/dgl-ke-ogb-lsc/README.md: -------------------------------------------------------------------------------- 1 | # 2 | 3 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](./LICENSE) 4 | 5 | [Documentation](https://dglke.dgl.ai/doc/) 6 | 7 | # This is an implementation of DGL-KE for OGB-LSC 8 | 9 | Check out [the original repo](https://github.com/awslabs/dgl-ke) for more details. 10 | 11 | ### Cite 12 | 13 | If you use DGL-KE in a scientific publication, we would appreciate citations to the following paper: 14 | 15 | ```bibtex 16 | @inproceedings{DGL-KE, 17 | author = {Zheng, Da and Song, Xiang and Ma, Chao and Tan, Zeyuan and Ye, Zihao and Dong, Jin and Xiong, Hao and Zhang, Zheng and Karypis, George}, 18 | title = {DGL-KE: Training Knowledge Graph Embeddings at Scale}, 19 | year = {2020}, 20 | publisher = {Association for Computing Machinery}, 21 | address = {New York, NY, USA}, 22 | booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval}, 23 | pages = {739–748}, 24 | numpages = {10}, 25 | series = {SIGIR '20} 26 | } 27 | ``` 28 | 29 | ### License 30 | 31 | This project is licensed under the Apache-2.0 License. 32 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/lr_scheduler.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | .. _Learning Rate Schedulers: 5 | 6 | Learning Rate Schedulers 7 | ======================== 8 | 9 | Learning Rate Schedulers update the learning rate over the course of training. 10 | Learning rates can be updated after each update via :func:`step_update` or at 11 | epoch boundaries via :func:`step`. 12 | 13 | .. automodule:: fairseq.optim.lr_scheduler 14 | :members: 15 | 16 | .. autoclass:: fairseq.optim.lr_scheduler.FairseqLRScheduler 17 | :members: 18 | :undoc-members: 19 | 20 | .. autoclass:: fairseq.optim.lr_scheduler.cosine_lr_scheduler.CosineSchedule 21 | :members: 22 | :undoc-members: 23 | .. autoclass:: fairseq.optim.lr_scheduler.fixed_schedule.FixedSchedule 24 | :members: 25 | :undoc-members: 26 | .. autoclass:: fairseq.optim.lr_scheduler.inverse_square_root_schedule.InverseSquareRootSchedule 27 | :members: 28 | :undoc-members: 29 | .. autoclass:: fairseq.optim.lr_scheduler.reduce_lr_on_plateau.ReduceLROnPlateau 30 | :members: 31 | :undoc-members: 32 | .. autoclass:: fairseq.optim.lr_scheduler.triangular_lr_scheduler.TriangularSchedule 33 | :members: 34 | :undoc-members: 35 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/multilingual/data_scripts/download_iitb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | 9 | if [ -z $WORKDIR_ROOT ] ; 10 | then 11 | echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." 
12 | exit 13 | fi 14 | 15 | IITB=$WORKDIR_ROOT/IITB 16 | mkdir -p $IITB 17 | pushd $IITB 18 | 19 | wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/parallel.tgz 20 | tar -xvzf parallel.tgz 21 | 22 | wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/dev_test.tgz 23 | tar -xvzf dev_test.tgz 24 | 25 | DESTDIR=${WORKDIR_ROOT}/ML50/raw/ 26 | 27 | cp parallel/IITB.en-hi.en $DESTDIR/train.hi_IN-en_XX.en_XX 28 | cp parallel/IITB.en-hi.hi $DESTDIR/train.hi_IN-en_XX.hi_IN 29 | 30 | cp dev_test/dev.en $DESTDIR/valid.hi_IN-en_XX.en_XX 31 | cp dev_test/dev.hi $DESTDIR/valid.hi_IN-en_XX.hi_IN 32 | 33 | cp dev_test/test.en $DESTDIR/test.hi_IN-en_XX.en_XX 34 | cp dev_test/test.hi $DESTDIR/test.hi_IN-en_XX.hi_IN 35 | popd -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_wiki103.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "relu" 3 | dropout: 0.3 4 | attention_dropout: 0.1 5 | activation_dropout: 0.1 6 | relu_dropout: 0.1 7 | decoder_embed_dim: 1024 8 | decoder_output_dim: 1024 9 | decoder_input_dim: 1024 10 | decoder_ffn_embed_dim: 4096 11 | decoder_layers: 16 12 | decoder_attention_heads: 8 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: true 15 | adaptive_softmax_cutoff: "20000,60000" 16 | adaptive_softmax_dropout: 0.2 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: true 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: "20000,60000" 27 | tie_adaptive_weights: true 28 | tie_adaptive_proj: true 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/docs/index.rst: -------------------------------------------------------------------------------- 1 | .. fairseq documentation master file, created by 2 | sphinx-quickstart on Fri Aug 17 21:45:30 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | :github_url: https://github.com/pytorch/fairseq 7 | 8 | 9 | fairseq documentation 10 | ===================== 11 | 12 | Fairseq is a sequence modeling toolkit written in `PyTorch 13 | `_ that allows researchers and developers to 14 | train custom models for translation, summarization, language modeling and other 15 | text generation tasks. 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | :caption: Getting Started 20 | 21 | getting_started 22 | command_line_tools 23 | 24 | .. toctree:: 25 | :maxdepth: 1 26 | :caption: Extending Fairseq 27 | 28 | overview 29 | tutorial_simple_lstm 30 | tutorial_classifying_names 31 | 32 | .. 
toctree:: 33 | :maxdepth: 2 34 | :caption: Library Reference 35 | 36 | tasks 37 | models 38 | criterions 39 | optim 40 | lr_scheduler 41 | data 42 | modules 43 | 44 | 45 | Indices and tables 46 | ================== 47 | 48 | * :ref:`genindex` 49 | * :ref:`search` 50 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/config/model/transformer_lm/transformer_lm_baevski_wiki103.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activation_fn: "relu" 3 | dropout: 0.3 4 | attention_dropout: 0.1 5 | activation_dropout: 0.1 6 | relu_dropout: 0.1 7 | decoder_embed_dim: 1024 8 | decoder_output_dim: 1024 9 | decoder_input_dim: 1024 10 | decoder_ffn_embed_dim: 4096 11 | decoder_layers: 16 12 | decoder_attention_heads: 8 13 | decoder_normalize_before: true 14 | no_decoder_final_norm: true 15 | adaptive_softmax_cutoff: "20000,60000" 16 | adaptive_softmax_dropout: 0.2 17 | adaptive_softmax_factor: 4 18 | no_token_positional_embeddings: false 19 | share_decoder_input_output_embed: false 20 | character_embeddings: false 21 | character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]" 22 | character_embedding_dim: 4 23 | char_embedder_highway_layers: 2 24 | adaptive_input: true 25 | adaptive_input_factor: 4 26 | adaptive_input_cutoff: "20000,60000" 27 | tie_adaptive_weights: true 28 | tie_adaptive_proj: true 29 | decoder_learned_pos: false 30 | decoder_layerdrop: 0 31 | decoder_layers_to_keep: null 32 | layernorm_embedding: false 33 | no_scale_embedding: false 34 | quant_noise_pq: 0 35 | quant_noise_pq_block_size: 8 36 | quant_noise_scalar: 0 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/fairseq/optim/lr_scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | """isort:skip_file""" 6 | 7 | import importlib 8 | import os 9 | 10 | from fairseq import registry 11 | from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( # noqa 12 | FairseqLRScheduler, 13 | LegacyFairseqLRScheduler, 14 | ) 15 | from omegaconf import DictConfig 16 | 17 | 18 | ( 19 | build_lr_scheduler_, 20 | register_lr_scheduler, 21 | LR_SCHEDULER_REGISTRY, 22 | LR_SCHEDULER_DATACLASS_REGISTRY, 23 | ) = registry.setup_registry( 24 | "--lr-scheduler", base_class=FairseqLRScheduler, default="fixed" 25 | ) 26 | 27 | 28 | def build_lr_scheduler(cfg: DictConfig, optimizer): 29 | return build_lr_scheduler_(cfg, optimizer) 30 | 31 | 32 | # automatically import any Python files in the optim/lr_scheduler/ directory 33 | for file in os.listdir(os.path.dirname(__file__)): 34 | if file.endswith(".py") and not file.startswith("_"): 35 | file_name = file[: file.find(".py")] 36 | importlib.import_module("fairseq.optim.lr_scheduler." + file_name) 37 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/quant_noise/transformer_quantization_config.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | # This file defines example configuration arguments for quantizing 7 | # a transformer model with product quantization 8 | 9 | # Number of Centroids for Product Quantization, by default 256 (byte-aligned) 10 | n_centroids: 11 | Linear: 12 | key: in_features 13 | value: {"*": 256} 14 | Embedding: 15 | key: embedding_dim 16 | value: {"*": 256} 17 | 18 | # Block Sizes for Product Quantization 19 | # We suggest: 8 for FFN, 4 for ATTN, 4 for embedding projections, 8 for embeddings 20 | block_sizes: 21 | Linear: 22 | key: fuzzy_name 23 | value: {fc: 8, attn: 4, emb: 4} 24 | Embedding: 25 | key: fuzzy_name 26 | value: {emb: 8} 27 | 28 | # Layers to Quantize Sequentially 29 | # We suggest: first FFN, then EMB, then ATTN 30 | layers_to_quantize: 31 | - decoder\\.layers\\.\d+\\.fc[12] 32 | - decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01] 33 | - decoder\\.layers\\.\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj) 34 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/wav2vec/config/finetuning/base_960h.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | common: 4 | fp16: true 5 | log_format: json 6 | log_interval: 200 7 | 8 | checkpoint: 9 | no_epoch_checkpoints: true 10 | best_checkpoint_metric: wer 11 | 12 | task: 13 | _name: audio_pretraining 14 | data: ??? 15 | normalize: false 16 | labels: ltr 17 | 18 | dataset: 19 | num_workers: 6 20 | max_tokens: 3200000 21 | skip_invalid_size_inputs_valid_test: true 22 | valid_subset: dev_other 23 | 24 | distributed_training: 25 | ddp_backend: legacy_ddp 26 | distributed_world_size: 8 27 | 28 | criterion: 29 | _name: ctc 30 | zero_infinity: true 31 | 32 | optimization: 33 | max_update: 320000 34 | lr: [0.00001] 35 | sentence_avg: true 36 | 37 | optimizer: 38 | _name: adam 39 | adam_betas: (0.9,0.98) 40 | adam_eps: 1e-08 41 | 42 | lr_scheduler: 43 | _name: tri_stage 44 | phase_ratio: [0.1, 0.4, 0.5] 45 | final_lr_scale: 0.05 46 | 47 | model: 48 | _name: wav2vec_ctc 49 | w2v_path: ??? 50 | apply_mask: true 51 | mask_prob: 0.5 52 | mask_channel_prob: 0.1 53 | mask_channel_length: 64 54 | layerdrop: 0.1 55 | activation_dropout: 0.1 56 | feature_grad_mult: 0.0 57 | freeze_finetune_updates: 0 58 | 59 | -------------------------------------------------------------------------------- /Two-branch Transformer/examples/wav2vec/config/finetuning/vox_960h.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | common: 4 | fp16: true 5 | log_format: json 6 | log_interval: 200 7 | 8 | checkpoint: 9 | no_epoch_checkpoints: true 10 | best_checkpoint_metric: wer 11 | 12 | task: 13 | _name: audio_pretraining 14 | data: ??? 15 | normalize: true 16 | labels: ltr 17 | 18 | dataset: 19 | num_workers: 6 20 | max_tokens: 1280000 21 | skip_invalid_size_inputs_valid_test: true 22 | valid_subset: dev_other 23 | 24 | distributed_training: 25 | ddp_backend: legacy_ddp 26 | distributed_world_size: 24 27 | 28 | criterion: 29 | _name: ctc 30 | zero_infinity: true 31 | 32 | optimization: 33 | max_update: 320000 34 | lr: [0.00003] 35 | sentence_avg: true 36 | 37 | optimizer: 38 | _name: adam 39 | adam_betas: (0.9,0.98) 40 | adam_eps: 1e-08 41 | 42 | lr_scheduler: 43 | _name: tri_stage 44 | phase_ratio: [0.1, 0.4, 0.5] 45 | final_lr_scale: 0.05 46 | 47 | model: 48 | _name: wav2vec_ctc 49 | w2v_path: ??? 
50 | apply_mask: true 51 | mask_prob: 0.5 52 | mask_channel_prob: 0.25 53 | mask_channel_length: 64 54 | layerdrop: 0.1 55 | activation_dropout: 0.1 56 | feature_grad_mult: 0.0 57 | freeze_finetune_updates: 10000 58 | 59 | -------------------------------------------------------------------------------- /GIN/examples/nodeproppred/arxiv/README.md: -------------------------------------------------------------------------------- 1 | # ogbn-arxiv 2 | 3 | This repository includes the following example scripts: 4 | 5 | * **[MLP](https://github.com/snap-stanford/ogb/blob/master/examples/nodeproppred/arxiv/mlp.py)**: Full-batch MLP training based on paper features and optional Node2Vec features (`--use_node_embedding`). For training with Node2Vec features, this script requires node embeddings be saved in `embedding.pt`. To generate them, please run `python node2vec.py` [requires `torch-geometric>=1.5.0`]. 6 | * **[GNN](https://github.com/snap-stanford/ogb/blob/master/examples/nodeproppred/arxiv/gnn.py)**: Full-batch GNN training using either the GCN or GraphSAGE operator (`--use_sage`) [requires `torch-geometric>=1.6.0`]. 7 | 8 | ## Training & Evaluation 9 | 10 | ``` 11 | # Run with default config 12 | python gnn.py 13 | 14 | # Run with custom config 15 | python gnn.py --hidden_channels=128 16 | ``` 17 | 18 | ## Getting Raw Texts 19 | 20 | The tsv file that maps paper IDs into their titles and abstracts are available [here](https://snap.stanford.edu/ogb/data/misc/ogbn_arxiv/titleabs.tsv.gz). 21 | There are three columns: paperid \t title \t abstract. 22 | You can obtain the paper ID for each node at `mapping/nodeidx2paperid.csv.gz` of the downloaded dataset directory. 23 | -------------------------------------------------------------------------------- /GIN/examples/linkproppred/collab/README.md: -------------------------------------------------------------------------------- 1 | # ogbl-collab 2 | 3 | This repository includes the following example scripts: 4 | 5 | * **[MLP](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/collab/mlp.py)**: Full-batch MLP training based on author features and optional Node2Vec features (`--use_node_embedding`). For training with Node2Vec features, this script requires node embeddings be saved in `embedding.pt`. To generate them, please run `python node2vec.py` [requires `torch-geometric>=1.5.0`]. 6 | * **[GNN](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/collab/gnn.py)**: Full-batch GNN training using either the GCN or GraphSAGE operator (`--use_sage`) [requires `torch-geometric>=1.6.0`]. Setting `--use_valedges_as_input` would allow models to use validation edges at inference time. See [here](https://ogb.stanford.edu/docs/leader_rules/) for the rules of using validation labels. 7 | * **[Matrix Factorization](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/collab/mf.py)**: Full-batch Matrix Factorization training. 8 | 9 | ## Training & Evaluation 10 | 11 | ``` 12 | # Run with default config 13 | python gnn.py 14 | 15 | # Run with inference using validation edges 16 | python gnn.py --use_valedges_as_input 17 | ``` 18 | -------------------------------------------------------------------------------- /Two-branch Transformer/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Facebook AI Research Sequence-to-Sequence Toolkit (fairseq) 2 | We want to make contributing to this project as easy and transparent as 3 | possible. 
--------------------------------------------------------------------------------
/GIN/examples/linkproppred/collab/README.md:
--------------------------------------------------------------------------------
# ogbl-collab

This repository includes the following example scripts:

* **[MLP](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/collab/mlp.py)**: Full-batch MLP training based on author features and optional Node2Vec features (`--use_node_embedding`). For training with Node2Vec features, this script requires node embeddings to be saved in `embedding.pt`. To generate them, please run `python node2vec.py` [requires `torch-geometric>=1.5.0`].
* **[GNN](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/collab/gnn.py)**: Full-batch GNN training using either the GCN or GraphSAGE operator (`--use_sage`) [requires `torch-geometric>=1.6.0`]. Setting `--use_valedges_as_input` allows the model to use validation edges at inference time. See [here](https://ogb.stanford.edu/docs/leader_rules/) for the rules on using validation labels.
* **[Matrix Factorization](https://github.com/snap-stanford/ogb/blob/master/examples/linkproppred/collab/mf.py)**: Full-batch Matrix Factorization training.

## Training & Evaluation

```
# Run with default config
python gnn.py

# Run with inference using validation edges
python gnn.py --use_valedges_as_input
```

--------------------------------------------------------------------------------
/Two-branch Transformer/CONTRIBUTING.md:
--------------------------------------------------------------------------------
# Contributing to Facebook AI Research Sequence-to-Sequence Toolkit (fairseq)
We want to make contributing to this project as easy and transparent as
possible.

## Pull Requests
We actively welcome your pull requests.

1. Fork the repo and create your branch from `master`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the documentation.
4. Ensure the test suite passes.
5. Make sure your code lints.
6. If you haven't already, complete the Contributor License Agreement ("CLA").

## Contributor License Agreement ("CLA")
In order to accept your pull request, we need you to submit a CLA. You only need
to do this once to work on any of Facebook's open source projects.

Complete your CLA here:

## Issues
We use GitHub issues to track public bugs. Please ensure your description is
clear and has sufficient instructions to be able to reproduce the issue.

## License
By contributing to Facebook AI Research Sequence-to-Sequence Toolkit (fairseq),
you agree that your contributions will be licensed under the LICENSE file in
the root directory of this source tree.

--------------------------------------------------------------------------------
/Two-branch Transformer/examples/wav2vec/config/finetuning/base_100h.yaml:
--------------------------------------------------------------------------------
# @package _group_

common:
  fp16: true
  log_format: json
  log_interval: 200

checkpoint:
  no_epoch_checkpoints: true
  best_checkpoint_metric: wer

task:
  _name: audio_pretraining
  data: ???
  normalize: false
  labels: ltr

dataset:
  num_workers: 6
  max_tokens: 3200000
  skip_invalid_size_inputs_valid_test: true
  valid_subset: dev_other

distributed_training:
  ddp_backend: legacy_ddp
  distributed_world_size: 2

criterion:
  _name: ctc
  zero_infinity: true

optimization:
  max_update: 80000
  lr: [0.00003]
  sentence_avg: true
  update_freq: [4]

optimizer:
  _name: adam
  adam_betas: (0.9,0.98)
  adam_eps: 1e-08

lr_scheduler:
  _name: tri_stage
  phase_ratio: [0.1, 0.4, 0.5]
  final_lr_scale: 0.05

model:
  _name: wav2vec_ctc
  w2v_path: ???
  apply_mask: true
  mask_prob: 0.65
  mask_channel_prob: 0.5
  mask_channel_length: 64
  layerdrop: 0.1
  activation_dropout: 0.1
  feature_grad_mult: 0.0
  freeze_finetune_updates: 0

--------------------------------------------------------------------------------
/Two-branch Transformer/examples/wav2vec/config/finetuning/vox_100h.yaml:
--------------------------------------------------------------------------------
# @package _group_

common:
  fp16: true
  log_format: json
  log_interval: 200

checkpoint:
  no_epoch_checkpoints: true
  best_checkpoint_metric: wer

task:
  _name: audio_pretraining
  data: ???
  normalize: true
  labels: ltr

dataset:
  num_workers: 6
  max_tokens: 1280000
  skip_invalid_size_inputs_valid_test: true
  valid_subset: dev_other

distributed_training:
  ddp_backend: legacy_ddp
  distributed_world_size: 4

criterion:
  _name: ctc
  zero_infinity: true

optimization:
  max_update: 80000
  lr: [0.00003]
  sentence_avg: true
  update_freq: [5]

optimizer:
  _name: adam
  adam_betas: (0.9,0.98)
  adam_eps: 1e-08

lr_scheduler:
  _name: tri_stage
  phase_ratio: [0.1, 0.4, 0.5]
  final_lr_scale: 0.05

model:
  _name: wav2vec_ctc
  w2v_path: ???
  apply_mask: true
  mask_prob: 0.5
  mask_channel_prob: 0.5
  mask_channel_length: 64
  layerdrop: 0.1
  activation_dropout: 0.1
  feature_grad_mult: 0.0
  freeze_finetune_updates: 10000

--------------------------------------------------------------------------------
/Two-branch Transformer/fairseq/data/append_token_dataset.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class AppendTokenDataset(BaseWrapperDataset):
    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if token is not None:
            self._sizes = np.array(dataset.sizes) + 1
        else:
            self._sizes = dataset.sizes

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if self.token is not None:
            item = torch.cat([item, item.new([self.token])])
        return item

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        n = self.dataset.num_tokens(index)
        if self.token is not None:
            n += 1
        return n

    def size(self, index):
        n = self.dataset.size(index)
        if self.token is not None:
            n += 1
        return n
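A small usage sketch for `AppendTokenDataset` above (and its mirror image, `PrependTokenDataset`, which follows). It assumes fairseq is installed and that `ListDataset` is exported from `fairseq.data` as in upstream fairseq; the token index 2 is an arbitrary stand-in for an EOS symbol.

```
import torch
from fairseq.data import AppendTokenDataset, ListDataset

# Two toy "sentences" as tensors of token indices, with their lengths.
items = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
base = ListDataset(items, sizes=[3, 2])

eos = 2  # arbitrary stand-in for an EOS index
wrapped = AppendTokenDataset(base, token=eos)

print(wrapped[0])     # tensor([5, 6, 7, 2]) -- the token is appended on the fly
print(wrapped.sizes)  # [4 3] -- reported lengths grow by one
```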
--------------------------------------------------------------------------------
/Two-branch Transformer/fairseq/data/prepend_token_dataset.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class PrependTokenDataset(BaseWrapperDataset):
    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if token is not None:
            self._sizes = np.array(dataset.sizes) + 1
        else:
            self._sizes = dataset.sizes

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if self.token is not None:
            item = torch.cat([item.new([self.token]), item])
        return item

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        n = self.dataset.num_tokens(index)
        if self.token is not None:
            n += 1
        return n

    def size(self, index):
        n = self.dataset.size(index)
        if self.token is not None:
            n += 1
        return n

--------------------------------------------------------------------------------
/Two-branch Transformer/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
---
name: 🐛 Bug Report
about: Submit a bug report to help us improve
labels: 'bug, needs triage'
---

## 🐛 Bug

### To Reproduce

Steps to reproduce the behavior (**always include the command you ran**):

1. Run cmd '....'
2. See error

#### Code sample

### Expected behavior

### Environment

- fairseq Version (e.g., 1.0 or master):
- PyTorch Version (e.g., 1.0)
- OS (e.g., Linux):
- How you installed fairseq (`pip`, source):
- Build command you used (if compiling from source):
- Python version:
- CUDA/cuDNN version:
- GPU models and configuration:
- Any other relevant information:

### Additional context

--------------------------------------------------------------------------------
/Two-branch Transformer/fairseq/pdb.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import multiprocessing
import os
import pdb
import sys


__all__ = ["set_trace"]


_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
    _stdin_fd = sys.stdin.fileno()
except Exception:
    _stdin_fd = None


class MultiprocessingPdb(pdb.Pdb):
    """A Pdb wrapper that works in a multiprocessing environment.

    Usage: `from fairseq import pdb; pdb.set_trace()`
    """

    def __init__(self):
        pdb.Pdb.__init__(self, nosigint=True)

    def _cmdloop(self):
        stdin_bak = sys.stdin
        with _stdin_lock:
            try:
                if _stdin_fd is not None:
                    if not _stdin[0]:
                        _stdin[0] = os.fdopen(_stdin_fd)
                    sys.stdin = _stdin[0]
                self.cmdloop()
            finally:
                sys.stdin = stdin_bak


def set_trace():
    pdb = MultiprocessingPdb()
    pdb.set_trace(sys._getframe().f_back)

--------------------------------------------------------------------------------
/Two-branch Transformer/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml:
--------------------------------------------------------------------------------
# @package _group_

common:
  fp16: true
  log_format: json
  log_interval: 200

checkpoint:
  save_interval_updates: 25000
  keep_interval_updates: 1
  no_epoch_checkpoints: true

task:
  _name: audio_pretraining
  data: ???
  max_sample_size: 250000
  min_sample_size: 32000
  normalize: false

dataset:
  num_workers: 6
  max_tokens: 1400000
  skip_invalid_size_inputs_valid_test: true

distributed_training:
  distributed_world_size: 64
  ddp_backend: legacy_ddp

criterion:
  _name: wav2vec
  infonce: true
  log_keys: ["prob_perplexity","code_perplexity","temp"]
  loss_weights: [0.1, 10]

optimization:
  max_update: 400000
  lr: [0.0005]

optimizer:
  _name: adam
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06
  weight_decay: 0.01

lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 32000

model:
  _name: wav2vec2
  quantize_targets: true
  final_dim: 256
  encoder_layerdrop: 0.05
  dropout_input: 0.1
  dropout_features: 0.1
  feature_grad_mult: 0.1
  encoder_embed_dim: 768

--------------------------------------------------------------------------------
/Two-branch Transformer/scripts/compare_namespaces.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Helper script to compare two argparse.Namespace objects."""

from argparse import Namespace  # noqa


def main():

    ns1 = eval(input("Namespace 1: "))
    ns2 = eval(input("Namespace 2: "))

    def keys(ns):
        ks = set()
        for k in dir(ns):
            if not k.startswith("_"):
                ks.add(k)
        return ks

    k1 = keys(ns1)
    k2 = keys(ns2)

    def print_keys(ks, ns1, ns2=None):
        for k in ks:
            if ns2 is None:
                print("{}\t{}".format(k, getattr(ns1, k, None)))
            else:
                print(
                    "{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None))
                )

    print("Keys unique to namespace 1:")
    print_keys(k1 - k2, ns1)
    print()

    print("Keys unique to namespace 2:")
    print_keys(k2 - k1, ns2)
    print()

    print("Overlapping keys with different values:")
    ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")]
    print_keys(ks, ns1, ns2)
    print()


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/Two-branch Transformer/fairseq/data/encoders/fastbpe.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from dataclasses import dataclass, field

from fairseq import file_utils
from fairseq.data.encoders import register_bpe
from fairseq.dataclass import FairseqDataclass


@dataclass
class fastBPEConfig(FairseqDataclass):
    bpe_codes: str = field(default="???", metadata={"help": "path to fastBPE BPE"})


@register_bpe("fastbpe", dataclass=fastBPEConfig)
class fastBPE(object):
    def __init__(self, cfg):
        if cfg.bpe_codes is None:
            raise ValueError("--bpe-codes is required for --bpe=fastbpe")
        codes = file_utils.cached_path(cfg.bpe_codes)
        try:
            import fastBPE

            self.bpe = fastBPE.fastBPE(codes)
            self.bpe_symbol = "@@ "
        except ImportError:
            raise ImportError("Please install fastBPE with: pip install fastBPE")

    def encode(self, x: str) -> str:
        return self.bpe.apply([x])[0]

    def decode(self, x: str) -> str:
        return (x + " ").replace(self.bpe_symbol, "").rstrip()

--------------------------------------------------------------------------------
/Two-branch Transformer/molecule/detokenize_re.py:
--------------------------------------------------------------------------------
import re
import io
import argparse
from tqdm import tqdm
import multiprocessing


def detokenize(smi):
    # Remove all whitespace between tokens; raw string avoids an invalid-escape warning.
    return re.sub(r'\s+', '', smi)


def main(args):
    input_fn = args.fn

    def lines():
        with io.open(input_fn, 'r', encoding='utf8', newline='\n') as srcf:
            for line in srcf:
                yield line.strip()

    results = []
    total = len(io.open(input_fn, 'r', encoding='utf8', newline='\n').readlines())

    pool = multiprocessing.Pool(args.workers)
    for res in tqdm(pool.imap(detokenize, lines(), chunksize=10000), total=total):
        if res:
            results.append('{}\n'.format(res))

    if args.output_fn is None:
        output_fn = '{}.debpe'.format(input_fn)
    else:
        output_fn = args.output_fn
    io.open(output_fn, 'w', encoding='utf8', newline='\n').writelines(results)
    print('{}/{}'.format(len(results), total))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('fn', type=str)
    parser.add_argument('--workers', type=int, default=1)
    parser.add_argument('--output-fn', type=str, default=None)
    args = parser.parse_args()
    main(args)
--------------------------------------------------------------------------------
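The core of `detokenize_re.py` above is a single regex that removes the whitespace introduced by SMILES tokenization. A self-contained sketch of that transformation (the SMILES string is an arbitrary example, not data from this repository):

```
import re

def detokenize(smi):
    # Remove every whitespace character, undoing space-separated tokenization.
    return re.sub(r'\s+', '', smi)

tokenized = "C C ( = O ) O c 1 c c c c c 1 C ( = O ) O"  # aspirin, space-tokenized
print(detokenize(tokenized))  # CC(=O)Oc1ccccc1C(=O)O
```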