├── .gitattributes
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug-report.md
│   │   ├── config.yml
│   │   ├── feature-request.md
│   │   └── new-adapter-setup.md
│   └── workflows
│       ├── adapter_docs_build.yml
│       ├── stale.yml
│       └── tests_torch.yml
├── .gitignore
├── .gitmodules
├── CITATION.cff
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── conftest.py
├── docs
│   ├── Makefile
│   ├── README.md
│   ├── _config.py
│   ├── _static
│   │   └── custom.css
│   ├── _templates
│   │   └── versions.html
│   ├── adapter_composition.md
│   ├── classes
│   │   ├── adapter_config.rst
│   │   ├── adapter_layer.rst
│   │   ├── adapter_model_interface.rst
│   │   ├── adapter_training.rst
│   │   ├── adapter_utils.rst
│   │   ├── model_adapters_config.rst
│   │   ├── model_mixins.rst
│   │   └── models
│   │       ├── albert.rst
│   │       ├── auto.rst
│   │       ├── bart.rst
│   │       ├── beit.rst
│   │       ├── bert-generation.rst
│   │       ├── bert.rst
│   │       ├── clip.rst
│   │       ├── deberta.rst
│   │       ├── deberta_v2.rst
│   │       ├── distilbert.rst
│   │       ├── electra.rst
│   │       ├── encoderdecoder.rst
│   │       ├── gpt2.rst
│   │       ├── gptj.rst
│   │       ├── llama.rst
│   │       ├── mbart.rst
│   │       ├── mistral.rst
│   │       ├── mt5.rst
│   │       ├── plbart.rst
│   │       ├── roberta.rst
│   │       ├── t5.rst
│   │       ├── vit.rst
│   │       ├── whisper.rst
│   │       ├── xlmroberta.rst
│   │       └── xmod.rst
│   ├── conf.py
│   ├── contributing.md
│   ├── contributing
│   │   ├── adding_adapter_methods.md
│   │   └── adding_adapters_to_a_model.md
│   ├── embeddings.md
│   ├── extending.md
│   ├── favicon.png
│   ├── hub_contributing.md
│   ├── huggingface_hub.md
│   ├── img
│   │   ├── Fusion.png
│   │   ├── adapter-bert.png
│   │   ├── adapter_blocks_nesting.png
│   │   ├── architecture.png
│   │   ├── compacter.png
│   │   ├── hfhub.svg
│   │   ├── ia3.png
│   │   ├── lora.png
│   │   ├── parallel.png
│   │   ├── prefix.png
│   │   ├── splitting_adapters.png
│   │   ├── stacking_adapters.png
│   │   └── unipelt.png
│   ├── index.rst
│   ├── installation.md
│   ├── loading.md
│   ├── logo.png
│   ├── make.bat
│   ├── merging_adapters.md
│   ├── method_combinations.md
│   ├── methods.md
│   ├── model_overview.md
│   ├── multi_task_methods.md
│   ├── overview.md
│   ├── plugin_interface.md
│   ├── prediction_heads.md
│   ├── quickstart.md
│   ├── scripts
│   │   └── post_build.py
│   ├── training.md
│   └── transitioning.md
├── examples
│   └── pytorch
│       ├── README.md
│       ├── _tests_requirements.txt
│       ├── adapterdrop
│       │   └── drop_at_inference.py
│       ├── adapterfusion
│       │   ├── README.md
│       │   └── run_fusion_glue.py
│       ├── conftest.py
│       ├── dependency-parsing
│       │   ├── README.md
│       │   ├── preprocessing.py
│       │   ├── requirements.txt
│       │   ├── run_udp.py
│       │   └── utils_udp.py
│       ├── language-modeling
│       │   ├── README.md
│       │   ├── requirements.txt
│       │   ├── run_clm.py
│       │   └── run_mlm.py
│       ├── multiple-choice
│       │   ├── README.md
│       │   ├── requirements.txt
│       │   └── run_swag.py
│       ├── question-answering
│       │   ├── README.md
│       │   ├── requirements.txt
│       │   ├── run_qa.py
│       │   ├── run_seq2seq_qa.py
│       │   ├── trainer_qa.py
│       │   ├── trainer_seq2seq_qa.py
│       │   └── utils_qa.py
│       ├── summarization
│       │   ├── README.md
│       │   ├── requirements.txt
│       │   └── run_summarization.py
│       ├── test_adapter_examples.py
│       ├── text-classification
│       │   ├── README.md
│       │   ├── requirements.txt
│       │   └── run_glue.py
│       ├── text-generation
│       │   ├── README.md
│       │   ├── requirements.txt
│       │   └── run_generation.py
│       ├── token-classification
│       │   ├── README.md
│       │   ├── requirements.txt
│       │   ├── run.sh
│       │   └── run_ner.py
│       └── translation
│           ├── README.md
│           ├── requirements.txt
│           └── run_translation.py
├── notebooks
│   ├── 01_Adapter_Training.ipynb
│   ├── 02_Adapter_Inference.ipynb
│   ├── 03_Adapter_Fusion.ipynb
│   ├── 04_Cross_Lingual_Transfer.ipynb
│   ├── 05_Parallel_Adapter_Inference.ipynb
│   ├── 06_Task_Arithmetics.ipynb
│   ├── 07_Complex_Adapter_Configuration.ipynb
│   ├── 08_NER_Wikiann.ipynb
│   ├── Adapter_Drop_Training.ipynb
│   ├── Adapter_Interface_Qwen.ipynb
│   ├── Adapter_Whisper_Audio_FineTuning.ipynb
│   ├── Adapter_With_Embeddings.ipynb
│   ├── Adapter_id2label_inference.ipynb
│   ├── Adapter_train_NER_with_id2label.ipynb
│   ├── Gradient_Checkpointing_Llama.ipynb
│   ├── QLoRA_Llama_Finetuning.ipynb
│   ├── README.md
│   ├── ReFT_Adapters_Finetuning.ipynb
│   ├── Text_Generation_Training.ipynb
│   └── ViT_AdapterPlus_FineTuning.ipynb
├── pyproject.toml
├── setup.cfg
├── setup.py
├── src
│   └── adapters
│       ├── __init__.py
│       ├── composition.py
│       ├── configuration
│       │   ├── __init__.py
│       │   ├── adapter_config.py
│       │   ├── adapter_fusion_config.py
│       │   └── model_adapters_config.py
│       ├── context.py
│       ├── head_utils.py
│       ├── heads
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── dependency_parsing.py
│       │   ├── language_modeling.py
│       │   └── model_mixin.py
│       ├── hub_mixin.py
│       ├── interface.py
│       ├── loading.py
│       ├── methods
│       │   ├── __init__.py
│       │   ├── adapter_layer_base.py
│       │   ├── bottleneck.py
│       │   ├── invertible.py
│       │   ├── lora.py
│       │   ├── modeling.py
│       │   ├── prefix_tuning.py
│       │   ├── prompt_tuning.py
│       │   ├── reft.py
│       │   └── utils.py
│       ├── model_mixin.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── albert
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_albert.py
│       │   │   └── modeling_albert.py
│       │   ├── auto
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   └── auto_factory.py
│       │   ├── bart
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_bart.py
│       │   │   └── modeling_bart.py
│       │   ├── beit
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_beit.py
│       │   │   └── modeling_beit.py
│       │   ├── bert
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_bert.py
│       │   │   └── modeling_bert.py
│       │   ├── bert_generation
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   └── modeling_bert_generation.py
│       │   ├── clip
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_clip.py
│       │   │   └── modeling_clip.py
│       │   ├── deberta
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_deberta.py
│       │   │   └── modeling_deberta.py
│       │   ├── deberta_v2
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_deberta_v2.py
│       │   │   └── modeling_deberta_v2.py
│       │   ├── distilbert
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_distilbert.py
│       │   │   └── modeling_distilbert.py
│       │   ├── electra
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   └── modeling_electra.py
│       │   ├── encoder_decoder
│       │   │   ├── __init__.py
│       │   │   ├── mixin_encoder_decoder.py
│       │   │   └── modeling_encoder_decoder.py
│       │   ├── gpt2
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_gpt2.py
│       │   │   └── modeling_gpt2.py
│       │   ├── gptj
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_gptj.py
│       │   │   └── modeling_gptj.py
│       │   ├── llama
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_llama.py
│       │   │   └── modeling_llama.py
│       │   ├── mbart
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   └── modeling_mbart.py
│       │   ├── mistral
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_mistral.py
│       │   │   └── modeling_mistral.py
│       │   ├── mt5
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   └── modeling_mt5.py
│       │   ├── plbart
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_plbart.py
│       │   │   └── modeling_plbart.py
│       │   ├── roberta
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   └── modeling_roberta.py
│       │   ├── t5
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_t5.py
│       │   │   └── modeling_t5.py
│       │   ├── vit
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_vit.py
│       │   │   └── modeling_vit.py
│       │   ├── whisper
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   ├── mixin_whisper.py
│       │   │   └── modeling_whisper.py
│       │   ├── xlm_roberta
│       │   │   ├── __init__.py
│       │   │   ├── adapter_model.py
│       │   │   └── modeling_xlm_roberta.py
│       │   └── xmod
│       │       ├── __init__.py
│       │       ├── adapter_model.py
│       │       ├── mixin_xmod.py
│       │       └── modeling_xmod.py
│       ├── trainer.py
│       ├── training.py
│       ├── utils.py
│       └── wrappers
│           ├── __init__.py
│           ├── configuration.py
│           ├── interfaces.py
│           └── model.py
├── tests
│   ├── README.md
│   ├── __init__.py
│   ├── fixtures
│   │   ├── SiBERT
│   │   │   ├── config.json
│   │   │   ├── special_tokens_map.json
│   │   │   ├── tokenizer_config.json
│   │   │   └── vocab.txt
│   │   ├── __init__.py
│   │   ├── audio_datasets
│   │   │   ├── common_voice_encoded
│   │   │   │   ├── dataset_dict.json
│   │   │   │   └── train
│   │   │   │       ├── data-00000-of-00001.arrow
│   │   │   │       ├── dataset_info.json
│   │   │   │       └── state.json
│   │   │   ├── common_voice_org
│   │   │   │   ├── dataset_dict.json
│   │   │   │   └── train
│   │   │   │       ├── data-00000-of-00001.arrow
│   │   │   │       ├── dataset_info.json
│   │   │   │       └── state.json
│   │   │   ├── prepare_audio_datasets.py
│   │   │   └── speech_commands_org
│   │   │       ├── dataset_dict.json
│   │   │       └── train
│   │   │           ├── data-00000-of-00001.arrow
│   │   │           ├── dataset_info.json
│   │   │           └── state.json
│   │   ├── hub-index.sample.json
│   │   ├── sample_text.txt
│   │   ├── samples
│   │   │   ├── MRPC
│   │   │   │   ├── dev.csv
│   │   │   │   ├── dev.tsv
│   │   │   │   ├── train.csv
│   │   │   │   └── train.tsv
│   │   │   ├── SQUAD
│   │   │   │   └── sample.json
│   │   │   ├── cifar10
│   │   │   │   ├── cifar10.py
│   │   │   │   ├── data_batch_1
│   │   │   │   ├── data_batch_2
│   │   │   │   ├── data_batch_3
│   │   │   │   ├── data_batch_4
│   │   │   │   ├── data_batch_5
│   │   │   │   └── test_batch
│   │   │   ├── conll
│   │   │   │   └── sample.json
│   │   │   ├── swag
│   │   │   │   └── sample.json
│   │   │   ├── wmt16
│   │   │   │   └── sample.json
│   │   │   └── xsum
│   │   │       └── sample.json
│   │   └── tests_samples
│   │       └── COCO
│   │           ├── 000000039769.png
│   │           ├── coco_annotations.txt
│   │           ├── coco_panoptic
│   │           │   └── 000000039769.png
│   │           └── coco_panoptic_annotations.txt
│   ├── test_methods
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── generator.py
│   │   ├── method_test_impl
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── composition
│   │   │   │   ├── __init__.py
│   │   │   │   ├── test_multi_task.py
│   │   │   │   └── test_parallel.py
│   │   │   ├── core
│   │   │   │   ├── __init__.py
│   │   │   │   ├── test_adapter_backward_compability.py
│   │   │   │   ├── test_adapter_conversion.py
│   │   │   │   └── test_adapter_fusion_common.py
│   │   │   ├── embeddings
│   │   │   │   ├── __init__.py
│   │   │   │   └── test_adapter_embeddings.py
│   │   │   ├── heads
│   │   │   │   ├── __init__.py
│   │   │   │   └── test_adapter_heads.py
│   │   │   ├── peft
│   │   │   │   ├── __init__.py
│   │   │   │   ├── test_adapter_common.py
│   │   │   │   ├── test_compacter.py
│   │   │   │   ├── test_config_union.py
│   │   │   │   ├── test_ia3.py
│   │   │   │   ├── test_lora.py
│   │   │   │   ├── test_mtllora.py
│   │   │   │   ├── test_prefix_tuning.py
│   │   │   │   ├── test_prompt_tuning.py
│   │   │   │   ├── test_reft.py
│   │   │   │   ├── test_unipelt.py
│   │   │   │   └── test_vera.py
│   │   │   └── utils.py
│   │   ├── test_all_custom_interfaces.py
│   │   ├── test_on_albert.py
│   │   ├── test_on_bart.py
│   │   ├── test_on_beit.py
│   │   ├── test_on_bert.py
│   │   ├── test_on_bert_generation.py
│   │   ├── test_on_clip
│   │   │   ├── test_model.py
│   │   │   ├── test_textmodel.py
│   │   │   ├── test_textwithprojectionmodel.py
│   │   │   ├── test_visionmodel.py
│   │   │   └── test_visionwithprojectionmodel.py
│   │   ├── test_on_custom_interface.py
│   │   ├── test_on_deberta.py
│   │   ├── test_on_debertaV2.py
│   │   ├── test_on_distilbert.py
│   │   ├── test_on_electra.py
│   │   ├── test_on_encoder_decoder.py
│   │   ├── test_on_gpt2.py
│   │   ├── test_on_llama.py
│   │   ├── test_on_mbart.py
│   │   ├── test_on_mistral.py
│   │   ├── test_on_mt5.py
│   │   ├── test_on_plbart.py
│   │   ├── test_on_roberta.py
│   │   ├── test_on_t5.py
│   │   ├── test_on_vit.py
│   │   ├── test_on_whisper.py
│   │   ├── test_on_xlm_roberta.py
│   │   └── test_on_xmod.py
│   ├── test_misc
│   │   ├── test_adapter_composition.py
│   │   ├── test_adapter_config.py
│   │   ├── test_adapter_custom_head.py
│   │   ├── test_adapter_fusion_config.py
│   │   ├── test_adapter_hub.py
│   │   ├── test_adapter_safetensors.py
│   │   ├── test_adapter_save_id2label.py
│   │   ├── test_adapter_trainer
│   │   │   ├── __init__.py
│   │   │   ├── test_adapter_trainer.py
│   │   │   └── test_adapter_trainer_ext.py
│   │   └── test_custom_interface_compat.py
│   └── test_models
│       ├── __init__.py
│       ├── base.py
│       ├── test_albert_model.py
│       ├── test_bart_model.py
│       ├── test_beit_model.py
│       ├── test_bert_generation_model.py
│       ├── test_bert_model.py
│       ├── test_clip_model.py
│       ├── test_debertaV2_model.py
│       ├── test_deberta_model.py
│       ├── test_distilbert_model.py
│       ├── test_electra_model.py
│       ├── test_encoder_decoder_model.py
│       ├── test_gpt2_model.py
│       ├── test_gptj_model.py
│       ├── test_llama_model.py
│       ├── test_mbart_model.py
│       ├── test_mistral_model.py
│       ├── test_mt5_model.py
│       ├── test_plbart_model.py
│       ├── test_roberta_model.py
│       ├── test_t5_model.py
│       ├── test_vit_model.py
│       ├── test_whisper_model.py
│       ├── test_xlm_roberta_model.py
│       └── test_xmod_model.py
└── utils
    ├── back_comp
    │   ├── README.md
    │   ├── Utils.py
    │   ├── compare.sh
    │   ├── compare_outputs.py
    │   └── create_outputs.py
    ├── check_inits.py
    ├── convert_xmod_checkpoint.py
    ├── custom_init_isort.py
    └── sort_auto_mappings.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.py eol=lf
2 | *.rst eol=lf
3 | *.md eol=lf
4 | *.mdx eol=lf
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F41B Bug Report"
3 | about: Submit a bug report to help us improve adapters
4 | title: ''
5 | labels: 'bug'
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 | ## Environment info
12 |
14 |
15 | - `adapters` version:
16 | - Platform:
17 | - Python version:
18 | - PyTorch version (GPU?):
19 | - Tensorflow version (GPU?):
20 | - Using GPU in script?:
21 | - Using distributed or parallel set-up in script?:
22 |
23 | ## Information
24 |
25 | Model I am using (Bert, XLNet ...):
26 |
27 | Language I am using the model on (English, Chinese ...):
28 |
29 | Adapter setup I am using (if any):
30 |
31 | The problem arises when using:
32 | * [ ] the official example scripts: (give details below)
33 | * [ ] my own modified scripts: (give details below)
34 |
35 | The tasks I am working on is:
36 | * [ ] an official GLUE/SQUaD task: (give the name)
37 | * [ ] my own task or dataset: (give details below)
38 |
39 | ## To reproduce
40 |
41 | Steps to reproduce the behavior:
42 |
43 | 1.
44 | 2.
45 | 3.
46 |
47 |
50 |
51 | ## Expected behavior
52 |
53 |
54 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: "🗪 Discussions Forum"
4 | url: https://github.com/adapter-hub/adapters/discussions
5 | about: Ask questions on working with adapters, request help or share your work
6 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F680 Feature request"
3 | about: Submit a proposal/request for a new adapters feature
4 | title: ''
5 | labels: 'enhancement'
6 | assignees: ''
7 |
8 | ---
9 |
10 | # 🚀 Feature request
11 |
12 |
14 |
15 | ## Motivation
16 |
17 |
20 |
21 | ## Your contribution
22 |
23 |
26 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/new-adapter-setup.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F31F New adapter setup or model"
3 | about: Submit a proposal/request to implement a new adapter setup or to add adapters to a new model
4 | title: ''
5 | labels: 'enhancement'
6 | assignees: ''
7 |
8 | ---
9 |
10 | # 🌟 New adapter setup
11 |
12 | ## Model description
13 |
14 |
15 |
16 | ## Open source status
17 |
18 | * [ ] the model implementation is available: (give details)
19 | * [ ] the model weights are available: (give details)
20 | * [ ] who are the authors: (mention them, if possible by @gh-username)
21 |
--------------------------------------------------------------------------------
/.github/workflows/adapter_docs_build.yml:
--------------------------------------------------------------------------------
1 | name: Build Adapter Docs
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | paths:
7 | - '.github/workflows/**'
8 | - 'docs/**'
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 | with:
17 | ref: main
18 | fetch-depth: 0
19 | - uses: actions/setup-python@v2
20 | with:
21 | python-version: "3.10"
22 | - name: Install
23 | run: |
24 | pip install setuptools==57.4.0
25 | pip install torch
26 | pip install recommonmark==0.7.1
27 | pip install -e .[docs]
28 | pip install Jinja2==2.11.3
29 | pip install markupsafe==2.0.1
30 | - name: Build
31 | run: |
32 | cd docs && make html-multi-version && cd ..
33 | - name: Deploy
34 | uses: peaceiris/actions-gh-pages@v3
35 | with:
36 | github_token: ${{ secrets.GITHUB_TOKEN }}
37 | user_name: "Adapter-Hub-Bert"
38 | user_email: "---"
39 | publish_dir: ./docs/_build/html
40 | publish_branch: gh-pages
41 | force_orphan: true
42 | cname: docs.adapterhub.ml
43 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: 'Stale issues Bot'
2 |
3 | on:
4 | schedule:
5 | - cron: '0 6 * * *'
6 |
7 | jobs:
8 | stale:
9 | runs-on: ubuntu-latest
10 |
11 | permissions:
12 | issues: write
13 |
14 | steps:
15 | - uses: actions/stale@v8
16 | with:
17 | repo-token: "${{ secrets.BOT_TOKEN }}"
18 | exempt-issue-labels: 'do-not-stale,enhancement,bug'
19 | stale-issue-message: 'This issue has been automatically marked as stale because it has been without activity for 90 days. This issue will be closed in 14 days unless you comment or remove the stale label.'
20 | close-issue-message: 'This issue was closed because it was stale for 14 days without any activity.'
21 | days-before-issue-stale: 90
22 | days-before-issue-close: 14
23 | days-before-pr-close: -1
24 | days-before-pr-stale: -1
25 | operations-per-run: 300
26 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "hf_transformers"]
2 | path = hf_transformers
3 | url = https://github.com/huggingface/transformers.git
4 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: "1.2.0"
2 | date-released: 2023-11
3 | message: "If you use this software, please cite it as below."
4 | title: "Adapters: A Unified Library for Parameter-Efficient and
5 | Modular Transfer Learning"
6 | url: "https://github.com/Adapter-Hub/adapters"
7 | authors:
8 | - family-names: Poth
9 | given-names: Clifton
10 | - family-names: Sterz
11 | given-names: Hannah
12 | - family-names: Paul
13 | given-names: Indraneil
14 | - family-names: Purkayastha
15 | given-names: Sukannya
16 | - family-names: Engländer
17 | given-names: Leon
18 | - family-names: Imhof
19 | given-names: Timo
20 | - family-names: Vulić
21 | given-names: Ivan
22 | - family-names: Ruder
23 | given-names: Sebastian
24 | - family-names: Gurevych
25 | given-names: Iryna
26 | - family-names: Pfeiffer
27 | given-names: Jonas
28 | preferred-citation:
29 | type: conference-paper
30 | authors:
31 | - family-names: Poth
32 | given-names: Clifton
33 | - family-names: Sterz
34 | given-names: Hannah
35 | - family-names: Paul
36 | given-names: Indraneil
37 | - family-names: Purkayastha
38 | given-names: Sukannya
39 | - family-names: Engländer
40 | given-names: Leon
41 | - family-names: Imhof
42 | given-names: Timo
43 | - family-names: Vulić
44 | given-names: Ivan
45 | - family-names: Ruder
46 | given-names: Sebastian
47 | - family-names: Gurevych
48 | given-names: Iryna
49 | - family-names: Pfeiffer
50 | given-names: Jonas
51 | booktitle: "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: System Demonstrations"
52 | month: 12
53 | start: 149
54 | end: 160
55 | title: "Adapters: A Unified Library for Parameter-Efficient and Modular Transfer Learning"
56 | year: 2023
57 | publisher: "Association for Computational Linguistics"
58 | url: "https://aclanthology.org/2023.emnlp-demo.13"
59 | address: "Singapore"
60 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: extra_style_checks quality style test test-adapter-methods test-adapter-models test-examples
2 |
3 | # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
4 | export PYTHONPATH = src
5 |
6 | check_dirs := examples tests src utils
7 |
8 | # this target runs checks on all files
9 |
10 | quality:
11 | black --check --preview $(check_dirs)
12 | isort --check-only $(check_dirs)
13 | python utils/custom_init_isort.py --check_only
14 | python utils/sort_auto_mappings.py --check_only
15 | flake8 $(check_dirs)
16 | python utils/check_inits.py
17 |
18 | # Format source code automatically and check if there are any problems left that need manual fixing
19 |
20 | extra_style_checks:
21 | python utils/custom_init_isort.py
22 | python utils/sort_auto_mappings.py
23 |
24 | # this target runs checks on all files and potentially modifies some of them
25 |
26 | style:
27 | black --preview $(check_dirs)
28 | isort $(check_dirs)
29 | ${MAKE} extra_style_checks
30 |
31 | # Library Tests
32 |
33 | # run all tests in the library
34 | test:
35 | python -m pytest -n auto --dist=loadfile -s -v ./tests/
36 | python -c "import transformers; print(transformers.__version__)"
37 |
38 | # run all tests for the adapter methods for all adapter models
39 | test-adapter-methods:
40 | python -m pytest -n auto --dist=loadfile -s -v ./tests/test_methods/
41 |
42 | # run a subset of the adapter method tests for all adapter models
43 | # list of all subsets: [core, heads, embeddings, composition, prefix_tuning, prompt_tuning, reft, unipelt, compacter, bottleneck, ia3, lora, config_union]
44 | subset ?=
45 | test-adapter-method-subset:
46 | @echo "Running subset $(subset)"
47 | python -m pytest -n auto --dist=loadfile -s -v ./tests/test_methods/ -m $(subset)
48 |
49 |
50 | # run the Hugging Face test suite for all adapter models
51 | test-adapter-models:
52 | python -m pytest -n auto --dist=loadfile -s -v ./tests/test_models/
53 |
54 | # Run tests for examples
55 | test-examples:
56 | python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/
57 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | html-multi-version:
18 | sphinx-multiversion "$(SOURCEDIR)" "$(BUILDDIR)/html" $(SPHINXOPTS) $(O)
19 | python scripts/post_build.py "$(BUILDDIR)/html"
20 |
21 | # Catch-all target: route all unknown targets to Sphinx using the new
22 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
23 | %: Makefile
24 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
25 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # The adapters documentation
2 |
3 | This is the documentation of the adapter-related parts of the `transformers` library and the Adapter-Hub. Hugging Face's documentation of the underlying base library can be found in the `transformers` repository.
4 |
5 | ## Installing & Building
6 |
7 | Building the documentation requires some additional packages to be installed. You can install them by running the following command in the root folder:
8 |
9 | ```bash
10 | pip install -e ".[docs]"
11 | ```
12 |
13 | Cleaning and regenerating the documentation files can be done using `sphinx` by running the following command in the `/docs` folder:
14 |
15 | ```bash
16 | make clean && make html
17 | ```
18 |
19 | The build output will be located in `/docs/_build/html`.
20 |
--------------------------------------------------------------------------------
/docs/_config.py:
--------------------------------------------------------------------------------
1 | # docstyle-ignore
2 | INSTALL_CONTENT = """
3 | # Transformers installation
4 | ! pip install transformers datasets
5 | # To install from source instead of the last release, comment the command above and uncomment the following one.
6 | # ! pip install git+https://github.com/huggingface/transformers.git
7 | """
8 |
9 | notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
10 | black_avoid_patterns = {
11 | "{processor_class}": "FakeProcessorClass",
12 | "{model_class}": "FakeModelClass",
13 | "{object_class}": "FakeObjectClass",
14 | }
15 |
--------------------------------------------------------------------------------
/docs/_static/custom.css:
--------------------------------------------------------------------------------
1 | /* The search field on top of the toc tree */
2 | /* Mobile header */
3 | .wy-side-nav-search, .wy-nav-top {
4 | background: #39B3C6;
5 | }
6 | /* toc tree text */
7 | .wy-menu-vertical header,
8 | .wy-menu-vertical p.caption {
9 | color: #39B3C6
10 | }
11 | /* toc tree activated link */
12 | .wy-menu-vertical a:active {
13 | background-color:#39B3C6;
14 | }
15 | /* Links */
16 | a {
17 | color: #39B3C6
18 | }
19 | /* Source spans */
20 | .rst-content .viewcode-link, .rst-content .viewcode-back{
21 | color: #39B3C6;
22 | }
23 | /* The literal code blocks */
24 | .rst-content tt.literal, .rst-content code.literal {
25 | color: #666;
26 | }
27 | .rst-content a code.literal {
28 | color: #39B3C6;
29 | }
30 | /* Sidebar scroll space for version switcher */
31 | .wy-side-scroll {
32 | padding-bottom: 1em;
33 | }
34 |
35 | /* override table no-wrap */
36 | .wy-table-responsive table td, .wy-table-responsive table th {
37 | white-space: normal;
38 | }
39 |
--------------------------------------------------------------------------------
/docs/_templates/versions.html:
--------------------------------------------------------------------------------
1 |
2 | {%- if current_version %}
3 |
4 |
5 | Versions
6 | v: {{ current_version.name | replace('adapters', 'v') }}
7 |
8 |
9 |
10 | {%- if versions.tags %}
11 |
12 | - Tags
13 | {%- for item in versions.tags %}
14 | {%- if current_version.name == 'main' -%}
15 | {%- set item_url = item.url | replace('../', './', 1) -%}
16 | {%- else -%}
17 | {%- set item_url = item.url -%}
18 | {%- endif -%}
19 | - {{ item.name | replace('adapters', 'v') }}
20 | {%- endfor %}
21 |
22 | {%- endif %}
23 | {%- if versions.branches %}
24 |
25 | - Branches
26 | {%- for item in versions.branches %}
27 | - {{ item.name }}
28 | {%- endfor %}
29 |
30 | {%- endif %}
31 |
32 |
33 | {%- endif %}
--------------------------------------------------------------------------------
/docs/classes/adapter_layer.rst:
--------------------------------------------------------------------------------
1 | Adapter Implementation
2 | =======================
3 |
4 | The following classes define the common interfaces for all adapter methods.
5 | They further hold logic shared by all adapter implementations.
6 | All newly added adapter methods should inherit from either one of these classes.
7 |
8 | .. autoclass:: adapters.AdapterLayerBase
9 | :members:
10 |
11 | .. autoclass:: adapters.ComposableAdapterLayerBase
12 | :members:
13 |
--------------------------------------------------------------------------------
/docs/classes/adapter_model_interface.rst:
--------------------------------------------------------------------------------
1 | Adapter Model Interface
2 | =======================
3 |
4 | .. autoclass:: adapters.AdapterModelInterface
5 | :members:
6 |
7 | .. autoclass:: adapters.AdapterMethod
8 | :members:
9 |
--------------------------------------------------------------------------------
/docs/classes/adapter_training.rst:
--------------------------------------------------------------------------------
1 | Adapter Training
2 | ====================
3 |
4 | Classes and methods related to training adapters.
5 |
6 | .. automodule:: adapters.training
7 | :members:
8 |
9 | .. automodule:: adapters.trainer
10 | :members:
11 |
--------------------------------------------------------------------------------
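To make the classes above concrete, here is a minimal training sketch (the checkpoint, adapter/head names, and the two-example dummy dataset are arbitrary choices for illustration): `AdapterTrainer` is used in place of the Hugging Face `Trainer` when only the adapter (and head) weights should be trained.

```python
from datasets import Dataset
from transformers import AutoTokenizer, TrainingArguments
from adapters import AdapterTrainer, AutoAdapterModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoAdapterModel.from_pretrained("bert-base-uncased")
model.add_adapter("demo")
model.add_classification_head("demo", num_labels=2)
model.train_adapter("demo")  # freezes the base model; only adapter + head are trained

# Tiny dummy dataset, just enough to make the sketch runnable.
data = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]})
data = data.map(lambda x: tokenizer(x["text"], padding="max_length", max_length=16, truncation=True))

trainer = AdapterTrainer(
    model=model,
    args=TrainingArguments(output_dir="./out", num_train_epochs=1, report_to="none"),
    train_dataset=data,
)
trainer.train()
```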
/docs/classes/adapter_utils.rst:
--------------------------------------------------------------------------------
1 | Adapter Utilities
2 | ====================
3 |
4 | A collection of utility methods mainly related to searching and loading adapter modules from
5 | Adapter-Hub.
6 |
7 | .. automodule:: adapters.utils
8 | :members:
9 |
--------------------------------------------------------------------------------
/docs/classes/model_adapters_config.rst:
--------------------------------------------------------------------------------
1 | Model Adapters Config
2 | =======================
3 |
4 | This class manages the setup and configuration of adapter modules in a pre-trained model.
5 |
6 | .. autoclass:: adapters.ModelAdaptersConfig
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/classes/model_mixins.rst:
--------------------------------------------------------------------------------
1 | Model Mixins
2 | =======================
3 |
4 | These classes provide the basis for integrating adapter modules into model classes and hold shared functionality such as adapter saving and loading.
5 | Depending on the model, one of these mixins should be implemented by every adapter-supporting model class.
6 |
7 | InvertibleAdaptersMixin
8 | ----------------------------------
9 |
10 | .. autoclass:: adapters.InvertibleAdaptersMixin
11 | :members:
12 |
13 |
14 | EmbeddingAdaptersMixin
15 | ----------------------------------
16 |
17 | .. autoclass:: adapters.EmbeddingAdaptersMixin
18 | :members:
19 |
20 |
21 | ModelAdaptersMixin
22 | ------------------
23 |
24 | .. autoclass:: adapters.ModelAdaptersMixin
25 | :members:
26 |
27 | ModelWithHeadsAdaptersMixin
28 | ----------------------------------
29 |
30 | .. autoclass:: adapters.ModelWithHeadsAdaptersMixin
31 | :members:
32 |
33 | ModelWithFlexibleHeadsAdaptersMixin
34 | ---------------------------------------
35 |
36 | .. autoclass:: adapters.ModelWithFlexibleHeadsAdaptersMixin
37 | :members:
38 |
39 | PushAdapterToHubMixin
40 | ----------------------
41 |
42 | .. autoclass:: adapters.hub_mixin.PushAdapterToHubMixin
43 | :members:
44 |
--------------------------------------------------------------------------------
/docs/classes/models/albert.rst:
--------------------------------------------------------------------------------
1 | ALBERT
2 | ======
3 |
4 | .. note::
5 | Adapter implementation notes for ALBERT:
6 | - As layers are shared between groups, adapters added to a layer are also shared between groups. Therefore, changing the adapter configuration for a layer affects the behavior of all groups that use this layer.
7 | - As usual, the ``leave_out`` parameter can be used to specify the layers in which adapters should be added. The layer IDs are counted by putting all layers of the groups into a sequence depending on the group number and their position in the group. I.e., for an ALBERT model with `inner_group_num=2` the first layer of the first group has ID 0, the second layer of the first group has ID 1, the first layer of the second group has ID 2, etc.
8 |
9 |
10 | The ALBERT model was proposed in `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations <https://arxiv.org/abs/1909.11942>`__
11 | by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
12 | It presents two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT:
13 |
14 | - Splitting the embedding matrix into two smaller matrices.
15 | - Using repeating layers split among groups.
16 |
17 | AlbertAdapterModel
18 | ~~~~~~~~~~~~~~~~~~~~
19 |
20 | .. autoclass:: adapters.AlbertAdapterModel
21 | :members:
22 | :inherited-members: AlbertPreTrainedModel
23 |
--------------------------------------------------------------------------------
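To illustrate the ``leave_out`` note above, a minimal sketch of adding a bottleneck adapter to ALBERT while skipping selected (shared) layers. The checkpoint, adapter name, and skipped layer IDs are arbitrary, and `SeqBnConfig` is just one of several possible adapter configs:

```python
from adapters import AutoAdapterModel, SeqBnConfig

model = AutoAdapterModel.from_pretrained("albert-base-v2")

# Skip layers 0 and 1. Because ALBERT shares layer weights across groups,
# leaving out a layer ID affects every group that reuses that layer.
model.add_adapter("my_task", config=SeqBnConfig(leave_out=[0, 1]))
model.set_active_adapters("my_task")
```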
/docs/classes/models/auto.rst:
--------------------------------------------------------------------------------
1 | Auto Classes
2 | ============
3 |
4 | Similar to the ``AutoModel`` classes built into Hugging Face Transformers, adapters provides an ``AutoAdapterModel`` class.
5 | As with other auto classes, the correct adapter model class is automatically instantiated based on the pre-trained model passed to the ``from_pretrained()`` method.
6 |
7 | .. note::
8 | If the model loaded with the ``from_pretrained(...)`` function has a head, this head gets loaded as well. However, this only works for non-sharded models. If you want to load a sharded model with a head, you first need to load the model and then the head separately.
9 |
10 | AutoAdapterModel
11 | ~~~~~~~~~~~~~~~~~~~~
12 |
13 | .. autoclass:: adapters.AutoAdapterModel
14 | :members:
15 |
--------------------------------------------------------------------------------
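As a short usage sketch (checkpoint and head name chosen arbitrarily): ``AutoAdapterModel`` resolves to the matching ``*AdapterModel`` class, and, per the note above, a fresh prediction head can be attached manually whenever no head was loaded:

```python
from adapters import AutoAdapterModel

# Instantiates BertAdapterModel based on the checkpoint's config.
model = AutoAdapterModel.from_pretrained("bert-base-uncased")

# If no head was loaded (e.g. for a sharded checkpoint), add one manually.
model.add_classification_head("sentiment", num_labels=2)
```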
/docs/classes/models/bart.rst:
--------------------------------------------------------------------------------
1 | BART
2 | =====
3 |
4 | The Bart model was proposed in `BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation,
5 | Translation, and Comprehension <https://arxiv.org/abs/1910.13461>`__ by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan
6 | Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer on 29 Oct, 2019.
7 |
8 | According to the abstract,
9 |
10 | - Bart uses a standard seq2seq/machine translation architecture with a bidirectional encoder (like BERT) and a
11 | left-to-right decoder (like GPT).
12 | - The pretraining task involves randomly shuffling the order of the original sentences and a novel in-filling scheme,
13 | where spans of text are replaced with a single mask token.
14 | - BART is particularly effective when fine tuned for text generation but also works well for comprehension tasks. It
15 | matches the performance of RoBERTa with comparable training resources on GLUE and SQuAD, achieves new
16 | state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains
17 | of up to 6 ROUGE.
18 |
19 |
20 | BartAdapterModel
21 | ~~~~~~~~~~~~~~~~~~~~
22 |
23 | .. autoclass:: adapters.BartAdapterModel
24 | :members:
25 | :inherited-members: BartPreTrainedModel
26 |
--------------------------------------------------------------------------------
/docs/classes/models/beit.rst:
--------------------------------------------------------------------------------
1 | BEiT
2 | ======
3 |
4 | The Bidirectional Encoder representation from Image Transformers (BEiT) model was proposed in `BERT Pre-Training of Image
5 | Transformers <https://arxiv.org/abs/2106.08254>`__ by Hangbo Bao, Li Dong, Songhao Piao, Furu Wei.
6 |
7 |
8 | The abstract from the paper is the following:
9 |
10 | *We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation
11 | from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image
12 | modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image
13 | patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into
14 | visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training
15 | objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we
16 | directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder.
17 | Experimental results on image classification and semantic segmentation show that our model achieves competitive results
18 | with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K,
19 | significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains
20 | 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).*
21 |
22 | BeitAdapterModel
23 | ~~~~~~~~~~~~~~~~~~~~
24 |
25 | .. autoclass:: adapters.BeitAdapterModel
26 | :members:
27 | :inherited-members: BeitPreTrainedModel
28 |
--------------------------------------------------------------------------------
/docs/classes/models/bert-generation.rst:
--------------------------------------------------------------------------------
1 | ..
2 | Copyright 2020 The HuggingFace Team. All rights reserved.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
5 | the License. You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
10 | an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
11 | specific language governing permissions and limitations under the License.
12 |
13 | BertGeneration
14 | -----------------------------------------------------------------------------------------------------------------------
15 |
16 | Overview
17 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 |
19 | The BertGeneration model is a BERT model that can be leveraged for sequence-to-sequence tasks using
20 | EncoderDecoderModel as proposed in `Leveraging Pre-trained Checkpoints for Sequence Generation
21 | Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
22 |
23 | The abstract from the paper is the following:
24 |
25 | *Unsupervised pretraining of large neural models has recently revolutionized Natural Language Processing. By
26 | warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple
27 | benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language
28 | Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. We
29 | developed a Transformer-based sequence-to-sequence model that is compatible with publicly available pre-trained BERT,
30 | GPT-2 and RoBERTa checkpoints and conducted an extensive empirical study on the utility of initializing our model, both
31 | encoder and decoder, with these checkpoints. Our models result in new state-of-the-art results on Machine Translation,
32 | Text Summarization, Sentence Splitting, and Sentence Fusion.*
33 |
34 |
35 | BertGenerationAdapterModel
36 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
37 |
38 | .. autoclass:: adapters.BertGenerationAdapterModel
39 | :members:
40 | :inherited-members: BertGenerationPreTrainedModel
41 |
--------------------------------------------------------------------------------
/docs/classes/models/bert.rst:
--------------------------------------------------------------------------------
1 | BERT
2 | ======
3 |
4 | The BERT model was proposed in `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`__
5 | by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It is a bidirectional transformer
6 | pre-trained using a combination of masked language modeling objective and next sentence prediction.
7 |
8 |
9 | BertAdapterModel
10 | ~~~~~~~~~~~~~~~~~~~~
11 |
12 | .. autoclass:: adapters.BertAdapterModel
13 | :members:
14 | :inherited-members: BertPreTrainedModel
15 |
--------------------------------------------------------------------------------
/docs/classes/models/distilbert.rst:
--------------------------------------------------------------------------------
1 | DistilBERT
2 | ===========
3 |
4 | The DistilBERT model was proposed in the blog post
5 | `Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT <https://medium.com/huggingface/distilbert-8cf3380435b5>`__,
6 | and the paper `DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter <https://arxiv.org/abs/1910.01108>`__.
7 | DistilBERT is a small, fast, cheap and light Transformer model trained by distilling BERT base. It has 40% fewer
8 | parameters than `bert-base-uncased` and runs 60% faster while preserving over 95% of BERT's performance as measured on
9 | the GLUE language understanding benchmark.
10 |
11 |
12 | DistilBertAdapterModel
13 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 |
15 | .. autoclass:: adapters.DistilBertAdapterModel
16 | :members:
17 | :inherited-members: DistilBertPreTrainedModel
18 |
--------------------------------------------------------------------------------
/docs/classes/models/electra.rst:
--------------------------------------------------------------------------------
1 | ELECTRA
2 | =======
3 |
4 | The ELECTRA model was proposed in the paper `ELECTRA: Pre-training Text Encoders as Discriminators Rather Than
5 | Generators <https://arxiv.org/abs/2003.10555>`__. ELECTRA is a new pretraining approach which trains two
6 | transformer models: the generator and the discriminator. The generator's role is to replace tokens in a sequence, and
7 | is therefore trained as a masked language model. The discriminator, which is the model we're interested in, tries to
8 | identify which tokens were replaced by the generator in the sequence.
9 |
10 | The abstract from the paper is the following:
11 |
12 | *Masked language modeling (MLM) pretraining methods such as BERT corrupt the input by replacing some tokens with [MASK]
13 | and then train a model to reconstruct the original tokens. While they produce good results when transferred to
14 | downstream NLP tasks, they generally require large amounts of compute to be effective. As an alternative, we propose a
15 | more sample-efficient pretraining task called replaced token detection. Instead of masking the input, our approach
16 | corrupts it by replacing some tokens with plausible alternatives sampled from a small generator network. Then, instead
17 | of training a model that predicts the original identities of the corrupted tokens, we train a discriminative model that
18 | predicts whether each token in the corrupted input was replaced by a generator sample or not. Thorough experiments
19 | demonstrate this new pretraining task is more efficient than MLM because the task is defined over all input tokens
20 | rather than just the small subset that was masked out. As a result, the contextual representations learned by our
21 | approach substantially outperform the ones learned by BERT given the same model size, data, and compute. The gains are
22 | particularly strong for small models; for example, we train a model on one GPU for 4 days that outperforms GPT (trained
23 | using 30x more compute) on the GLUE natural language understanding benchmark. Our approach also works well at scale,
24 | where it performs comparably to RoBERTa and XLNet while using less than 1/4 of their compute and outperforms them when
25 | using the same amount of compute.*
26 |
27 | ElectraAdapterModel
28 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
29 |
30 | .. autoclass:: adapters.ElectraAdapterModel
31 | :members:
32 | :inherited-members: ElectraPreTrainedModel
33 |
--------------------------------------------------------------------------------
/docs/classes/models/gpt2.rst:
--------------------------------------------------------------------------------
1 | OpenAI GPT2
2 | -----------------------------------------------------------------------------------------------------------------------
3 |
4 | OpenAI GPT-2 model was proposed in `Language Models are Unsupervised Multitask Learners
5 | <https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf>`_ by Alec
6 | Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever. It's a causal (unidirectional)
7 | transformer pretrained using language modeling on a very large corpus of ~40 GB of text data.
8 |
9 | The abstract from the paper is the following:
10 |
11 | *GPT-2 is a large transformer-based language model with 1.5 billion parameters, trained on a dataset[1] of 8 million
12 | web pages. GPT-2 is trained with a simple objective: predict the next word, given all of the previous words within some
13 | text. The diversity of the dataset causes this simple goal to contain naturally occurring demonstrations of many tasks
14 | across diverse domains. GPT-2 is a direct scale-up of GPT, with more than 10X the parameters and trained on more than
15 | 10X the amount of data.*
16 |
17 |
18 | GPT2AdapterModel
19 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 |
21 | .. autoclass:: adapters.GPT2AdapterModel
22 | :members:
23 | :inherited-members: GPT2PreTrainedModel
24 |
--------------------------------------------------------------------------------
/docs/classes/models/gptj.rst:
--------------------------------------------------------------------------------
1 | EleutherAI GPT-J-6B
2 | -----------------------------------------------------------------------------------------------------------------------
3 |
4 | EleutherAI GPT-J-6B is an open source, autoregressive language model created by a group of researchers called
5 | EleutherAI. It's one of the most advanced alternatives to OpenAI's GPT-3 and performs well on a wide array of
6 | natural language tasks such as chat, summarization, and question answering, to name a few.
7 |
8 | For a deeper dive, GPT-J is a transformer model trained using Ben Wang's `Mesh Transformer JAX
9 | <https://github.com/kingoflolz/mesh-transformer-jax>`_. "GPT" is short for
10 | generative pre-trained transformer, "J" distinguishes this model from other GPT models, and "6B" represents the 6
11 | billion trainable parameters.
12 |
13 | The model consists of 28 layers with a model dimension of 4096, and a feedforward dimension of 16384. The model
14 | dimension is split into 16 heads, each with a dimension of 256. Rotary Position Embedding (RoPE) is applied to
15 | 64 dimensions of each head. The model is trained with a tokenization vocabulary of 50257, using the same set of
16 | BPEs as GPT-2/GPT-3.
17 |
18 |
19 | GPTJAdapterModel
20 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 |
22 | .. autoclass:: adapters.GPTJAdapterModel
23 | :members:
24 | :inherited-members: GPTJPreTrainedModel
25 |
--------------------------------------------------------------------------------
/docs/classes/models/llama.rst:
--------------------------------------------------------------------------------
1 | LLaMA
2 | -----------------------------------------------------------------------------------------------------------------------
3 |
4 | .. note::
5 | Loading a ``LlamaForQuestionAnswering`` via [`AutoAdapterModel`](adapters.AutoAdapterModel) or via [`LlamaAdapterModel`](adapters.LlamaAdapterModel) does not load the head, even if the model is not sharded. Please load the base model first and then subsequently the head.
6 | Note that for sharded models the head is never automatically loaded as described here: [Auto Classes](auto.rst)
7 |
8 |
9 | The LLaMA model was proposed in `LLaMA: Open and Efficient Foundation Language Models <https://arxiv.org/abs/2302.13971>`__ by
10 | Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal,
11 | Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. It is a collection of foundation language
12 | models ranging from 7B to 65B parameters.
13 |
14 | The abstract from the paper is the following:
15 |
16 | *We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens,
17 | and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary
18 | and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models,
19 | Chinchilla-70B and PaLM-540B. We release all our models to the research community.*
20 |
21 | LlamaAdapterModel
22 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 |
24 | .. autoclass:: adapters.LlamaAdapterModel
25 | :members:
26 | :inherited-members: LlamaPreTrainedModel
27 |
--------------------------------------------------------------------------------
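Following the note above, a minimal sketch of the suggested order (the checkpoint and head name are placeholders, and the head attached here is newly initialized rather than loaded from the checkpoint):

```python
from adapters import LlamaAdapterModel

# Load only the base model weights first.
model = LlamaAdapterModel.from_pretrained("meta-llama/Llama-2-7b-hf")

# Then set up the prediction head separately in a second step.
model.add_causal_lm_head("lm")
```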
/docs/classes/models/mbart.rst:
--------------------------------------------------------------------------------
1 | MBart
2 | -----------------------------------------------------------------------------------------------------------------------
3 |
4 | The MBart model was presented in `Multilingual Denoising Pre-training for Neural Machine Translation
5 | <https://arxiv.org/abs/2001.08210>`_ by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan
6 | Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
7 |
8 | According to the abstract, MBART is a sequence-to-sequence denoising auto-encoder pretrained on large-scale monolingual
9 | corpora in many languages using the BART objective. mBART is one of the first methods for pretraining a complete
10 | sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only
11 | on the encoder, decoder, or reconstructing parts of the text.
12 |
13 |
14 | MBartAdapterModel
15 | ~~~~~~~~~~~~~~~~~~~~
16 |
17 | .. autoclass:: adapters.MBartAdapterModel
18 | :members:
19 | :inherited-members: MBartPreTrainedModel
20 |
--------------------------------------------------------------------------------
/docs/classes/models/mistral.rst:
--------------------------------------------------------------------------------
1 | Mistral
2 | -----------------------------------------------------------------------------------------------------------------------
3 |
4 | The Mistral model was proposed in `Mistral 7B <https://arxiv.org/abs/2310.06825>`__ by
5 | Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas,
6 | Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux,
7 | Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
8 | It is a foundation language model with 7.3B parameters.
9 |
10 | The abstract from the paper is the following:
11 |
12 | *We introduce Mistral 7B, a 7-billion-parameter language model engineered for
13 | superior performance and efficiency. Mistral 7B outperforms the best open 13B
14 | model (Llama 2) across all evaluated benchmarks, and the best released 34B
15 | model (Llama 1) in reasoning, mathematics, and code generation. Our model
16 | leverages grouped-query attention (GQA) for faster inference, coupled with sliding
17 | window attention (SWA) to effectively handle sequences of arbitrary length with a
18 | reduced inference cost. We also provide a model fine-tuned to follow instructions,
19 | Mistral 7B - Instruct, that surpasses Llama 2 13B - chat model both on human and
20 | automated benchmarks. Our models are released under the Apache 2.0 license.*
21 |
22 | Code: https://github.com/mistralai/mistral-src
23 | Webpage: https://mistral.ai/news/announcing-mistral-7b/
24 |
25 |
26 | MistralAdapterModel
27 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
28 |
29 | .. autoclass:: adapters.MistralAdapterModel
30 | :members:
31 | :inherited-members: MistralPreTrainedModel
32 |
--------------------------------------------------------------------------------
/docs/classes/models/mt5.rst:
--------------------------------------------------------------------------------
1 | MT5
2 | =====
3 |
4 | The mT5 model was presented in `mT5: A massively multilingual pre-trained text-to-text transformer
5 | <https://arxiv.org/abs/2010.11934>`__ by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou,
6 | Aditya Siddhant, Aditya Barua, Colin Raffel.
7 |
8 | The abstract from the paper is the following:
9 |
10 |
11 | - The recent "Text-to-Text Transfer Transformer" (T5) leveraged a unified text-to-text format and scale to attain
12 | state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a
13 | multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. We detail
14 | the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual
15 | benchmarks. We also describe a simple technique to prevent "accidental translation" in the zero-shot setting, where a
16 | generative model chooses to (partially) translate its prediction into the wrong language. All of the code and model
17 | checkpoints used in this work are publicly available.
18 |
19 | MT5AdapterModel
20 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 |
22 | .. autoclass:: adapters.MT5AdapterModel
23 | :members:
24 | :inherited-members: MT5PreTrainedModel
--------------------------------------------------------------------------------
/docs/classes/models/plbart.rst:
--------------------------------------------------------------------------------
1 | PLBART
2 | ======
3 |
4 | The PLBART model was proposed in [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
5 | This is a BART-like model which can be used to perform code-summarization, code-generation, and code-translation tasks. The pre-trained model `plbart-base` has been trained using a multilingual denoising task
6 | on Java, Python and English.
7 |
8 | According to the abstract,
9 |
10 | - PLBART is a sequence-to-sequence model capable of performing a broad spectrum of program and language understanding and generation tasks.
11 | - PLBART is pre-trained on an extensive collection of Java and Python functions and associated NL text via denoising autoencoding.
12 | - PLBART learns program syntax, style (e.g., identifier naming convention) and logical flow.
13 |
14 |
15 | PLBartAdapterModel
16 | ~~~~~~~~~~~~~~~~~~~~
17 |
18 | .. autoclass:: adapters.PLBartAdapterModel
19 | :members:
20 | :inherited-members: PLBartPretrainedModel
21 |
--------------------------------------------------------------------------------
/docs/classes/models/roberta.rst:
--------------------------------------------------------------------------------
1 | RoBERTa
2 | ========
3 |
4 | The RoBERTa model was proposed in `RoBERTa: A Robustly Optimized BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`_
5 | by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
6 | Veselin Stoyanov. It is based on Google's BERT model released in 2018.
7 |
8 |
9 | RobertaAdapterModel
10 | ~~~~~~~~~~~~~~~~~~~~~~~~~
11 |
12 | .. autoclass:: adapters.RobertaAdapterModel
13 | :members:
14 | :inherited-members: RobertaPreTrainedModel
15 |
--------------------------------------------------------------------------------
/docs/classes/models/t5.rst:
--------------------------------------------------------------------------------
1 | T5
2 | =====
3 |
4 | The T5 model was presented in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
5 | <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
6 | Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.
7 |
8 | The abstract from the paper is the following:
9 |
10 |
11 | - T5 is an encoder-decoder model pre-trained on a multi-task mixture of unsupervised and supervised tasks and for which
12 | each task is converted into a text-to-text format. T5 works well on a variety of tasks out-of-the-box by prepending a
13 | different prefix to the input corresponding to each task, e.g., for translation: *translate English to German: ...*,
14 | for summarization: *summarize: ...*.
15 |
16 | For more information about which prefix to use, it is easiest to look into Appendix D of the `paper
17 | <https://arxiv.org/abs/1910.10683>`__.
18 |
19 |
20 | T5AdapterModel
21 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 |
23 | .. autoclass:: adapters.T5AdapterModel
24 | :members:
25 | :inherited-members: T5PreTrainedModel
26 |
--------------------------------------------------------------------------------
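To illustrate the task prefixes described above, a small sketch (checkpoint and head name chosen arbitrarily; the head added here is newly initialized, so meaningful output requires a trained head):

```python
from transformers import AutoTokenizer
from adapters import T5AdapterModel

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = T5AdapterModel.from_pretrained("t5-small")
model.add_seq2seq_lm_head("gen")

# The task is selected purely through the input prefix.
inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs)[0], skip_special_tokens=True))
```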
/docs/classes/models/vit.rst:
--------------------------------------------------------------------------------
1 | Vision Transformer (ViT)
2 | =========================
3 |
4 | The Vision Transformer (ViT) model was proposed in `An Image is Worth 16x16 Words: Transformers for Image Recognition
5 | at Scale <https://arxiv.org/abs/2010.11929>`__ by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk
6 | Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob
7 | Uszkoreit, Neil Houlsby. It's the first paper that successfully trains a Transformer encoder on ImageNet, attaining
8 | very good results compared to familiar convolutional architectures.
9 |
10 |
11 | The abstract from the paper is the following:
12 |
13 | *While the Transformer architecture has become the de-facto standard for natural language processing tasks, its
14 | applications to computer vision remain limited. In vision, attention is either applied in conjunction with
15 | convolutional networks, or used to replace certain components of convolutional networks while keeping their overall
16 | structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to
17 | sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of
18 | data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.),
19 | Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring
20 | substantially fewer computational resources to train.*
21 |
22 | ViTAdapterModel
23 | ~~~~~~~~~~~~~~~~~~~~
24 |
25 | .. autoclass:: adapters.ViTAdapterModel
26 | :members:
27 | :inherited-members: ViTPreTrainedModel
28 |
--------------------------------------------------------------------------------
/docs/classes/models/whisper.rst:
--------------------------------------------------------------------------------
1 | Whisper
2 | -----------------------------------------------------------------------------------------------------------------------
3 |
4 | The Whisper model was presented in `Robust Speech Recognition via Large-Scale Weak Supervision
5 | <https://arxiv.org/abs/2212.04356>`_ by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine
6 | McLeavey, Ilya Sutskever.
7 |
8 | Whisper is a state-of-the-art speech recognition model trained on 680,000 hours of multilingual and multitask data, presented by OpenAI.
9 |
10 | The abstract from the paper is the following:
11 |
12 | *We study the capabilities of speech processing systems trained simply to predict large amounts of
13 | transcripts of audio on the internet. When scaled to 680,000 hours of multilingual and multitask
14 | supervision, the resulting models generalize well to standard benchmarks and are often competitive
15 | with prior fully supervised results but in a zero-shot transfer setting without the need for any fine-tuning. When compared to humans, the models
16 | approach their accuracy and robustness. We are releasing models and inference code to serve as
17 | a foundation for further work on robust speech processing.*
18 |
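19 | Since adapter methods are model-agnostic, adding and training an adapter on Whisper follows the usual workflow. A minimal sketch (the checkpoint and adapter names are only examples):
20 |
21 | .. code-block:: python
22 |
23 |     from adapters import WhisperAdapterModel
24 |
25 |     model = WhisperAdapterModel.from_pretrained("openai/whisper-small")
26 |     model.add_adapter("asr_adapter", config="seq_bn")
27 |     # Freeze the base model weights and train only the adapter:
28 |     model.train_adapter("asr_adapter")
29 |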
19 |
20 | WhisperAdapterModel
21 | ~~~~~~~~~~~~~~~~~~~~
22 |
23 | .. autoclass:: adapters.WhisperAdapterModel
24 | :members:
25 | :inherited-members: WhisperPreTrainedModel
--------------------------------------------------------------------------------
/docs/classes/models/xlmroberta.rst:
--------------------------------------------------------------------------------
1 | XLM-RoBERTa
2 | ============
3 |
4 | The XLM-RoBERTa model was proposed in `Unsupervised Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`__
5 | by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán,
6 | Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook's RoBERTa model released in 2019.
7 | It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data.
8 |
9 |
10 | XLMRobertaAdapterModel
11 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 |
13 | .. autoclass:: adapters.XLMRobertaAdapterModel
14 | :members:
15 |
--------------------------------------------------------------------------------
/docs/classes/models/xmod.rst:
--------------------------------------------------------------------------------
1 | X-MOD
2 | =====
3 |
4 | .. important::
5 | The X-MOD implementation integrated into Transformers already supports adapters.
6 | To make this implementation compatible with Adapters, a few changes were necessary:
7 |
8 | - Pre-trained X-MOD checkpoints require conversion before they can be used with Adapters. We provide pre-converted checkpoints for the following models:
9 |       - ``facebook/xmod-base`` -> ``AdapterHub/xmod-base`` with language adapters split into separate repos (e.g. ``AdapterHub/xmod-base-af_ZA``)
10 | - In Adapters, the X-MOD classes rely on the usual adapter methods instead of the custom methods introduced in Transformers, i.e.:
11 | - ``set_active_adapters()`` instead of ``set_default_language()``.
12 |       - ``AdapterSetup`` context instead of ``lang_ids`` parameter (see the usage example below).
13 |
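14 | For example, loading a pre-converted checkpoint and activating one of its language adapters might look as follows (a minimal sketch using the checkpoint names listed above; exact loading arguments may vary):
15 |
16 | .. code-block:: python
17 |
18 |     from adapters import AutoAdapterModel
19 |
20 |     model = AutoAdapterModel.from_pretrained("AdapterHub/xmod-base")
21 |     # Load one language adapter from its own repo and make it active:
22 |     lang_adapter = model.load_adapter("AdapterHub/xmod-base-af_ZA")
23 |     model.set_active_adapters(lang_adapter)
24 |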
14 | The abstract from the paper is the following:
15 |
16 | *Multilingual pre-trained models are known to suffer from the curse of multilinguality, which causes per-language performance to drop as they cover more languages. We address this issue by introducing language-specific modules, which allows us to grow the total capacity of the model, while keeping the total number of trainable parameters per language constant. In contrast with prior work that learns language-specific components post-hoc, we pre-train the modules of our Cross-lingual Modular (X-MOD) models from the start. Our experiments on natural language inference, named entity recognition and question answering show that our approach not only mitigates the negative interference between languages, but also enables positive transfer, resulting in improved monolingual and cross-lingual performance. Furthermore, our approach enables adding languages post-hoc with no measurable drop in performance, no longer limiting the model usage to the set of pre-trained languages.*
17 |
18 | XmodAdapterModel
19 | ~~~~~~~~~~~~~~~~~~~~
20 |
21 | .. autoclass:: adapters.XmodAdapterModel
22 | :members:
23 | :inherited-members: XmodPreTrainedModel
24 |
--------------------------------------------------------------------------------
/docs/extending.md:
--------------------------------------------------------------------------------
1 | # Extending the Library
2 |
3 | ## Integrating new Transformer models
4 | Currently, not all model types included in Hugging Face's `transformers` support adapters.
5 | However, it is possible to add the existing adapter implementation to new models.
6 | For detailed instructions, see [Adding Adapters to a Model](https://docs.adapterhub.ml/contributing/adding_adapters_to_a_model.html).
7 |
8 | ## Loading custom module weights
9 |
10 | `adapters` provides support for saving and loading adapter and prediction head modules from the local file system or the Hub out of the box.
11 | However, many further module integrations into language models are conceivable.
12 | To provide a basis for such new custom model plugins, `adapters` integrates a basic mechanism to save and load custom weights.
13 |
14 | All adapter and head module weights are extracted, saved and loaded by implementations of the `WeightsLoader` class, the two built-in implementations being `AdapterLoader` and `PredictionHeadLoader`. To add basic saving and loading functionality for your custom module weights, you can implement a new subclass of `WeightsLoader`. The two required abstract methods to be implemented are:
15 |
16 | - `filter_func(self, name: str) -> Callable[[str], bool]`: The callable returned by this method is used to extract the module weights to be saved or loaded based on their names.
17 |
18 | - `rename_func(self, old_name: str, new_name: str) -> Callable[[str], str]`: The callable returned by this method is used to optionally rename the module weights after loading.
19 |
20 | For more advanced functionality, you may also want to override the `save()` and `load()` methods.
21 |
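22 | A minimal sketch of such a subclass is shown below. The `custom_module` weight prefix and the `MyCustomWeightsLoader` class are hypothetical, and the import path of `WeightsLoader` is an assumption:
23 |
24 | ```python
25 | from adapters.loading import WeightsLoader  # assumed import path
26 |
27 |
28 | class MyCustomWeightsLoader(WeightsLoader):
29 |     def filter_func(self, name):
30 |         # Select all weights that belong to the (hypothetical) custom module
31 |         # registered under the given name.
32 |         return lambda weight_name: f"custom_module.{name}." in weight_name
33 |
34 |     def rename_func(self, old_name, new_name):
35 |         # Rename the module's weights when loading them under a new name.
36 |         return lambda weight_name: weight_name.replace(
37 |             f"custom_module.{old_name}.", f"custom_module.{new_name}."
38 |         )
39 | ```
40 |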
22 | Using the custom loader class, weights can now be saved with:
23 | ```python
24 | loader = MyCustomWeightsLoader(model)
25 | loader.save("path/to/save/dir", "custom_weights_name")
26 | ```
27 |
28 | You can also upload these weights to the Hub and then load them from there together with an adapter:
29 | ```python
30 | model.load_adapter(
31 | "adapter_name",
32 | custom_weights_loaders=[MyCustomWeightsLoader]
33 | )
34 | ```
35 |
--------------------------------------------------------------------------------
/docs/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/favicon.png
--------------------------------------------------------------------------------
/docs/hub_contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing Adapters to the Hub
2 |
3 | ```{eval-rst}
4 | .. warning::
5 |    The original approach of contributing adapters via the Hub repository is deprecated. Please upload all new adapters to HuggingFace's Model Hub as described in `Integration with Hugging Face's Model Hub <https://docs.adapterhub.ml/huggingface_hub.html>`_.
6 | For the legacy documentation, refer to `here `_.
7 | ```
8 |
--------------------------------------------------------------------------------
/docs/img/Fusion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/Fusion.png
--------------------------------------------------------------------------------
/docs/img/adapter-bert.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/adapter-bert.png
--------------------------------------------------------------------------------
/docs/img/adapter_blocks_nesting.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/adapter_blocks_nesting.png
--------------------------------------------------------------------------------
/docs/img/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/architecture.png
--------------------------------------------------------------------------------
/docs/img/compacter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/compacter.png
--------------------------------------------------------------------------------
/docs/img/ia3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/ia3.png
--------------------------------------------------------------------------------
/docs/img/lora.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/lora.png
--------------------------------------------------------------------------------
/docs/img/parallel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/parallel.png
--------------------------------------------------------------------------------
/docs/img/prefix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/prefix.png
--------------------------------------------------------------------------------
/docs/img/splitting_adapters.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/splitting_adapters.png
--------------------------------------------------------------------------------
/docs/img/stacking_adapters.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/stacking_adapters.png
--------------------------------------------------------------------------------
/docs/img/unipelt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/img/unipelt.png
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | The `adapters` package is designed as an add-on for Hugging Face's Transformers library.
4 | It currently supports Python 3.9+ and PyTorch 2.0+. You will have to [install PyTorch](https://pytorch.org/get-started/locally/) first.
5 |
6 | ```{eval-rst}
7 | .. important::
8 | Each ``adapters`` version is built for one specific version of Transformers.
9 |    While using a different version of Transformers with an ``adapters`` version might work, it is highly recommended to use the intended version.
10 | ``adapters`` will automatically install the correct Transformers version if not installed.
11 | ```
12 |
13 | ## Using pip
14 |
15 | ### From PyPI
16 |
17 | The simplest way of installation is by using pip to install the package from the Python Package Index:
18 |
19 | ```
20 | pip install adapters
21 | ```
22 |
23 | ### From GitHub
24 |
25 | You can also install the latest development version directly from our GitHub repository:
26 |
27 | ```
28 | pip install git+https://github.com/adapter-hub/adapters.git
29 | ```
30 |
31 | ## From repository
32 |
33 | Alternatively, you can clone the repository first and install the package from source.
34 | This allows you to run the included example scripts directly:
35 |
36 | ```
37 | git clone https://github.com/adapter-hub/adapters.git
38 | cd adapters
39 | pip install .
40 | ```
41 |
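42 | If you plan to modify the source, an editable install (a standard pip feature, not specific to this package) makes code changes take effect without reinstalling:
43 |
44 | ```
45 | pip install -e .
46 | ```
47 |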
--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/docs/logo.png
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/multi_task_methods.md:
--------------------------------------------------------------------------------
1 | # Multi Task Methods
2 |
3 | ## MTL-LoRA
4 |
5 | _Configuration class_: [`MTLLoRAConfig`](adapters.MTLLoRAConfig)
6 |
7 | "MTL-LoRA: Low-Rank Adaptation for Multi-Task Learning" ([Yang et al., 2024](https://arxiv.org/pdf/2410.09437)). MTL-LoRA enhances LoRA for multi-task learning (MTL) by improving task differentiation and knowledge sharing. It introduces a task-specific low-rank learnable matrix $\Lambda_t$ to better capture task-specific information and utilizes $n$ low-rank up-projection matrices for diverse information-sharing. A weighted averaging mechanism integrates these matrices, allowing adaptive knowledge transfer across tasks. Specifically, the MTL-LoRA output for task $t$ is formulated as:
8 |
9 | $$
10 | h_t = (W + \Delta W_t)x_t = Wx_t + \sum_{i=1}^n\frac{\text{exp}(w_t^i/\tau)B^i}{\sum_{j=1}^n\text{exp}(w_t^{j}/\tau)}\Lambda_t A x_t
11 | $$
12 |
13 | where $\tau$ controls the sharpness of the weight distribution.
14 |
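15 | To make the equation concrete, the following is an illustrative plain-PyTorch computation of this weighted combination (a toy sketch with random values, not the library's internal implementation):
16 |
17 | ```python
18 | import torch
19 |
20 | d, r, n = 16, 8, 3                         # hidden size, rank, number of up-projections
21 | x_t = torch.randn(d)                       # input for task t
22 | W = torch.randn(d, d)                      # frozen pretrained weight
23 | A = torch.randn(r, d)                      # shared down-projection
24 | Lambda_t = torch.diag(torch.randn(r))      # task-specific low-rank matrix
25 | B = [torch.randn(d, r) for _ in range(n)]  # n shared up-projections
26 | w_t = torch.randn(n)                       # task-specific mixing logits
27 | tau = 1.0                                  # temperature
28 |
29 | weights = torch.softmax(w_t / tau, dim=0)  # weighted averaging over the n up-projections
30 | delta = sum(weights[i] * (B[i] @ Lambda_t @ A @ x_t) for i in range(n))
31 | h_t = W @ x_t + delta
32 | ```
33 |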
15 | `MTL-LoRA` is trainable with the `MultiTask` composition block and a dataset which contains a `task_ids` column (see [`MultiTask` Composition](adapter_composition.md#multitask)).
16 |
17 |
18 | _Example_:
19 | ```python
20 | from adapters import MTLLoRAConfig
21 | import adapters.composition as ac
22 |
23 | config = MTLLoRAConfig(
24 | r=8,
25 | alpha=16,
26 | n_up_projection=3,
27 | )
28 |
29 | model.add_adapter("i", config)
30 | model.add_adapter("k", config)
31 | model.add_adapter("l", config)
32 |
33 | model.share_parameters(
34 | adapter_names=["i", "k", "l"],
35 | )
36 |
37 | model.active_adapters = ac.MultiTask("i", "k", "l")
38 | ```
39 |
--------------------------------------------------------------------------------
/docs/scripts/post_build.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import sys
4 |
5 |
6 | BUILD_DIR = sys.argv[1]
7 |
8 | # Restructure the built docs: the "main" version is served from the build
9 | # root, all other versions from folders renamed from "adapters..." to "v...".
10 | for folder in os.listdir(BUILD_DIR):
11 |     path = os.path.join(BUILD_DIR, folder)
12 |     if folder == "main":
13 |         # Move the contents of the "main" build up into the build root.
14 |         file_names = os.listdir(path)
15 |         for file_name in file_names:
16 |             shutil.move(os.path.join(path, file_name), BUILD_DIR)
17 |         os.rmdir(path)
18 |     else:
19 |         # Rename versioned builds by replacing "adapters" with "v" in the path.
20 |         shutil.move(path, path.replace("adapters", "v"))
17 |
--------------------------------------------------------------------------------
/examples/pytorch/_tests_requirements.txt:
--------------------------------------------------------------------------------
1 | tensorboard
2 | scikit-learn
3 | seqeval
4 | psutil
5 | sacrebleu >= 1.4.12
6 | git+https://github.com/huggingface/accelerate@main#egg=accelerate
7 | rouge-score
8 | tensorflow_datasets
9 | matplotlib
10 | git-python==1.0.3
11 | faiss-cpu
12 | streamlit
13 | elasticsearch
14 | nltk
15 | pandas
16 | datasets >= 1.13.3
17 | fire
18 | pytest
19 | conllu
20 | sentencepiece != 0.1.92
21 | protobuf
22 | torchvision
23 | jiwer
24 | librosa
25 | evaluate >= 0.2.0
26 |
--------------------------------------------------------------------------------
/examples/pytorch/adapterdrop/drop_at_inference.py:
--------------------------------------------------------------------------------
1 | # TODO: Replace this with a proper colab notebook
2 | import torch
3 |
4 | import adapters
5 | from transformers import AutoModelForSequenceClassification, AutoTokenizer
6 |
7 |
8 | if __name__ == "__main__":
9 |     # A temporary example to highlight changes implemented for AdapterDrop at inference
10 | model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
11 | # Convert the model into an adapter model
12 | adapters.init(model)
13 | model.load_adapter("sentiment/sst-2@ukp")
14 |
15 | tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
16 | tokens = tokenizer.tokenize("AdapterHub is awesome!")
17 | input_tensor = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])
18 |
19 | model.set_active_adapters("sst-2")
20 | outputs_nodrop = model(input_tensor)
21 |
22 | model.set_active_adapters("sst-2", skip_layers=[0, 1])
23 | outputs_adapterdrop = model(input_tensor)
24 |
25 | # different probs
26 | assert not torch.equal(outputs_nodrop[0], outputs_adapterdrop[0])
27 | # but they should still result in the same prediction
28 | assert torch.equal(torch.argmax(outputs_nodrop[0]), torch.argmax(outputs_adapterdrop[0]))
29 |
--------------------------------------------------------------------------------
/examples/pytorch/adapterfusion/README.md:
--------------------------------------------------------------------------------
1 | # AdapterFusion examples
2 |
3 | More information, including an example of how to run the script, can be found here: https://docs.adapterhub.ml/training.html#train-adapterfusion.
4 |
--------------------------------------------------------------------------------
/examples/pytorch/conftest.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The HuggingFace Team. All rights reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | # tests directory-specific settings - this file is run automatically
16 | # by pytest before any tests are run
17 |
18 | import sys
19 | import warnings
20 | from os.path import abspath, dirname, join
21 |
22 |
23 | # allow having multiple repository checkouts and not needing to remember to rerun
24 | # 'pip install -e .[dev]' when switching between checkouts and running tests.
25 | git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
26 | sys.path.insert(1, git_repo_path)
27 |
28 |
29 | # silence FutureWarning warnings in tests since often we can't act on them until
30 | # they become normal warnings - i.e. the tests still need to test the current functionality
31 | warnings.simplefilter(action="ignore", category=FutureWarning)
32 |
33 |
34 | def pytest_addoption(parser):
35 | from transformers.testing_utils import pytest_addoption_shared
36 |
37 | pytest_addoption_shared(parser)
38 |
39 |
40 | def pytest_terminal_summary(terminalreporter):
41 | from transformers.testing_utils import pytest_terminal_summary_main
42 |
43 | make_reports = terminalreporter.config.getoption("--make-reports")
44 | if make_reports:
45 | pytest_terminal_summary_main(terminalreporter, id=make_reports)
46 |
--------------------------------------------------------------------------------
/examples/pytorch/dependency-parsing/README.md:
--------------------------------------------------------------------------------
1 | # Dependency parsing on Universal Dependencies with Adapters
2 |
3 | These example scripts are based on the fine-tuning code from the repository of ["How Good is Your Tokenizer? On the Monolingual Performance of Multilingual Language Models"](https://github.com/Adapter-Hub/hgiyt).
4 | The scripts were upgraded to `adapters` v2.x and modified to use [flex heads](https://docs.adapterhub.ml/prediction_heads.html#models-with-flexible-heads) and HuggingFace Datasets.
5 |
6 | The used biaffine dependency parsing prediction head is described in ["Is Supervised Syntactic Parsing Beneficial for Language Understanding Tasks? An Empirical Investigation" (Glavaš & Vulić, 2021)](https://arxiv.org/pdf/2008.06788.pdf).
7 |
8 | A new prediction head can be added to BERT-based models via the `add_dependency_parsing_head()` method, e.g.:
9 | ```python
10 | model = AutoAdapterModel.from_pretrained("bert-base-uncased")
11 | model.add_dependency_parsing_head(
12 | "dependency_parsing",
13 | num_labels=num_labels,
14 | id2label=label_map,
15 | )
16 | ```
17 |
18 | ## Training on Universal Dependencies
19 |
20 | Script: [`run_udp.py`](https://github.com/Adapter-Hub/adapters/blob/master/examples/pytorch/dependency-parsing/run_udp.py).
21 |
22 | Fine-tuning on the treebanks of [Universal Dependencies](https://universaldependencies.org/).
23 | The datasets are loaded from [HuggingFace Datasets](https://huggingface.co/datasets/universal_dependencies); the treebank to use can be specified via the `--task_name` option.
24 |
25 | Training an adapter on the English Web Treebank (`en_ewt`) could be done as follows:
26 |
27 | ```bash
28 | export TASK_NAME="en_ewt"
29 |
30 | python run_udp.py \
31 | --model_name_or_path bert-base-cased \
32 | --do_train \
33 | --do_eval \
34 | --do_predict \
35 | --task_name $TASK_NAME \
36 | --per_device_train_batch_size 12 \
37 | --learning_rate 5e-4 \
38 | --num_train_epochs 10 \
39 | --max_seq_length 256 \
40 | --output_dir experiments/$TASK_NAME \
41 | --overwrite_output_dir \
42 | --store_best_model \
43 | --evaluation_strategy epoch \
44 | --metric_score las \
45 | --train_adapter
46 | ```
47 |
48 | For more information, also visit the original code at https://github.com/Adapter-Hub/hgiyt/tree/master/finetuning.
49 |
--------------------------------------------------------------------------------
/examples/pytorch/dependency-parsing/requirements.txt:
--------------------------------------------------------------------------------
1 | datasets >= 1.8.0
2 | torch >= 1.3
3 | conllu
4 |
--------------------------------------------------------------------------------
/examples/pytorch/language-modeling/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate >= 0.12.0
2 | torch >= 1.3
3 | datasets >= 1.8.0
4 | sentencepiece != 0.1.92
5 | protobuf
6 | evaluate
7 | scikit-learn
8 |
--------------------------------------------------------------------------------
/examples/pytorch/multiple-choice/README.md:
--------------------------------------------------------------------------------
1 |
16 |
17 | # Multiple Choice with Adapters
18 |
19 | ## Fine-tuning on SWAG with the Trainer
20 |
21 | `run_swag` allows you to fine-tune any model from our [hub](https://huggingface.co/models) (as long as its architecture has a `ForMultipleChoice` version in the library) on the SWAG dataset or your own csv/jsonlines files, as long as they are structured the same way. To make it work on another dataset, you will need to tweak the `preprocess_function` inside the script.
22 |
23 | ```bash
24 | python run_swag.py \
25 | --model_name_or_path roberta-base \
26 | --do_train \
27 | --do_eval \
28 | --learning_rate 5e-5 \
29 | --num_train_epochs 3 \
30 | --output_dir /tmp/swag_base \
31 | --per_gpu_eval_batch_size=16 \
32 | --per_device_train_batch_size=16 \
33 | --overwrite_output \
34 | --train_adapter \
35 | --adapter_config seq_bn \
36 | --overwrite_output_dir
37 | ```
38 |
39 | Training with the defined hyper-parameters yields the following results:
40 | ```
41 | ***** Eval results *****
42 | eval_acc = 0.8338998300509847
43 | eval_loss = 0.44457291918821606
44 | ```
45 |
46 | ## With Accelerate
47 |
48 | We have not adapted the `run_swag_no_trainer.py` script of Hugging Face Transformers to use Adapters. To avoid confusion, we have not included this non-adapted version in the examples of Adapters.
49 |
--------------------------------------------------------------------------------
/examples/pytorch/multiple-choice/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate >= 0.12.0
2 | sentencepiece != 0.1.92
3 | protobuf
4 | torch >= 1.3
5 | evaluate
6 |
--------------------------------------------------------------------------------
/examples/pytorch/question-answering/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate >= 0.12.0
2 | datasets >= 1.8.0
3 | torch >= 1.3.0
4 | evaluate
--------------------------------------------------------------------------------
/examples/pytorch/summarization/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate >= 0.12.0
2 | datasets >= 1.8.0
3 | sentencepiece != 0.1.92
4 | protobuf
5 | rouge-score
6 | nltk
7 | py7zr
8 | torch >= 1.3
9 | evaluate
10 |
--------------------------------------------------------------------------------
/examples/pytorch/text-classification/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate >= 0.12.0
2 | datasets >= 1.8.0
3 | sentencepiece != 0.1.92
4 | scipy
5 | scikit-learn
6 | protobuf
7 | torch >= 1.3
8 | evaluate
--------------------------------------------------------------------------------
/examples/pytorch/text-generation/README.md:
--------------------------------------------------------------------------------
1 |
16 |
17 | ## Language generation with Adapters
18 | > **Note:** We have not adapted the following scripts of Hugging Face Transformers:
19 | > - `run_generation_contrastive_search.py`
20 | >
21 | > To avoid confusion we have not included these non-adapted versions in the examples of Adapters.
22 |
23 | Based on the script [`run_generation.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-generation/run_generation.py).
24 |
25 | Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL, XLNet, CTRL.
26 | A similar script is used for our official demo [Write With Transformer](https://transformer.huggingface.co), where you
27 | can try out the different models available in the library.
28 |
29 | Example usage:
30 |
31 | ```bash
32 | python run_generation.py \
33 | --model_type=gpt2 \
34 | --model_name_or_path=gpt2
35 | ```
36 |
37 | This can also be done by using a trained adapter. With the `--load_adapter` argument you can specify an adapter to load
38 | for language generation.
39 |
40 | Example with Adapters:
41 | ```bash
42 | python run_generation.py \
43 | --model_type=gpt2 \
44 |     --model_name_or_path=gpt2 \
45 |     --load_adapter=./tmp/poem
46 | ```
--------------------------------------------------------------------------------
/examples/pytorch/text-generation/requirements.txt:
--------------------------------------------------------------------------------
1 | sentencepiece != 0.1.92
2 | protobuf
3 | torch >= 1.3
4 |
--------------------------------------------------------------------------------
/examples/pytorch/token-classification/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate >= 0.12.0
2 | seqeval
3 | datasets >= 1.8.0
4 | torch >= 1.3
5 | evaluate
--------------------------------------------------------------------------------
/examples/pytorch/token-classification/run.sh:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The HuggingFace Team. All rights reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | python3 run_ner.py \
16 | --model_name_or_path bert-base-uncased \
17 | --dataset_name conll2003 \
18 | --output_dir /tmp/test-ner \
19 | --do_train \
20 | --do_eval \
21 | --overwrite_output_dir \
22 | --train_adapter \
23 | --adapter_config seq_bn
24 |
--------------------------------------------------------------------------------
/examples/pytorch/translation/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate >= 0.12.0
2 | datasets >= 1.8.0
3 | sentencepiece != 0.1.92
4 | protobuf
5 | sacrebleu >= 1.4.12
6 | py7zr
7 | torch >= 1.3
8 | evaluate
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 119
3 | target-version = ['py38', 'py39', 'py310']
4 | [tool.pytest.ini_options]
5 | markers = [
6 | "core: marks tests as core adapter test",
7 | "composition: marks tests as composition adapter test",
8 | "heads: marks tests as heads adapter test",
9 | "embeddings: marks tests as embeddings adapter test",
10 | "class_conversion: marks tests as class conversion adapter test",
11 | "prefix_tuning: marks tests as prefix tuning adapter test",
12 | "prompt_tuning: marks tests as prompt tuning adapter test",
13 | "reft: marks tests as reft adapter test",
14 | "unipelt: marks tests as unipelt adapter test",
15 | "compacter: marks tests as compacter adapter test",
16 | "bottleneck: marks tests as bottleneck adapter test",
17 | "ia3: marks tests as ia3 adapter test",
18 | "lora: marks tests as lora adapter test",
19 | "flash_attn_test: marks tests related to flash attention (deselect with '-m \"not flash_attn_test\"')",
20 | "bitsandbytes: select (or deselect with `not`) bitsandbytes integration tests",
21 | "generate: marks tests that use the GenerationTesterMixin"
22 | ]
23 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | default_section = FIRSTPARTY
3 | ensure_newline_before_comments = True
4 | force_grid_wrap = 0
5 | include_trailing_comma = True
6 | known_first_party = transformers
7 | known_third_party =
8 | absl
9 | conllu
10 | datasets
11 | elasticsearch
12 | fairseq
13 | faiss-cpu
14 | fastprogress
15 | fire
16 | fugashi
17 | git
18 | h5py
19 | matplotlib
20 | nltk
21 | numpy
22 | packaging
23 | pandas
24 | PIL
25 | psutil
26 | pytest
27 | pytorch_lightning
28 | rouge_score
29 | sacrebleu
30 | seqeval
31 | sklearn
32 | streamlit
33 | tensorboardX
34 | tensorflow
35 | tensorflow_datasets
36 | timeout_decorator
37 | torch
38 | torchaudio
39 | torchtext
40 | torchvision
41 | torch_xla
42 | tqdm
43 |
44 | line_length = 119
45 | lines_after_imports = 2
46 | multi_line_output = 3
47 | use_parentheses = True
48 |
49 | [flake8]
50 | ignore = E203, E501, E731, E741, W503, W605
51 | max-line-length = 119
52 |
53 | [tool:pytest]
54 | doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
--------------------------------------------------------------------------------
/src/adapters/configuration/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from .adapter_config import *
3 | from .adapter_fusion_config import *
4 | from .model_adapters_config import ModelAdaptersConfig, build_full_config
5 |
--------------------------------------------------------------------------------
/src/adapters/heads/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from .base import *
3 | from .dependency_parsing import *
4 | from .language_modeling import BertStyleMaskedLMHead, CausalLMHead, Seq2SeqLMHead
5 | from .model_mixin import ModelWithFlexibleHeadsAdaptersMixin
6 |
--------------------------------------------------------------------------------
/src/adapters/methods/__init__.py:
--------------------------------------------------------------------------------
1 | from .bottleneck import init_bottleneck
2 | from .invertible import init_invertible_adapters
3 | from .lora import init_lora
4 | from .prompt_tuning import init_prompt_tuning
5 | from .reft import init_reft
6 |
7 |
8 | METHOD_INIT_MAPPING = {
9 | "bottleneck": init_bottleneck,
10 | "lora": init_lora,
11 | "prompt_tuning": init_prompt_tuning,
12 | "reft": init_reft,
13 | "invertible": init_invertible_adapters,
14 | }
15 |
--------------------------------------------------------------------------------
/src/adapters/methods/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023-present the HuggingFace Inc. team.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from typing import Optional
16 |
17 | import torch
18 |
19 |
20 | def fix_seed(seed: Optional[int] = None):
21 | """
22 | Helper function to fix the torch seed on cpu and gpu for initializing adapters with the same weights.
23 | Is only executed if the config provides a respective seed.
24 | """
25 |     if seed is not None:  # explicit None check so that a seed of 0 is also respected
26 | torch.manual_seed(seed)
27 | if torch.cuda.is_available():
28 | torch.cuda.manual_seed_all(seed)
29 |
30 |
31 | # Copied from https://github.com/huggingface/peft/blob/main/src/peft/utils/integrations.py.
32 | def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None):
33 | """
34 | Helper function to dequantize 4bit or 8bit bnb weights.
35 |
36 | If the weight is not a bnb quantized weight, it will be returned as is.
37 | """
38 | if not isinstance(weight, torch.nn.Parameter):
39 | raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead")
40 |
41 | cls_name = weight.__class__.__name__
42 | if cls_name not in ("Params4bit", "Int8Params"):
43 | return weight
44 |
45 | import bitsandbytes as bnb
46 |
47 | if cls_name == "Params4bit":
48 | return bnb.functional.dequantize_4bit(weight.data, weight.quant_state)
49 |
50 | if state.SCB is None:
51 | state.SCB = weight.SCB
52 |
53 | im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device)
54 | im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im)
55 | im, Sim = bnb.functional.transform(im, "col32")
56 | if state.CxB is None:
57 | state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB)
58 | out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB)
59 | return bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t()
60 |
--------------------------------------------------------------------------------
/src/adapters/models/albert/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["AlbertAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import AlbertAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/auto/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": [
26 | "ADAPTER_MODEL_MAPPING",
27 | "AutoAdapterModel",
28 | ],
29 | }
30 |
31 |
32 | if TYPE_CHECKING:
33 | from .adapter_model import ADAPTER_MODEL_MAPPING, AutoAdapterModel
34 |
35 | else:
36 | import sys
37 |
38 | sys.modules[__name__] = _LazyModule(
39 | __name__,
40 | globals()["__file__"],
41 | _import_structure,
42 | )
43 |
--------------------------------------------------------------------------------
/src/adapters/models/auto/adapter_model.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 |
3 | from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update
4 | from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
5 |
6 | from .auto_factory import _LazyAdapterModelAutoMapping
7 |
8 |
9 | # Make sure that children are placed before parents!
10 | ADAPTER_MODEL_MAPPING_NAMES = OrderedDict(
11 | [
12 | ("albert", "AlbertAdapterModel"),
13 | ("bart", "BartAdapterModel"),
14 | ("beit", "BeitAdapterModel"),
15 | ("bert", "BertAdapterModel"),
16 | ("bert-generation", "BertGenerationAdapterModel"),
17 | ("clip", "CLIPAdapterModel"),
18 | ("deberta", "DebertaAdapterModel"),
19 | ("deberta-v2", "DebertaV2AdapterModel"),
20 | ("distilbert", "DistilBertAdapterModel"),
21 | ("electra", "ElectraAdapterModel"),
22 | ("gpt2", "GPT2AdapterModel"),
23 | ("gptj", "GPTJAdapterModel"),
24 | ("llama", "LlamaAdapterModel"),
25 | ("mbart", "MBartAdapterModel"),
26 | ("mistral", "MistralAdapterModel"),
27 | ("mt5", "MT5AdapterModel"),
28 | ("plbart", "PLBartAdapterModel"),
29 | ("roberta", "RobertaAdapterModel"),
30 | ("t5", "T5AdapterModel"),
31 | ("vit", "ViTAdapterModel"),
32 | ("whisper", "WhisperAdapterModel"),
33 | ("xlm-roberta", "XLMRobertaAdapterModel"),
34 | ("xmod", "XmodAdapterModel"),
35 | ]
36 | )
37 |
38 |
39 | ADAPTER_MODEL_MAPPING = _LazyAdapterModelAutoMapping(CONFIG_MAPPING_NAMES, ADAPTER_MODEL_MAPPING_NAMES)
40 |
41 |
42 | class AutoAdapterModel(_BaseAutoModelClass):
43 | _model_mapping = ADAPTER_MODEL_MAPPING
44 |
45 |
46 | AutoAdapterModel = auto_class_update(AutoAdapterModel, head_doc="adapters and flexible heads")
47 |
--------------------------------------------------------------------------------
/src/adapters/models/auto/auto_factory.py:
--------------------------------------------------------------------------------
1 | import importlib
2 |
3 | from transformers.models.auto.auto_factory import _LazyAutoMapping, getattribute_from_module, model_type_to_module_name
4 |
5 |
6 | class _LazyAdapterModelAutoMapping(_LazyAutoMapping):
7 | def _load_attr_from_module(self, model_type, attr):
8 | module_name = model_type_to_module_name(model_type)
9 | if module_name not in self._modules:
10 | self._modules[module_name] = importlib.import_module(f".{module_name}", "adapters.models")
11 | return getattribute_from_module(self._modules[module_name], attr)
12 |
--------------------------------------------------------------------------------
/src/adapters/models/bart/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["BartAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import BartAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/beit/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["BeitAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import BeitAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/bert/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": [
26 | "BertAdapterModel",
27 | "BertForSequenceClassificationAdapterModel",
28 | ],
29 | }
30 |
31 |
32 | if TYPE_CHECKING:
33 | from .adapter_model import BertAdapterModel, BertForSequenceClassificationAdapterModel
34 |
35 | else:
36 | import sys
37 |
38 | sys.modules[__name__] = _LazyModule(
39 | __name__,
40 | globals()["__file__"],
41 | _import_structure,
42 | )
43 |
--------------------------------------------------------------------------------
/src/adapters/models/bert_generation/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["BertGenerationAdapterModel"],
26 | "modeling_bert_generation": [
27 | "BertGenerationDecoder",
28 | "BertGenerationEncoder",
29 | "BertGenerationPreTrainedModel",
30 | "load_tf_weights_in_bert_generation",
31 | ],
32 | }
33 |
34 |
35 | if TYPE_CHECKING:
36 | from .adapter_model import BertGenerationAdapterModel
37 | from .modeling_bert_generation import (
38 | BertGenerationDecoder,
39 | BertGenerationEncoder,
40 | BertGenerationPreTrainedModel,
41 | load_tf_weights_in_bert_generation,
42 | )
43 |
44 | else:
45 | import sys
46 |
47 | sys.modules[__name__] = _LazyModule(
48 | __name__,
49 | globals()["__file__"],
50 | _import_structure,
51 | )
52 |
--------------------------------------------------------------------------------
/src/adapters/models/clip/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["CLIPAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import CLIPAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/clip/adapter_model.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import torch
4 |
5 | from transformers.models.clip.modeling_clip import (
6 | CLIP_INPUTS_DOCSTRING,
7 | CLIP_START_DOCSTRING,
8 | CLIPModel,
9 | CLIPPreTrainedModel,
10 | )
11 | from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward
12 |
13 | from ...context import AdapterSetup, ForwardContext
14 | from ...heads import ModelWithFlexibleHeadsAdaptersMixin
15 | from ...model_mixin import EmbeddingAdaptersWrapperMixin
16 | from ...wrappers import init
17 |
18 |
19 | @add_start_docstrings(CLIP_START_DOCSTRING)
20 | class CLIPAdapterModel(EmbeddingAdaptersWrapperMixin, ModelWithFlexibleHeadsAdaptersMixin, CLIPPreTrainedModel):
21 | def __init__(self, config):
22 | super().__init__(config)
23 |
24 | self.clip = CLIPModel(config)
25 | init(self.clip)
26 |
27 | self._init_head_modules()
28 |
29 | self.post_init()
30 |
31 | @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
32 | @ForwardContext.wrap
33 | def forward(
34 | self,
35 | input_ids: Optional[torch.LongTensor] = None,
36 | pixel_values: Optional[torch.FloatTensor] = None,
37 | attention_mask: Optional[torch.Tensor] = None,
38 | position_ids: Optional[torch.LongTensor] = None,
39 | return_loss: Optional[bool] = None,
40 | output_attentions: Optional[bool] = None,
41 | output_hidden_states: Optional[bool] = None,
42 | return_dict: Optional[bool] = None,
43 | head=None,
44 | **kwargs,
45 | ):
46 | outputs = self.clip(
47 | input_ids=input_ids,
48 | pixel_values=pixel_values,
49 | attention_mask=attention_mask,
50 | position_ids=position_ids,
51 | return_loss=return_loss,
52 | output_attentions=output_attentions,
53 | output_hidden_states=output_hidden_states,
54 | return_dict=return_dict,
55 | )
56 | if head or AdapterSetup.get_context_head_setup() or self.active_head:
57 | head_outputs = self.forward_head(
58 | outputs,
59 | head_name=head,
60 | attention_mask=attention_mask,
61 | return_dict=return_dict,
62 | **kwargs,
63 | )
64 | return head_outputs
65 | else:
66 | # in case no head is used just return the output of the base model (including pooler output)
67 | return outputs
68 |
--------------------------------------------------------------------------------
/src/adapters/models/deberta/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["DebertaAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import DebertaAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/deberta/mixin_deberta.py:
--------------------------------------------------------------------------------
1 | from ...methods.lora import LoRAMergedLinear
2 | from ...methods.prefix_tuning import PrefixTuningLayer
3 | from ...utils import patch_forward
4 | from ..bert.mixin_bert import BertModelAdaptersMixin
5 |
6 |
7 | class DebertaSelfAttentionAdaptersMixin:
8 |     """Adds adapters to the DisentangledSelfAttention module of DeBERTa."""
9 |
10 | def init_adapters(self, model_config, adapters_config):
11 | # Wrap layers for LoRA
12 | self.in_proj = LoRAMergedLinear.wrap(self.in_proj, "selfattn", model_config, adapters_config)
13 |
14 | self.prefix_tuning = PrefixTuningLayer(
15 | self.location_key + "_prefix" if self.location_key else None, model_config, adapters_config
16 | )
17 | patch_forward(self)
18 |
19 |
20 | class DebertaModelAdaptersMixin(BertModelAdaptersMixin):
21 | # Same as BERT, except that Deberta does not support the "lora_delta_w_svd" combine_strategy
22 | support_lora_delta_w_svd = False
23 |
--------------------------------------------------------------------------------
/src/adapters/models/deberta_v2/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["DebertaV2AdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import DebertaV2AdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/deberta_v2/mixin_deberta_v2.py:
--------------------------------------------------------------------------------
1 | from ...methods.lora import LoRALinear
2 | from ...methods.prefix_tuning import PrefixTuningLayer
3 | from ...utils import patch_forward
4 |
5 |
6 | class DebertaV2SelfAttentionAdaptersMixin:
7 | """Adds adapters to the BertSelfAttention module."""
8 |
9 | def init_adapters(self, model_config, adapters_config):
10 | # Wrap layers for LoRA
11 | self.query_proj = LoRALinear.wrap(self.query_proj, "selfattn", model_config, adapters_config, attn_key="q")
12 | self.key_proj = LoRALinear.wrap(self.key_proj, "selfattn", model_config, adapters_config, attn_key="k")
13 | self.value_proj = LoRALinear.wrap(self.value_proj, "selfattn", model_config, adapters_config, attn_key="v")
14 |
15 | self.prefix_tuning = PrefixTuningLayer(
16 | self.location_key + "_prefix" if self.location_key else None, model_config, adapters_config
17 | )
18 | patch_forward(self)
19 |
--------------------------------------------------------------------------------
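Note the contrast between the two DeBERTa mixins: v1 computes query, key, and value through a single in_proj matrix, so LoRA must be injected via LoRAMergedLinear, while v2 exposes separate query_proj/key_proj/value_proj layers that can each be wrapped with a plain LoRALinear and an attn_key. Which keys actually receive LoRA weights is selected through the attn_matrices field of LoRAConfig. A minimal sketch, assuming the microsoft/deberta-v3-base checkpoint and a placeholder adapter name:

    from adapters import AutoAdapterModel, LoRAConfig

    model = AutoAdapterModel.from_pretrained("microsoft/deberta-v3-base")

    # attn_matrices is matched against the attn_key ("q"/"k"/"v") passed to
    # LoRALinear.wrap() above; here only query and value projections get LoRA.
    config = LoRAConfig(r=8, alpha=16, attn_matrices=["q", "v"])
    model.add_adapter("lora_qv", config=config)
    model.set_active_adapters("lora_qv")
--------------------------------------------------------------------------------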
/src/adapters/models/distilbert/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["DistilBertAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import DistilBertAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/electra/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["ElectraAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import ElectraAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/encoder_decoder/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/src/adapters/models/encoder_decoder/__init__.py
--------------------------------------------------------------------------------
/src/adapters/models/encoder_decoder/modeling_encoder_decoder.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The HuggingFace Inc. team.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Classes to support Encoder-Decoder architectures"""
16 |
17 | from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel
18 |
19 | from .mixin_encoder_decoder import EncoderDecoderModelAdaptersMixin
20 |
21 |
22 | # Although this class is empty, we cannot add the mixin via the MODEL_MIXIN_MAPPING, as this would result in a circular import.
23 | class EncoderDecoderModelWithAdapters(EncoderDecoderModelAdaptersMixin, EncoderDecoderModel):
24 | pass
25 |
--------------------------------------------------------------------------------
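Because the mixin cannot be registered in MODEL_MIXIN_MAPPING, adapter support for encoder-decoder models is typically obtained by initializing a plain EncoderDecoderModel through the library's entry point, which applies the mixin at runtime. A minimal sketch, assuming two bert-base-uncased checkpoints and a placeholder adapter name:

    from transformers import EncoderDecoderModel

    import adapters

    model = EncoderDecoderModel.from_encoder_decoder_pretrained(
        "bert-base-uncased", "bert-base-uncased"
    )
    adapters.init(model)  # attaches the adapters mixins to the composed model

    model.add_adapter("ed_adapter", config="seq_bn")
    model.set_active_adapters("ed_adapter")
--------------------------------------------------------------------------------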
/src/adapters/models/gpt2/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["GPT2AdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import GPT2AdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/gptj/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["GPTJAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import GPTJAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/gptj/mixin_gptj.py:
--------------------------------------------------------------------------------
1 | from typing import Iterable, Tuple
2 |
3 | import torch.nn as nn
4 |
5 | from ...methods.bottleneck import BottleneckLayer
6 | from ...methods.lora import LoRALinear
7 | from ...methods.prefix_tuning import PrefixTuningLayer
8 | from ...model_mixin import EmbeddingAdaptersMixin, InvertibleAdaptersMixin, ModelBaseAdaptersMixin
9 | from ...utils import patch_forward
10 |
11 |
12 | class GPTJAttentionAdaptersMixin:
13 | def init_adapters(self, model_config, adapters_config):
14 | self.location_key = "self"
15 |
16 | # Wrap layers for LoRA
17 | self.q_proj = LoRALinear.wrap(self.q_proj, "selfattn", model_config, adapters_config, attn_key="q")
18 | self.k_proj = LoRALinear.wrap(self.k_proj, "selfattn", model_config, adapters_config, attn_key="k")
19 | self.v_proj = LoRALinear.wrap(self.v_proj, "selfattn", model_config, adapters_config, attn_key="v")
20 |
21 | self.prefix_tuning = PrefixTuningLayer(
22 | self.location_key + "_prefix" if self.location_key else None, model_config, adapters_config
23 | )
24 | patch_forward(self)
25 |
26 |
27 | class GPTJMLPAdaptersMixin:
28 | def init_adapters(self, model_config, adapters_config):
29 | # Wrap layers for LoRA
30 | self.fc_in = LoRALinear.wrap(self.fc_in, "intermediate", model_config, adapters_config)
31 | self.fc_out = LoRALinear.wrap(self.fc_out, "output", model_config, adapters_config)
32 |
33 |
34 | class GPTJDecoderBlockAdaptersMixin:
35 | """Adds adapters to the TransformerBlock module of GPTJ."""
36 |
37 | def init_adapters(self, model_config, adapters_config):
38 | self.attention_adapters = BottleneckLayer("mh_adapter")
39 | self.output_adapters = BottleneckLayer("output_adapter")
40 |
41 | patch_forward(self)
42 |
43 |
44 | class GPTJModelAdapterMixin(EmbeddingAdaptersMixin, InvertibleAdaptersMixin, ModelBaseAdaptersMixin):
45 | support_prompt_tuning = False
46 |
47 | def init_adapters(self, model_config, adapters_config):
48 | super().init_adapters(model_config, adapters_config)
49 |
50 | # Register hook for post embedding forward
51 | self.drop.register_forward_hook(self.post_embedding_forward)
52 |
53 | def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
54 | for i, layer in enumerate(self.base_model.h):
55 | yield i, layer
56 |
57 | def post_embedding_forward(self, module, args, embedding_output):
58 | embedding_output = self.invertible_adapters_forward(embedding_output)
59 | # Prompt tuning not yet supported
60 | return embedding_output
61 |
--------------------------------------------------------------------------------
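The two location keys registered in the GPT-J decoder block above (mh_adapter after attention, output_adapter after the block output) map directly onto the corresponding flags of the bottleneck configuration. A minimal sketch; the checkpoint name is a placeholder for any GPT-J model:

    from adapters import AutoAdapterModel, BnConfig

    model = AutoAdapterModel.from_pretrained("EleutherAI/gpt-j-6b")

    # mh_adapter=True enables the attention_adapters bottleneck registered above,
    # output_adapter=True enables the output_adapters one.
    config = BnConfig(
        mh_adapter=True, output_adapter=True, reduction_factor=16, non_linearity="relu"
    )
    model.add_adapter("bottleneck", config=config)
    model.train_adapter("bottleneck")  # freeze the base model, train only the adapter
--------------------------------------------------------------------------------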
/src/adapters/models/llama/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["LlamaAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import LlamaAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/mbart/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["MBartAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import MBartAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/mistral/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["MistralAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import MistralAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/mistral/mixin_mistral.py:
--------------------------------------------------------------------------------
1 | from typing import Iterable, Tuple
2 |
3 | import torch.nn as nn
4 |
5 | from ...methods.bottleneck import BottleneckLayer
6 | from ...methods.lora import LoRALinear
7 | from ...methods.prefix_tuning import PrefixTuningLayer
8 | from ...model_mixin import EmbeddingAdaptersMixin, InvertibleAdaptersMixin, ModelBaseAdaptersMixin
9 |
10 |
11 | class MistralAttentionMixin:
12 | def init_adapters(self, model_config, adapters_config):
13 | self.q_proj = LoRALinear.wrap(self.q_proj, "selfattn", model_config, adapters_config, attn_key="q")
14 | self.k_proj = LoRALinear.wrap(self.k_proj, "selfattn", model_config, adapters_config, attn_key="k")
15 | self.v_proj = LoRALinear.wrap(self.v_proj, "selfattn", model_config, adapters_config, attn_key="v")
16 |
17 | self.prefix_tuning = PrefixTuningLayer("self_prefix", model_config, adapters_config)
18 |
19 |
20 | class MistralDecoderLayerMixin:
21 | def init_adapters(self, model_config, adapters_config):
22 | # Wrap layers for LoRA
23 | self.mlp.down_proj = LoRALinear.wrap(self.mlp.down_proj, "intermediate", model_config, adapters_config)
24 | self.mlp.up_proj = LoRALinear.wrap(self.mlp.up_proj, "output", model_config, adapters_config)
25 |
26 | self.attention_adapters = BottleneckLayer("mh_adapter")
27 | self.output_adapters = BottleneckLayer("output_adapter")
28 |
29 |
30 | class MistralModelAdapterMixin(EmbeddingAdaptersMixin, InvertibleAdaptersMixin, ModelBaseAdaptersMixin):
31 | support_prompt_tuning = False
32 |
33 | def init_adapters(self, model_config, adapters_config):
34 | super().init_adapters(model_config, adapters_config)
35 |
36 | # Register hook for post embedding forward
37 | self.embed_tokens.register_forward_hook(self.post_embedding_forward)
38 |
39 | def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
40 | for i, layer in enumerate(self.layers):
41 | yield i, layer
42 |
43 | def post_embedding_forward(self, module, args, embedding_output):
44 | embedding_output = self.invertible_adapters_forward(embedding_output)
45 | # Prompt tuning not yet supported
46 | return embedding_output
47 |
--------------------------------------------------------------------------------
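iter_layers() defines the layer indexing that adapter options refer to; for example, the leave_out option of an adapter config skips exactly these indices. A minimal sketch, assuming the mistralai/Mistral-7B-v0.1 checkpoint and a placeholder adapter name:

    from adapters import AutoAdapterModel, SeqBnConfig

    model = AutoAdapterModel.from_pretrained("mistralai/Mistral-7B-v0.1")

    # leave_out refers to the indices yielded by iter_layers():
    # no adapter modules are added to the first two decoder layers.
    model.add_adapter("bn", config=SeqBnConfig(leave_out=[0, 1]))
    model.set_active_adapters("bn")
--------------------------------------------------------------------------------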
/src/adapters/models/mt5/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["MT5AdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import MT5AdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/plbart/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["PLBartAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import PLBartAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/roberta/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["RobertaAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import RobertaAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/t5/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["T5AdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import T5AdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/vit/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["ViTAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import ViTAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/vit/mixin_vit.py:
--------------------------------------------------------------------------------
1 | from typing import Iterable, Tuple
2 |
3 | import torch.nn as nn
4 |
5 | from ...methods.bottleneck import BottleneckLayer
6 | from ...methods.lora import LoRALinear
7 | from ...methods.prefix_tuning import PrefixTuningLayer
8 | from ...model_mixin import ModelBaseAdaptersMixin
9 | from ...utils import patch_forward
10 |
11 |
12 | class ViTSelfAttentionAdaptersMixin:
13 | def init_adapters(self, model_config, adapters_config):
14 | self.location_key = "self"
15 |
16 | # Wrap layers for LoRA
17 | self.query = LoRALinear.wrap(self.query, "selfattn", model_config, adapters_config, attn_key="q")
18 | self.key = LoRALinear.wrap(self.key, "selfattn", model_config, adapters_config, attn_key="k")
19 | self.value = LoRALinear.wrap(self.value, "selfattn", model_config, adapters_config, attn_key="v")
20 |
21 | self.prefix_tuning = PrefixTuningLayer(
22 | self.location_key + "_prefix" if self.location_key else None, model_config, adapters_config
23 | )
24 | patch_forward(self)
25 |
26 |
27 | class ViTIntermediateAdaptersMixin:
28 | def init_adapters(self, model_config, adapters_config):
29 | # Wrap layers for LoRA
30 | self.dense = LoRALinear.wrap(self.dense, "intermediate", model_config, adapters_config)
31 |
32 |
33 | class ViTOutputAdaptersMixin:
34 | """Adds adapters to the ViTOutput module."""
35 |
36 | def init_adapters(self, model_config, adapters_config):
37 | self.output_adapters = BottleneckLayer("output_adapter")
38 |
39 | # Wrap layers for LoRA
40 | self.dense = LoRALinear.wrap(self.dense, "output", model_config, adapters_config)
41 |
42 | patch_forward(self)
43 |
44 |
45 | # Unlike BERT, self-attention adapters are added to the Layer module in ViT
46 | class ViTLayerAdaptersMixin:
47 | """Adds adapters to the ViTLayer module."""
48 |
49 | def init_adapters(self, model_config, adapters_config):
50 | self.attention_adapters = BottleneckLayer("mh_adapter")
51 | patch_forward(self)
52 |
53 |
54 | class ViTModelAdaptersMixin(ModelBaseAdaptersMixin):
55 | """Adds adapters to the ViTModel class."""
56 |
57 | def init_adapters(self, model_config, adapters_config):
58 | super().init_adapters(model_config, adapters_config)
59 |
60 | # Register hook for post embedding forward
61 | self.embeddings.register_forward_hook(self.post_embedding_forward)
62 |
63 | def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
64 | for i, layer in enumerate(self.encoder.layer):
65 | yield i, layer
66 |
--------------------------------------------------------------------------------
/src/adapters/models/whisper/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["WhisperAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import WhisperAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/xlm_roberta/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["XLMRobertaAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import XLMRobertaAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/models/xmod/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2023 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {
25 | "adapter_model": ["XmodAdapterModel"],
26 | }
27 |
28 |
29 | if TYPE_CHECKING:
30 | from .adapter_model import XmodAdapterModel
31 |
32 | else:
33 | import sys
34 |
35 | sys.modules[__name__] = _LazyModule(
36 | __name__,
37 | globals()["__file__"],
38 | _import_structure,
39 | )
40 |
--------------------------------------------------------------------------------
/src/adapters/wrappers/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 | # module, but to preserve other warnings. So, don't check this module at all.
4 |
5 | # Copyright 2020 The Adapter-Hub Team. All rights reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from typing import TYPE_CHECKING
20 |
21 | from transformers.utils import _LazyModule
22 |
23 |
24 | _import_structure = {"configuration": ["init_adapters_config"], "model": ["init", "load_model"]}
25 |
26 |
27 | if TYPE_CHECKING:
28 | from .configuration import init_adapters_config
29 | from .model import init, load_model
30 |
31 | else:
32 | import sys
33 |
34 | sys.modules[__name__] = _LazyModule(
35 | __name__,
36 | globals()["__file__"],
37 | _import_structure,
38 | )
39 |
--------------------------------------------------------------------------------
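init() is the entry point for retrofitting adapter support onto an already-instantiated transformers model, while load_model() loads and initializes in one step. A minimal sketch of init(); the checkpoint and adapter names are placeholders:

    from transformers import BertForSequenceClassification

    import adapters

    # Patch an existing model in place with the adapter mixins.
    model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
    adapters.init(model)

    model.add_adapter("my_adapter", config="seq_bn")
    model.set_active_adapters("my_adapter")
--------------------------------------------------------------------------------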
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/__init__.py
--------------------------------------------------------------------------------
/tests/fixtures/SiBERT/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "BertForMaskedLM"
4 | ],
5 | "attention_probs_dropout_prob": 0.1,
6 | "bos_token_id": null,
7 | "directionality": "bidi",
8 | "do_sample": false,
9 | "eos_token_ids": null,
10 | "finetuning_task": null,
11 | "hidden_act": "gelu",
12 | "hidden_dropout_prob": 0.1,
13 | "hidden_size": 768,
14 | "initializer_range": 0.02,
15 | "intermediate_size": 3072,
16 | "is_decoder": false,
17 | "layer_norm_eps": 1e-12,
18 | "length_penalty": 1.0,
19 | "max_length": 20,
20 | "max_position_embeddings": 512,
21 | "model_type": "bert",
22 | "num_attention_heads": 12,
23 | "num_beams": 1,
24 | "num_hidden_layers": 12,
25 | "num_labels": 2,
26 | "num_return_sequences": 1,
27 | "output_attentions": false,
28 | "output_hidden_states": false,
29 | "output_past": true,
30 | "pad_token_id": 0,
31 | "pooler_fc_size": 768,
32 | "pooler_num_attention_heads": 12,
33 | "pooler_num_fc_layers": 3,
34 | "pooler_size_per_head": 128,
35 | "pooler_type": "first_token_transform",
36 | "pruned_heads": {},
37 | "repetition_penalty": 1.0,
38 | "temperature": 1.0,
39 | "top_k": 50,
40 | "top_p": 1.0,
41 | "torchscript": false,
42 | "type_vocab_size": 2,
43 | "use_bfloat16": false,
44 | "vocab_size": 10000
45 | }
46 |
--------------------------------------------------------------------------------
/tests/fixtures/SiBERT/special_tokens_map.json:
--------------------------------------------------------------------------------
1 | {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
--------------------------------------------------------------------------------
/tests/fixtures/SiBERT/tokenizer_config.json:
--------------------------------------------------------------------------------
1 | {"do_lower_case": false, "max_len": 512}
--------------------------------------------------------------------------------
/tests/fixtures/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/__init__.py
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_encoded/dataset_dict.json:
--------------------------------------------------------------------------------
1 | {"splits": ["train"]}
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_encoded/train/data-00000-of-00001.arrow:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/audio_datasets/common_voice_encoded/train/data-00000-of-00001.arrow
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_encoded/train/dataset_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "citation": "",
3 | "description": "",
4 | "features": {
5 | "input_features": {
6 | "feature": {
7 | "feature": {
8 | "dtype": "float32",
9 | "_type": "Value"
10 | },
11 | "_type": "Sequence"
12 | },
13 | "_type": "Sequence"
14 | },
15 | "labels": {
16 | "feature": {
17 | "dtype": "int64",
18 | "_type": "Value"
19 | },
20 | "_type": "Sequence"
21 | }
22 | },
23 | "homepage": "",
24 | "license": ""
25 | }
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_encoded/train/state.json:
--------------------------------------------------------------------------------
1 | {
2 | "_data_files": [
3 | {
4 | "filename": "data-00000-of-00001.arrow"
5 | }
6 | ],
7 | "_fingerprint": "138e80d328a5e394",
8 | "_format_columns": null,
9 | "_format_kwargs": {},
10 | "_format_type": "torch",
11 | "_output_all_columns": false,
12 | "_split": null
13 | }
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_org/dataset_dict.json:
--------------------------------------------------------------------------------
1 | {"splits": ["train"]}
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_org/train/data-00000-of-00001.arrow:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/audio_datasets/common_voice_org/train/data-00000-of-00001.arrow
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_org/train/dataset_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "citation": "",
3 | "description": "",
4 | "features": {
5 | "client_id": {
6 | "dtype": "string",
7 | "_type": "Value"
8 | },
9 | "path": {
10 | "dtype": "string",
11 | "_type": "Value"
12 | },
13 | "audio": {
14 | "array": {
15 | "feature": {
16 | "dtype": "float64",
17 | "_type": "Value"
18 | },
19 | "_type": "Sequence"
20 | },
21 | "path": {
22 | "dtype": "string",
23 | "_type": "Value"
24 | },
25 | "sampling_rate": {
26 | "dtype": "int64",
27 | "_type": "Value"
28 | }
29 | },
30 | "sentence": {
31 | "dtype": "string",
32 | "_type": "Value"
33 | },
34 | "up_votes": {
35 | "dtype": "int64",
36 | "_type": "Value"
37 | },
38 | "down_votes": {
39 | "dtype": "int64",
40 | "_type": "Value"
41 | },
42 | "age": {
43 | "dtype": "string",
44 | "_type": "Value"
45 | },
46 | "gender": {
47 | "dtype": "string",
48 | "_type": "Value"
49 | },
50 | "accent": {
51 | "dtype": "string",
52 | "_type": "Value"
53 | },
54 | "locale": {
55 | "dtype": "string",
56 | "_type": "Value"
57 | },
58 | "segment": {
59 | "dtype": "string",
60 | "_type": "Value"
61 | }
62 | },
63 | "homepage": "",
64 | "license": ""
65 | }
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/common_voice_org/train/state.json:
--------------------------------------------------------------------------------
1 | {
2 | "_data_files": [
3 | {
4 | "filename": "data-00000-of-00001.arrow"
5 | }
6 | ],
7 | "_fingerprint": "4db5194a70f28e75",
8 | "_format_columns": null,
9 | "_format_kwargs": {},
10 | "_format_type": null,
11 | "_output_all_columns": false,
12 | "_split": null
13 | }
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/speech_commands_org/dataset_dict.json:
--------------------------------------------------------------------------------
1 | {"splits": ["train"]}
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/speech_commands_org/train/data-00000-of-00001.arrow:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/audio_datasets/speech_commands_org/train/data-00000-of-00001.arrow
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/speech_commands_org/train/dataset_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "citation": "",
3 | "description": "",
4 | "features": {
5 | "file": {
6 | "dtype": "string",
7 | "_type": "Value"
8 | },
9 | "audio": {
10 | "array": {
11 | "feature": {
12 | "dtype": "float64",
13 | "_type": "Value"
14 | },
15 | "_type": "Sequence"
16 | },
17 | "path": {
18 | "dtype": "string",
19 | "_type": "Value"
20 | },
21 | "sampling_rate": {
22 | "dtype": "int64",
23 | "_type": "Value"
24 | }
25 | },
26 | "label": {
27 | "feature": {
28 | "dtype": "int64",
29 | "_type": "Value"
30 | },
31 | "_type": "Sequence"
32 | },
33 | "is_unknown": {
34 | "dtype": "bool",
35 | "_type": "Value"
36 | },
37 | "speaker_id": {
38 | "dtype": "string",
39 | "_type": "Value"
40 | },
41 | "utterance_id": {
42 | "dtype": "int64",
43 | "_type": "Value"
44 | }
45 | },
46 | "homepage": "",
47 | "license": ""
48 | }
--------------------------------------------------------------------------------
/tests/fixtures/audio_datasets/speech_commands_org/train/state.json:
--------------------------------------------------------------------------------
1 | {
2 | "_data_files": [
3 | {
4 | "filename": "data-00000-of-00001.arrow"
5 | }
6 | ],
7 | "_fingerprint": "c0cc06d19bea0105",
8 | "_format_columns": null,
9 | "_format_kwargs": {},
10 | "_format_type": null,
11 | "_output_all_columns": false,
12 | "_split": null
13 | }
--------------------------------------------------------------------------------
/tests/fixtures/hub-index.sample.json:
--------------------------------------------------------------------------------
1 | {
2 | "t": {
3 | "s": {
4 | "default": "path/to/default",
5 | "9076f36a74755ac4": {
6 | "default": "path/to/pfeiffer/default",
7 | "versions": {
8 | "example-org": "path/to/pfeiffer/example-org",
9 | "ukp": "path/to/pfeiffer/ukp"
10 | }
11 | },
12 | "b1017368d7a97b11": {
13 | "versions": {
14 | "example-org": "path/to/houlsby/example-org"
15 | }
16 | }
17 | }
18 | }
19 | }
--------------------------------------------------------------------------------
/tests/fixtures/samples/MRPC/dev.csv:
--------------------------------------------------------------------------------
1 | label,sentence1,sentence2
2 | equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,""" The foodservice pie business does not fit our long-term growth strategy ."
3 | not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,"His wife said he was "" 100 percent behind George Bush "" and looked forward to using his years of training in the war ."
4 | not_equivalent,"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .","The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent ."
5 | equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
6 | not_equivalent,No dates have been set for the civil or the criminal trial .,"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty ."
7 | equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
8 |
--------------------------------------------------------------------------------
/tests/fixtures/samples/MRPC/dev.tsv:
--------------------------------------------------------------------------------
1 | Quality #1 ID #2 ID #1 String #2 String
2 | 1 1355540 1355592 He said the foodservice pie business doesn 't fit the company 's long-term growth strategy . " The foodservice pie business does not fit our long-term growth strategy .
3 | 0 2029631 2029565 Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war . His wife said he was " 100 percent behind George Bush " and looked forward to using his years of training in the war .
4 | 0 487993 487952 The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat . The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .
5 | 1 1989515 1989458 The AFL-CIO is waiting until October to decide if it will endorse a candidate . The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
6 | 0 1783137 1782659 No dates have been set for the civil or the criminal trial . No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .
7 | 1 3039165 3039036 Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed . It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
8 |
--------------------------------------------------------------------------------
/tests/fixtures/samples/MRPC/train.csv:
--------------------------------------------------------------------------------
1 | label,sentence1,sentence2
2 | equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,""" The foodservice pie business does not fit our long-term growth strategy ."
3 | not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,"His wife said he was "" 100 percent behind George Bush "" and looked forward to using his years of training in the war ."
4 | not_equivalent,"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .","The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent ."
5 | equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
6 | not_equivalent,No dates have been set for the civil or the criminal trial .,"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty ."
7 | equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
8 |
--------------------------------------------------------------------------------
/tests/fixtures/samples/MRPC/train.tsv:
--------------------------------------------------------------------------------
1 | Quality #1 ID #2 ID #1 String #2 String
2 | 1 1355540 1355592 He said the foodservice pie business doesn 't fit the company 's long-term growth strategy . " The foodservice pie business does not fit our long-term growth strategy .
3 | 0 2029631 2029565 Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war . His wife said he was " 100 percent behind George Bush " and looked forward to using his years of training in the war .
4 | 0 487993 487952 The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat . The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .
5 | 1 1989515 1989458 The AFL-CIO is waiting until October to decide if it will endorse a candidate . The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
6 | 0 1783137 1782659 No dates have been set for the civil or the criminal trial . No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .
7 | 1 3039165 3039036 Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed . It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
8 |
--------------------------------------------------------------------------------
/tests/fixtures/samples/cifar10/cifar10.py:
--------------------------------------------------------------------------------
1 | """
2 | CIFAR-10 demo data, adapted from https://huggingface.co/datasets/cifar10.
3 | """
4 |
5 | import os
6 | import pickle
7 |
8 | import datasets
9 | import numpy as np
10 |
11 |
12 | class Cifar10(datasets.GeneratorBasedBuilder):
13 | """CIFAR-10 Data Set"""
14 |
15 | BUILDER_CONFIGS = [
16 | datasets.BuilderConfig(
17 | name="plain_text",
18 | version=datasets.Version("1.0.0", ""),
19 | description="Plain text import of CIFAR-10 Data Set",
20 | )
21 | ]
22 |
23 | def _info(self):
24 | return datasets.DatasetInfo(
25 | features=datasets.Features(
26 | {
27 | "img": datasets.Image(),
28 | "label": datasets.features.ClassLabel(num_classes=10),
29 | }
30 | ),
31 | )
32 |
33 | def _split_generators(self, dl_manager):
34 | return [
35 | datasets.SplitGenerator(
36 | name=datasets.Split.TRAIN,
37 | gen_kwargs={
38 | "files": ["data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", "data_batch_5"],
39 | "split": "train",
40 | },
41 | ),
42 | datasets.SplitGenerator(
43 | name=datasets.Split.TEST,
44 | gen_kwargs={"files": ["test_batch"], "split": "test"},
45 | ),
46 | ]
47 |
48 | def _generate_examples(self, files, split):
49 | for file in files:
50 | with open(os.path.join(self.config.data_dir, file), "rb") as fo:
51 | batch = pickle.load(fo, encoding="bytes")  # avoid shadowing the built-in dict
52 |
53 | labels = batch[b"labels"]
54 | images = batch[b"data"]
55 |
56 | for idx, _ in enumerate(images):
57 | # each row is a flat RGB array; reshape to (32, 32, 3) HWC for the Image feature
58 | img_reshaped = np.transpose(np.reshape(images[idx], (3, 32, 32)), (1, 2, 0))
59 |
60 | yield f"{file}_{idx}", {
61 | "img": img_reshaped,
62 | "label": labels[idx],
63 | }
64 |
--------------------------------------------------------------------------------
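The builder resolves the pickled CIFAR batches relative to data_dir, so the fixture directory is passed alongside the script path. A minimal loading sketch, assuming it is run from the repository root (trust_remote_code is required by newer datasets versions for script-based builders):

    import datasets

    ds = datasets.load_dataset(
        "tests/fixtures/samples/cifar10/cifar10.py",
        data_dir="tests/fixtures/samples/cifar10",
        split="train",
        trust_remote_code=True,
    )
    print(ds[0]["img"].size, ds[0]["label"])
--------------------------------------------------------------------------------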
/tests/fixtures/samples/cifar10/data_batch_1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/samples/cifar10/data_batch_1
--------------------------------------------------------------------------------
/tests/fixtures/samples/cifar10/data_batch_2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/samples/cifar10/data_batch_2
--------------------------------------------------------------------------------
/tests/fixtures/samples/cifar10/data_batch_3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/samples/cifar10/data_batch_3
--------------------------------------------------------------------------------
/tests/fixtures/samples/cifar10/data_batch_4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/samples/cifar10/data_batch_4
--------------------------------------------------------------------------------
/tests/fixtures/samples/cifar10/data_batch_5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/samples/cifar10/data_batch_5
--------------------------------------------------------------------------------
/tests/fixtures/samples/cifar10/test_batch:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/samples/cifar10/test_batch
--------------------------------------------------------------------------------
/tests/fixtures/samples/wmt16/sample.json:
--------------------------------------------------------------------------------
1 | {"translation": {"en": "Membership of Parliament: see Minutes", "ro": "Componenţa Parlamentului: a se vedea procesul-verbal"}}
2 | {"translation": {"en": "Approval of Minutes of previous sitting: see Minutes", "ro": "Aprobarea procesului-verbal al şedinţei precedente: a se vedea procesul-verbal"}}
3 | {"translation": {"en": "Membership of Parliament: see Minutes", "ro": "Componenţa Parlamentului: a se vedea procesul-verbal"}}
4 | {"translation": {"en": "Verification of credentials: see Minutes", "ro": "Verificarea prerogativelor: a se vedea procesul-verbal"}}
5 | {"translation": {"en": "Documents received: see Minutes", "ro": "Depunere de documente: a se vedea procesul-verbal"}}
6 | {"translation": {"en": "Written statements and oral questions (tabling): see Minutes", "ro": "Declaraţii scrise şi întrebări orale (depunere): consultaţi procesul-verbal"}}
7 | {"translation": {"en": "Petitions: see Minutes", "ro": "Petiţii: a se vedea procesul-verbal"}}
8 | {"translation": {"en": "Texts of agreements forwarded by the Council: see Minutes", "ro": "Transmiterea de către Consiliu a textelor acordurilor: a se vedea procesul-verbal"}}
9 | {"translation": {"en": "Action taken on Parliament's resolutions: see Minutes", "ro": "Cursul dat rezoluţiilor Parlamentului: a se vedea procesul-verbal"}}
10 | {"translation": {"en": "Agenda for next sitting: see Minutes", "ro": "Ordinea de zi a următoarei şedinţe: a se vedea procesul-verbal"}}
11 |
--------------------------------------------------------------------------------
/tests/fixtures/tests_samples/COCO/000000039769.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/tests_samples/COCO/000000039769.png
--------------------------------------------------------------------------------
/tests/fixtures/tests_samples/COCO/coco_panoptic/000000039769.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/fixtures/tests_samples/COCO/coco_panoptic/000000039769.png
--------------------------------------------------------------------------------
/tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt:
--------------------------------------------------------------------------------
1 | [{"id": 8222595, "category_id": 17, "iscrowd": 0, "bbox": [18, 54, 301, 415], "area": 53306}, {"id": 8225432, "category_id": 17, "iscrowd": 0, "bbox": [349, 26, 291, 343], "area": 59627}, {"id": 8798150, "category_id": 63, "iscrowd": 0, "bbox": [1, 0, 639, 474], "area": 174579}, {"id": 14466198, "category_id": 75, "iscrowd": 0, "bbox": [42, 74, 133, 45], "area": 4068}, {"id": 12821912, "category_id": 75, "iscrowd": 0, "bbox": [333, 80, 38, 106], "area": 2118}, {"id": 10898909, "category_id": 93, "iscrowd": 0, "bbox": [0, 0, 640, 480], "area": 2750}]
--------------------------------------------------------------------------------
/tests/test_methods/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_methods/__init__.py
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_methods/method_test_impl/__init__.py
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/composition/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_methods/method_test_impl/composition/__init__.py
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_methods/method_test_impl/core/__init__.py
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/core/test_adapter_backward_compability.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import tempfile
4 |
5 | from adapters import SeqBnConfig, __version__
6 | from tests.test_methods.method_test_impl.utils import create_twin_models
7 | from transformers.testing_utils import require_torch
8 |
9 |
10 | @require_torch
11 | class CompabilityTestMixin:
12 | def create_twin_models(self):
13 | return create_twin_models(self.model_class, self.config)
14 |
15 | def test_load_old_non_linearity(self):
16 | model1, model2 = self.create_twin_models()
17 | config = SeqBnConfig(non_linearity="gelu")
18 | name = "dummy"
19 | model1.add_adapter(name, config=config)
20 | model1.set_active_adapters(name)
21 | with tempfile.TemporaryDirectory() as temp_dir:
22 | model1.save_adapter(temp_dir, name)
23 |
24 | with open(os.path.join(temp_dir, "adapter_config.json"), "r") as file:
25 | data = json.load(file)
26 | data["config"]["non_linearity"] = "gelu_orig"
27 | del data["version"]
28 | with open(os.path.join(temp_dir, "adapter_config.json"), "w") as file:
29 | json.dump(data, file)
30 |
31 | # also tests that set_active works
32 | model2.load_adapter(temp_dir, set_active=True)
33 |
34 | # check if adapter was correctly loaded
35 | self.assertTrue(name in model2.adapters_config)
36 | self.assertEqual(
37 | "gelu", model2.adapters_config.config_map[model2.adapters_config.adapters[name]]["non_linearity"]
38 | )
39 |
40 | def test_save_version_with_adapter(self):
41 | model = self.get_model()
42 | config = SeqBnConfig(non_linearity="gelu")
43 | name = "dummy"
44 | model.add_adapter(name, config=config)
45 | model.set_active_adapters(name)
46 | with tempfile.TemporaryDirectory() as temp_dir:
47 | model.save_adapter(temp_dir, name)
48 |
49 | with open(os.path.join(temp_dir, "adapter_config.json"), "r") as file:
50 | data = json.load(file)
51 | self.assertEqual(__version__, data["version"].replace("adapters.", ""))
52 |
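
Outside the test harness, the version round-trip that `CompabilityTestMixin` checks looks roughly as follows; a sketch, with `bert-base-uncased` standing in for any supported checkpoint:

import json
import os
import tempfile

from adapters import AutoAdapterModel, SeqBnConfig

model = AutoAdapterModel.from_pretrained("bert-base-uncased")
model.add_adapter("dummy", config=SeqBnConfig(non_linearity="gelu"))

with tempfile.TemporaryDirectory() as temp_dir:
    model.save_adapter(temp_dir, "dummy")
    with open(os.path.join(temp_dir, "adapter_config.json")) as f:
        saved = json.load(f)
    # The saved config records the library version as "adapters.<__version__>";
    # load_adapter uses it to translate legacy fields such as renamed
    # non-linearities.
    print(saved["version"])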
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/embeddings/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_methods/method_test_impl/embeddings/__init__.py
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/heads/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_methods/method_test_impl/heads/__init__.py
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/peft/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_methods/method_test_impl/peft/__init__.py
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/peft/test_compacter.py:
--------------------------------------------------------------------------------
1 | from adapters import CompacterPlusPlusConfig
2 | from tests.test_methods.method_test_impl.base import AdapterMethodBaseTestMixin
3 | from transformers.testing_utils import require_torch
4 |
5 |
6 | @require_torch
7 | class CompacterTestMixin(AdapterMethodBaseTestMixin):
8 | default_config = CompacterPlusPlusConfig(phm_dim=2, reduction_factor=8)
9 |
10 | def test_add_compacter(self):
11 | model = self.get_model()
12 | self.run_add_test(model, self.default_config, ["adapters.{name}."])
13 |
14 | def test_leave_out_compacter(self):
15 | model = self.get_model()
16 | self.run_leave_out_test(model, self.default_config, self.leave_out_layers)
17 |
18 | def test_linear_average_compacter(self):
19 | model = self.get_model()
20 | self.run_linear_average_test(model, self.default_config, ["adapters.{name}."])
21 |
22 | def test_delete_compacter(self):
23 | model = self.get_model()
24 | self.run_delete_test(model, self.default_config, ["adapters.{name}."])
25 |
26 | def test_get_compacter(self):
27 | model = self.get_model()
28 | n_layers = len(list(model.iter_layers()))
29 | self.run_get_test(model, self.default_config, n_layers + 1)
30 |
31 | def test_forward_compacter(self):
32 | model = self.get_model()
33 | self.run_forward_test(model, self.default_config)
34 |
35 | def test_forward_shared_phm_compacter(self):
36 | model = self.get_model()
37 | adapter_config = CompacterPlusPlusConfig(phm_dim=4, shared_W_phm=True, reduction_factor=4)
38 | self.run_forward_test(model, adapter_config)
39 |
40 | def test_load_compacter(self):
41 | self.run_load_test(self.default_config)
42 |
43 | def test_train_shared_w_compacter(self):
44 | adapter_config = CompacterPlusPlusConfig(
45 | phm_dim=2, shared_W_phm=True, shared_phm_rule=False, reduction_factor=8
46 | )
47 | self.run_train_test(adapter_config, ["adapters.{name}."])
48 |
49 | def test_train_shared_phm_compacter(self):
50 | self.run_train_test(self.default_config, ["adapters.{name}."])
51 |
52 | def test_compacter_generate(self):
53 | self.run_generate_test(CompacterPlusPlusConfig(phm_dim=2, reduction_factor=8))
54 |
55 | def test_same_weights_after_adding_adapter(self):
56 | # setting init_weights_seed should lead to every adapter layer having the same weights after initialization
57 | self.run_same_weights_test(
58 | CompacterPlusPlusConfig(phm_dim=2, reduction_factor=8, init_weights_seed=42), ["adapters.{name}."]
59 | )
60 |
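
For orientation, the configuration this mixin exercises maps onto the public API as sketched below; checkpoint and hyperparameter values are illustrative:

from adapters import AutoAdapterModel, CompacterPlusPlusConfig

model = AutoAdapterModel.from_pretrained("bert-base-uncased")

# phm_dim sets the dimension of the parameterized hypercomplex multiplication;
# reduction_factor sizes the bottleneck relative to the hidden size. The tests
# shrink both so they fit their tiny model configs.
model.add_adapter("compacter", config=CompacterPlusPlusConfig(phm_dim=4, reduction_factor=32))
model.train_adapter("compacter")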
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/peft/test_config_union.py:
--------------------------------------------------------------------------------
1 | from adapters.configuration import (
2 | CompacterConfig,
3 | ConfigUnion,
4 | LoRAConfig,
5 | ParBnConfig,
6 | PrefixTuningConfig,
7 | SeqBnConfig,
8 | )
9 | from tests.test_methods.method_test_impl.base import AdapterMethodBaseTestMixin
10 | from transformers.testing_utils import require_torch
11 |
12 |
13 | @require_torch
14 | class ConfigUnionAdapterTest(AdapterMethodBaseTestMixin):
15 | adapter_configs_to_test = [
16 | (
17 | ConfigUnion(
18 | PrefixTuningConfig(),
19 | ParBnConfig(),
20 | ),
21 | ["adapters.{name}.", "prefix_tunings.{name}."],
22 | ),
23 | (
24 | ConfigUnion(
25 | CompacterConfig(
26 | reduction_factor=8
27 | ), # set to smaller value than default due to smaller hidden size of test models
28 | LoRAConfig(init_weights="bert"), # set to bert to avoid zero initialization
29 | ),
30 | ["adapters.{name}.", "loras.{name}."],
31 | ),
32 | (
33 | ConfigUnion(
34 | SeqBnConfig(phm_dim=1),
35 | LoRAConfig(init_weights="bert"), # set to bert to avoid zero initialization
36 | ),
37 | ["adapters.{name}.", "loras.{name}."],
38 | ),
39 | ]
40 |
41 | def test_add_union_adapter(self):
42 | model = self.get_model()
43 | model.eval()
44 | for adapter_config, filter_keys in self.adapter_configs_to_test:
45 | config = (
46 | "ConfigUnion: "
47 | + adapter_config.configs[0].__class__.__name__
48 | + adapter_config.configs[1].__class__.__name__
49 | )
50 | with self.subTest(model_class=model.__class__.__name__, config=config):
51 | self.run_add_test(model, adapter_config, filter_keys)
52 |
53 | def test_union_adapter_forward(self):
54 | model = self.get_model()
55 | model.eval()
56 | for adapter_config, _ in self.adapter_configs_to_test:
57 | config = (
58 | "ConfigUnion: "
59 | + adapter_config.configs[0].__class__.__name__
60 | + adapter_config.configs[1].__class__.__name__
61 | )
62 | with self.subTest(model_class=model.__class__.__name__, config=config):
63 | self.run_forward_test(model, adapter_config)
64 |
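
A minimal sketch of what such a union looks like in user code (checkpoint and adapter name are illustrative):

from adapters import AutoAdapterModel
from adapters.configuration import ConfigUnion, LoRAConfig, PrefixTuningConfig

model = AutoAdapterModel.from_pretrained("bert-base-uncased")

# A ConfigUnion registers several method configs under a single adapter name;
# activating or training that name applies all of them together.
union = ConfigUnion(PrefixTuningConfig(), LoRAConfig(r=8))
model.add_adapter("combined", config=union)
model.set_active_adapters("combined")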
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/peft/test_ia3.py:
--------------------------------------------------------------------------------
1 | from adapters import IA3Config
2 | from tests.test_methods.method_test_impl.base import AdapterMethodBaseTestMixin
3 | from transformers.testing_utils import require_torch
4 |
5 |
6 | @require_torch
7 | class IA3TestMixin(AdapterMethodBaseTestMixin):
8 | def test_add_ia3(self):
9 | model = self.get_model()
10 | self.run_add_test(model, IA3Config(), ["loras.{name}."])
11 |
12 | def test_leave_out_ia3(self):
13 | model = self.get_model()
14 | self.run_leave_out_test(model, IA3Config(), self.leave_out_layers)
15 |
16 | def test_linear_average_ia3(self):
17 | model = self.get_model()
18 | self.run_linear_average_test(model, IA3Config(), ["loras.{name}."])
19 |
20 | def test_delete_ia3(self):
21 | model = self.get_model()
22 | self.run_delete_test(model, IA3Config(), ["loras.{name}."])
23 |
24 | def test_get_ia3(self):
25 | model = self.get_model()
26 | n_layers = len(list(model.iter_layers()))
27 | self.run_get_test(model, IA3Config(intermediate_lora=True, output_lora=True), n_layers * 3)
28 |
29 | def test_forward_ia3(self):
30 | model = self.get_model()
31 | self.run_forward_test(model, IA3Config(init_weights="bert", intermediate_lora=True, output_lora=True))
32 |
33 | def test_load_ia3(self):
34 | self.run_load_test(IA3Config())
35 |
36 | def test_load_full_model_ia3(self):
37 | self.run_full_model_load_test(IA3Config(init_weights="bert"))
38 |
39 | def test_train_ia3(self):
40 | self.run_train_test(IA3Config(init_weights="bert"), ["loras.{name}."])
41 |
42 | def test_merge_ia3(self):
43 | self.run_merge_test(IA3Config(init_weights="bert"))
44 |
45 | def test_reset_ia3(self):
46 | self.run_reset_test(IA3Config(init_weights="bert"))
47 |
48 | def test_ia3_gradient_checkpointing_single_adapter(self):
49 | self.run_gradient_checkpointing_single_adapter_test(IA3Config())
50 |
51 | def test_same_weights_after_adding_adapter(self):
52 | # setting init_weights_seed should lead to every adapter layer having the same weights after initialization
53 | self.run_same_weights_test(IA3Config(init_weights_seed=42), ["loras.{name}."])
54 |
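
The filter keys above hint at the implementation: (IA)^3 reuses the LoRA layer machinery, which is why its weights live under "loras.{name}.". A usage sketch with an illustrative checkpoint:

from adapters import AutoAdapterModel, IA3Config

model = AutoAdapterModel.from_pretrained("bert-base-uncased")
model.add_adapter("ia3", config=IA3Config())  # learned rescaling vectors, stored as LoRA modules
model.train_adapter("ia3")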
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/peft/test_prompt_tuning.py:
--------------------------------------------------------------------------------
1 | from adapters import PromptTuningConfig
2 | from tests.test_methods.method_test_impl.base import AdapterMethodBaseTestMixin
3 | from transformers.testing_utils import require_torch
4 |
5 |
6 | @require_torch
7 | class PromptTuningTestMixin(AdapterMethodBaseTestMixin):
8 | def test_add_prompt_tuning(self):
9 | model = self.get_model()
10 | self.run_add_test(model, PromptTuningConfig(prompt_length=10), ["prompt_tunings.{name}."])
11 |
12 | def test_linear_average_prompt_tuning(self):
13 | model = self.get_model()
14 | self.run_linear_average_test(model, PromptTuningConfig(prompt_length=10), ["prompt_tunings.{name}."])
15 |
16 | def test_delete_prompt_tuning(self):
17 | model = self.get_model()
18 | self.run_delete_test(model, PromptTuningConfig(prompt_length=10), ["prompt_tunings.{name}."])
19 |
20 | def test_get_prompt_tuning(self):
21 | model = self.get_model()
22 | self.run_get_test(model, PromptTuningConfig(prompt_length=10), 1)
23 |
24 | def test_forward_prompt_tuning(self):
25 | model = self.get_model()
26 | for dtype in self.dtypes_to_test:
27 | with self.subTest(model_class=model.__class__.__name__, dtype=dtype):
28 | self.run_forward_test(model, PromptTuningConfig(prompt_length=10), dtype=dtype)
29 |
30 | def test_load_prompt_tuning(self):
31 | self.run_load_test(PromptTuningConfig(prompt_length=10))
32 |
33 | def test_load_full_model_prompt_tuning(self):
34 | self.run_full_model_load_test(PromptTuningConfig(prompt_length=10))
35 |
36 | def test_train_prompt_tuning(self):
37 | self.run_train_test(PromptTuningConfig(prompt_length=10), ["prompt_tunings.{name}."])
38 |
39 | def test_prompt_tuning_gradient_checkpointing_single_adapter(self):
40 | self.run_gradient_checkpointing_single_adapter_test(PromptTuningConfig(prompt_length=10))
41 |
42 | def test_same_weights_after_adding_adapter(self):
43 | # setting init_weights_seed should lead to every adapter layer having the same weights after initialization
44 | self.run_same_weights_test(
45 | PromptTuningConfig(init_weights_seed=42, prompt_length=10), ["prompt_tunings.{name}."]
46 | )
47 |
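
For reference, the corresponding user-facing call; a sketch (checkpoint and prompt length are illustrative):

from adapters import AutoAdapterModel, PromptTuningConfig

model = AutoAdapterModel.from_pretrained("bert-base-uncased")

# Prepends prompt_length trainable soft tokens to the input embeddings;
# only these prompt parameters are updated during training.
model.add_adapter("prompt", config=PromptTuningConfig(prompt_length=10))
model.train_adapter("prompt")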
--------------------------------------------------------------------------------
/tests/test_methods/method_test_impl/utils.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 |
4 | import torch
5 |
6 | from adapters import ADAPTER_MODEL_MAPPING, init
7 | from transformers.testing_utils import torch_device
8 |
9 |
10 | global_rng = random.Random()
11 |
12 |
13 | def create_twin_models(model_class, config_creator=None, interface=None):
14 | if config_creator and model_class.__name__.startswith("Auto"):
15 | model_config = config_creator()
16 | model1 = model_class.from_config(model_config)
17 | elif config_creator:
18 | model_config = config_creator()
19 | model1 = model_class(model_config)
20 | else:
21 | model_config = model_class.config_class()
22 | model1 = model_class(model_config)
23 | init(model1, interface=interface)
24 | model1.eval()
25 | # create a twin initialized with the same random weights
26 | model2 = copy.deepcopy(model1)
27 | model2.eval()
28 | return model1, model2
29 |
30 |
31 | def add_lm_head(config_class, model, adapter_name):
32 | """Add appropriate language model head based on model type"""
33 | if "seq2seq_lm" in ADAPTER_MODEL_MAPPING[config_class].head_types:
34 | model.add_seq2seq_lm_head(adapter_name)
35 | else:
36 | model.add_causal_lm_head(adapter_name)
37 |
38 |
39 | def make_config(config_class, **kwargs):
40 | return staticmethod(lambda: config_class(**kwargs))
41 |
42 |
43 | def ids_tensor(shape, vocab_size=5000, dtype=torch.long):
44 | total_dims = 1
45 | for dim in shape:
46 | total_dims *= dim
47 | values = [global_rng.randint(0, vocab_size - 1) for _ in range(total_dims)]
48 | return torch.tensor(data=values, dtype=dtype, device=torch_device).view(shape).contiguous()
49 |
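
A sketch of how these helpers compose in a test (the model class and config values are illustrative):

import torch

from adapters import BertAdapterModel
from tests.test_methods.method_test_impl.utils import create_twin_models
from transformers import BertConfig

# Twin models share their random initialization, so an adapter saved from
# model1 and loaded into model2 can be compared weight-for-weight.
model1, model2 = create_twin_models(
    BertAdapterModel,
    lambda: BertConfig(hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37),
)
for (name, p1), (_, p2) in zip(model1.named_parameters(), model2.named_parameters()):
    assert torch.equal(p1, p2), name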
--------------------------------------------------------------------------------
/tests/test_methods/test_on_albert.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from math import ceil
3 |
4 | import pytest
5 |
6 | from transformers import AlbertConfig
7 | from transformers.testing_utils import require_torch
8 |
9 | from .base import TextAdapterTestBase
10 | from .generator import generate_method_tests
11 | from .method_test_impl.heads.test_adapter_heads import PredictionHeadModelTestMixin
12 | from .method_test_impl.utils import make_config
13 |
14 |
15 | class AlbertAdapterTestBase(TextAdapterTestBase):
16 | """Model configuration for testing methods on Albert."""
17 |
18 | config_class = AlbertConfig
19 | config = make_config(
20 | AlbertConfig,
21 | embedding_size=16,
22 | hidden_size=64,
23 | num_hidden_layers=5,
24 | num_attention_heads=4,
25 | intermediate_size=37,
26 | num_hidden_groups=2,
27 | )
28 | tokenizer_name = "albert-base-v2"
29 | leave_out_layers = [0]
30 |
31 |
32 | method_tests = generate_method_tests(AlbertAdapterTestBase, not_supported=["Heads"])
33 |
34 | for test_class_name, test_class in method_tests.items():
35 | globals()[test_class_name] = test_class
36 |
37 |
38 | @require_torch
39 | @pytest.mark.heads
40 | class Heads(
41 | AlbertAdapterTestBase,
42 | PredictionHeadModelTestMixin,
43 | unittest.TestCase,
44 | ):
45 |
46 | def test_context_simple(self):
47 | expected_number_of_adapter_calls = ceil(self.config().num_hidden_layers / self.config().num_hidden_groups)
48 | super().test_context_simple(expected_number_of_adapter_calls=expected_number_of_adapter_calls)
49 |
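
Two conventions in this file recur across the per-model test files: the generated classes are assigned into `globals()` so pytest discovers them at module top level, and the `Heads` override accounts for ALBERT's cross-layer parameter sharing. With the tiny config above, the expected call count works out as:

from math import ceil

# Mirrors the override above for the 5-layer / 2-group test config.
num_hidden_layers, num_hidden_groups = 5, 2
expected_number_of_adapter_calls = ceil(num_hidden_layers / num_hidden_groups)
assert expected_number_of_adapter_calls == 3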
--------------------------------------------------------------------------------
/tests/test_methods/test_on_bart.py:
--------------------------------------------------------------------------------
1 | from transformers import BartConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class BartAdapterTestBase(TextAdapterTestBase):
9 | config_class = BartConfig
10 | config = make_config(
11 | BartConfig,
12 | d_model=16,
13 | encoder_layers=2,
14 | decoder_layers=2,
15 | encoder_attention_heads=4,
16 | decoder_attention_heads=4,
17 | encoder_ffn_dim=4,
18 | decoder_ffn_dim=4,
19 | )
20 | tokenizer_name = "facebook/bart-base"
21 |
22 |
23 | method_tests = generate_method_tests(
24 | BartAdapterTestBase, not_supported=["PromptTuning"], redundant=["ConfigUnion", "Embeddings"]
25 | )
26 |
27 | for test_class_name, test_class in method_tests.items():
28 | globals()[test_class_name] = test_class
29 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_beit.py:
--------------------------------------------------------------------------------
1 | from transformers import BeitConfig
2 |
3 | from .base import VisionAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class BeitAdapterTestBase(VisionAdapterTestBase):
9 | config_class = BeitConfig
10 | config = make_config(
11 | BeitConfig,
12 | image_size=224,
13 | hidden_size=32,
14 | num_hidden_layers=4,
15 | num_attention_heads=4,
16 | intermediate_size=37,
17 | )
18 | feature_extractor_name = "microsoft/beit-base-patch16-224-pt22k"
19 |
20 |
21 | method_tests = generate_method_tests(BeitAdapterTestBase, not_supported=["Composition", "Embeddings"])
22 |
23 | for test_class_name, test_class in method_tests.items():
24 | globals()[test_class_name] = test_class
25 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_bert.py:
--------------------------------------------------------------------------------
1 | from transformers import BertConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class BertAdapterTestBase(TextAdapterTestBase):
9 | config_class = BertConfig
10 | config = make_config(
11 | BertConfig,
12 | hidden_size=32,
13 | num_hidden_layers=4,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | )
17 | tokenizer_name = "bert-base-uncased"
18 |
19 |
20 | method_tests = generate_method_tests(BertAdapterTestBase)
21 |
22 | for test_class_name, test_class in method_tests.items():
23 | globals()[test_class_name] = test_class
24 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_clip/test_textmodel.py:
--------------------------------------------------------------------------------
1 | from tests.test_methods.base import TextAdapterTestBase
2 | from tests.test_methods.generator import generate_method_tests
3 | from tests.test_methods.method_test_impl.utils import make_config
4 | from transformers import CLIPTextConfig, CLIPTextModel
5 |
6 |
7 | class CLIPTextAdapterTestBase(TextAdapterTestBase):
8 | model_class = CLIPTextModel
9 | config_class = CLIPTextConfig
10 | config = make_config(
11 | CLIPTextConfig,
12 | hidden_size=32,
13 | num_hidden_layers=4,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | )
17 | tokenizer_name = "openai/clip-vit-base-patch32"
18 |
19 |
20 | method_tests = generate_method_tests(
21 | model_test_base=CLIPTextAdapterTestBase,
22 | not_supported=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
23 | )
24 |
25 |
26 | for test_class_name, test_class in method_tests.items():
27 | globals()[test_class_name] = test_class
28 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_clip/test_textwithprojectionmodel.py:
--------------------------------------------------------------------------------
1 | from tests.test_methods.base import TextAdapterTestBase
2 | from tests.test_methods.generator import generate_method_tests
3 | from tests.test_methods.method_test_impl.utils import make_config
4 | from transformers import CLIPTextConfig, CLIPTextModelWithProjection
5 |
6 |
7 | class CLIPTextWithProjectionAdapterTestBase(TextAdapterTestBase):
8 | model_class = CLIPTextModelWithProjection
9 | config_class = CLIPTextConfig
10 | config = make_config(
11 | CLIPTextConfig,
12 | hidden_size=32,
13 | num_hidden_layers=4,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | )
17 | tokenizer_name = "openai/clip-vit-base-patch32"
18 |
19 |
20 | method_tests = generate_method_tests(
21 | model_test_base=CLIPTextWithProjectionAdapterTestBase,
22 | not_supported=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
23 | )
24 |
25 |
26 | for test_class_name, test_class in method_tests.items():
27 | globals()[test_class_name] = test_class
28 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_clip/test_visionmodel.py:
--------------------------------------------------------------------------------
1 | from tests.test_methods.base import VisionAdapterTestBase
2 | from tests.test_methods.generator import generate_method_tests
3 | from tests.test_methods.method_test_impl.utils import make_config
4 | from transformers import CLIPVisionConfig, CLIPVisionModel
5 |
6 |
7 | class CLIPVisionAdapterTestBase(VisionAdapterTestBase):
8 | model_class = CLIPVisionModel
9 | config_class = CLIPVisionConfig
10 | config = make_config(
11 | CLIPVisionConfig,
12 | image_size=224,
13 | hidden_size=32,
14 | num_hidden_layers=4,
15 | num_attention_heads=4,
16 | intermediate_size=37,
17 | )
18 | feature_extractor_name = "openai/clip-vit-base-patch32"
19 |
20 |
21 | method_tests = generate_method_tests(
22 | model_test_base=CLIPVisionAdapterTestBase,
23 | not_supported=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
24 | )
25 |
26 |
27 | for test_class_name, test_class in method_tests.items():
28 | globals()[test_class_name] = test_class
29 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_clip/test_visionwithprojectionmodel.py:
--------------------------------------------------------------------------------
1 | from tests.test_methods.base import VisionAdapterTestBase
2 | from tests.test_methods.generator import generate_method_tests
3 | from tests.test_methods.method_test_impl.utils import make_config
4 | from transformers import CLIPVisionConfig, CLIPVisionModelWithProjection
5 |
6 |
7 | class CLIPVisionWithProjectionAdapterTestBase(VisionAdapterTestBase):
8 | model_class = CLIPVisionModelWithProjection
9 | config_class = CLIPVisionConfig
10 | config = make_config(
11 | CLIPVisionConfig,
12 | image_size=224,
13 | hidden_size=32,
14 | num_hidden_layers=4,
15 | num_attention_heads=4,
16 | intermediate_size=37,
17 | )
18 | feature_extractor_name = "openai/clip-vit-base-patch32"
19 |
20 |
21 | method_tests = generate_method_tests(
22 | model_test_base=CLIPVisionWithProjectionAdapterTestBase,
23 | not_supported=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
24 | )
25 |
26 |
27 | for test_class_name, test_class in method_tests.items():
28 | globals()[test_class_name] = test_class
29 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_deberta.py:
--------------------------------------------------------------------------------
1 | from transformers import DebertaConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class DebertaAdapterTestBase(TextAdapterTestBase):
9 | config_class = DebertaConfig
10 | config = make_config(
11 | DebertaConfig,
12 | hidden_size=32,
13 | num_hidden_layers=5,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | hidden_act="gelu",
17 | relative_attention=True,
18 | pos_att_type="p2c|c2p",
19 | )
20 | tokenizer_name = "microsoft/deberta-base"
21 |
22 | def test_parallel_training_lora(self):
23 | self.skipTest("Not supported for DeBERTa")
24 |
25 | def test_mtl_forward_mtl_lora(self):
26 | self.skipTest("Not supported for DeBERTa")
27 |
28 | def test_mtl_train_mtl_lora(self):
29 | self.skipTest("Not supported for DeBERTa")
30 |
31 |
32 | method_tests = generate_method_tests(DebertaAdapterTestBase, not_supported=["MTLLoRA", "Vera"])
33 |
34 | for test_class_name, test_class in method_tests.items():
35 | globals()[test_class_name] = test_class
36 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_debertaV2.py:
--------------------------------------------------------------------------------
1 | from transformers import DebertaV2Config
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class DebertaV2AdapterTestBase(TextAdapterTestBase):
9 | config_class = DebertaV2Config
10 | config = make_config(
11 | DebertaV2Config,
12 | hidden_size=32,
13 | num_hidden_layers=5,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | hidden_act="gelu",
17 | relative_attention=True,
18 | pos_att_type="p2c|c2p",
19 | )
20 | tokenizer_name = "microsoft/deberta-v3-base"
21 |
22 |
23 | method_tests = generate_method_tests(DebertaV2AdapterTestBase)
24 |
25 | for test_class_name, test_class in method_tests.items():
26 | globals()[test_class_name] = test_class
27 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_distilbert.py:
--------------------------------------------------------------------------------
1 | from transformers import DistilBertConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class DistilBertAdapterTestBase(TextAdapterTestBase):
9 | config_class = DistilBertConfig
10 | config = make_config(
11 | DistilBertConfig,
12 | dim=32,
13 | n_layers=4,
14 | n_heads=4,
15 | hidden_dim=37,
16 | )
17 | tokenizer_name = "distilbert-base-uncased"
18 |
19 |
20 | method_tests = generate_method_tests(DistilBertAdapterTestBase)
21 |
22 | for test_class_name, test_class in method_tests.items():
23 | globals()[test_class_name] = test_class
24 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_electra.py:
--------------------------------------------------------------------------------
1 | from transformers import ElectraConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class ElectraAdapterTestBase(TextAdapterTestBase):
9 | config_class = ElectraConfig
10 | config = make_config(
11 | ElectraConfig,
12 | # vocab_size=99,
13 | hidden_size=32,
14 | num_hidden_layers=5,
15 | num_attention_heads=4,
16 | intermediate_size=37,
17 | )
18 | tokenizer_name = "google/electra-base-generator"
19 |
20 |
21 | method_tests = generate_method_tests(ElectraAdapterTestBase)
22 |
23 | for test_class_name, test_class in method_tests.items():
24 | globals()[test_class_name] = test_class
25 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_gpt2.py:
--------------------------------------------------------------------------------
1 | from transformers import GPT2Config
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class GPT2AdapterTestBase(TextAdapterTestBase):
9 | config_class = GPT2Config
10 | config = make_config(
11 | GPT2Config,
12 | n_embd=32,
13 | n_layer=4,
14 | n_head=4,
15 | # set pad token to eos token
16 | pad_token_id=50256,
17 | )
18 | tokenizer_name = "gpt2"
19 |
20 | def test_parallel_training_lora(self):
21 | self.skipTest("Not supported for GPT2")
22 |
23 | def test_mtl_forward_mtl_lora(self):
24 | self.skipTest("Not supported for GPT2")
25 |
26 | def test_mtl_train_mtl_lora(self):
27 | self.skipTest("Not supported for GPT2")
28 |
29 |
30 | method_tests = generate_method_tests(
31 | GPT2AdapterTestBase,
32 | not_supported=["PromptTuning", "MTLLoRA", "Vera"],
33 | )
34 |
35 | for test_class_name, test_class in method_tests.items():
36 | globals()[test_class_name] = test_class
37 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_llama.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from transformers.models.llama.configuration_llama import LlamaConfig
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import TextAdapterTestBase
7 | from .generator import generate_method_tests
8 | from .method_test_impl.core.test_adapter_conversion import ModelClassConversionTestMixin
9 | from .method_test_impl.utils import make_config
10 |
11 |
12 | class LlamaAdapterTestBase(TextAdapterTestBase):
13 | config_class = LlamaConfig
14 | config = make_config(
15 | LlamaConfig,
16 | hidden_size=32,
17 | num_hidden_layers=5,
18 | num_attention_heads=4,
19 | intermediate_size=37,
20 | hidden_act="gelu",
21 | pad_token_id=0,
22 | )
23 | tokenizer_name = "openlm-research/open_llama_13b"
24 |
25 |
26 | method_tests = generate_method_tests(LlamaAdapterTestBase, not_supported=["PromptTuning"])
27 |
28 | for test_class_name, test_class in method_tests.items():
29 | globals()[test_class_name] = test_class
30 |
31 |
32 | @require_torch
33 | class ClassConversion(
34 | ModelClassConversionTestMixin,
35 | LlamaAdapterTestBase,
36 | unittest.TestCase,
37 | ):
38 | def test_conversion_question_answering_model(self):
39 | self.skipTest("We don't support the Llama QA model.")
40 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_mbart.py:
--------------------------------------------------------------------------------
1 | from transformers import MBartConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class MBartAdapterTestBase(TextAdapterTestBase):
9 | config_class = MBartConfig
10 | config = make_config(
11 | MBartConfig,
12 | d_model=16,
13 | encoder_layers=2,
14 | decoder_layers=2,
15 | encoder_attention_heads=4,
16 | decoder_attention_heads=4,
17 | encoder_ffn_dim=4,
18 | decoder_ffn_dim=4,
19 | vocab_size=250027,
20 | )
21 | tokenizer_name = "facebook/mbart-large-cc25"
22 |
23 | def test_parallel_training_lora(self):
24 | self.skipTest("Not supported for MBart")
25 |
26 |
27 | method_tests = generate_method_tests(
28 | MBartAdapterTestBase, redundant=["ConfigUnion", "Embeddings"], not_supported=["PromptTuning"]
29 | )
30 | for test_class_name, test_class in method_tests.items():
31 | globals()[test_class_name] = test_class
32 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_mistral.py:
--------------------------------------------------------------------------------
1 | from transformers.models.mistral.configuration_mistral import MistralConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class MistralAdapterTestBase(TextAdapterTestBase):
9 | config_class = MistralConfig
10 | config = make_config(
11 | MistralConfig,
12 | hidden_size=32,
13 | num_hidden_layers=5,
14 | num_attention_heads=8,
15 | intermediate_size=37,
16 | hidden_act="gelu",
17 | hidden_dropout_prob=0.1,
18 | pad_token_id=0,
19 | )
20 | tokenizer_name = "HuggingFaceH4/zephyr-7b-beta"
21 |
22 |
23 | test_methods = generate_method_tests(MistralAdapterTestBase, not_supported=["PromptTuning", "ConfigUnion"])
24 |
25 | for test_class_name, test_class in test_methods.items():
26 | globals()[test_class_name] = test_class
27 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_mt5.py:
--------------------------------------------------------------------------------
1 | from transformers import MT5Config
2 | from transformers.testing_utils import require_torch
3 |
4 | from .base import TextAdapterTestBase
5 | from .generator import generate_method_tests
6 | from .method_test_impl.utils import make_config
7 |
8 |
9 | @require_torch
10 | class MT5AdapterTestBase(TextAdapterTestBase):
11 | config_class = MT5Config
12 | config = make_config(
13 | MT5Config,
14 | d_model=16,
15 | num_layers=2,
16 | num_decoder_layers=2,
17 | num_heads=4,
18 | d_ff=4,
19 | d_kv=16 // 4,
20 | tie_word_embeddings=False,
21 | decoder_start_token_id=0,
22 | )
23 | tokenizer_name = "google/mt5-base"
24 |
25 |
26 | method_tests = generate_method_tests(MT5AdapterTestBase, not_supported=["PromptTuning", "ConfigUnion"])
27 |
28 | for test_name, test_class in method_tests.items():
29 | globals()[test_name] = test_class
30 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_plbart.py:
--------------------------------------------------------------------------------
1 | from transformers import PLBartConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class PLBartAdapterTestBase(TextAdapterTestBase):
9 | config_class = PLBartConfig
10 | config = make_config(
11 | PLBartConfig,
12 | d_model=32,
13 | encoder_layers=2,
14 | decoder_layers=2,
15 | encoder_attention_heads=4,
16 | decoder_attention_heads=4,
17 | encoder_ffn_dim=4,
18 | decoder_ffn_dim=4,
19 | scale_embedding=False, # Required for embedding tests
20 | )
21 | tokenizer_name = "uclanlp/plbart-base"
22 |
23 |
24 | method_tests = generate_method_tests(PLBartAdapterTestBase, not_supported=["PromptTuning"])
25 |
26 | for test_name, test_class in method_tests.items():
27 | globals()[test_name] = test_class
28 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_roberta.py:
--------------------------------------------------------------------------------
1 | from transformers import RobertaConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class RobertaAdapterTestBase(TextAdapterTestBase):
9 | config_class = RobertaConfig
10 | config = make_config(
11 | RobertaConfig,
12 | hidden_size=32,
13 | num_hidden_layers=4,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | vocab_size=50265,
17 | )
18 | tokenizer_name = "roberta-base"
19 |
20 |
21 | method_tests = generate_method_tests(RobertaAdapterTestBase)
22 |
23 | for test_name, test_class in method_tests.items():
24 | globals()[test_name] = test_class
25 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_t5.py:
--------------------------------------------------------------------------------
1 | from transformers import T5Config
2 | from transformers.testing_utils import require_torch
3 |
4 | from .base import TextAdapterTestBase
5 | from .generator import generate_method_tests
6 | from .method_test_impl.utils import make_config
7 |
8 |
9 | @require_torch
10 | class T5AdapterTestBase(TextAdapterTestBase):
11 | config_class = T5Config
12 | config = make_config(
13 | T5Config,
14 | d_model=16,
15 | num_layers=2,
16 | num_decoder_layers=2,
17 | num_heads=4,
18 | d_ff=4,
19 | d_kv=16 // 4,
20 | tie_word_embeddings=False,
21 | decoder_start_token_id=0,
22 | )
23 | tokenizer_name = "t5-base"
24 |
25 |
26 | method_tests = generate_method_tests(T5AdapterTestBase, not_supported=["ConfigUnion", "PromptTuning"])
27 | for test_class_name, test_class in method_tests.items():
28 | globals()[test_class_name] = test_class
29 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_vit.py:
--------------------------------------------------------------------------------
1 | from transformers import ViTConfig
2 |
3 | from .base import VisionAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class ViTAdapterTestBase(VisionAdapterTestBase):
9 | config_class = ViTConfig
10 | config = make_config(
11 | ViTConfig,
12 | image_size=224,
13 | hidden_size=32,
14 | num_hidden_layers=4,
15 | num_attention_heads=4,
16 | intermediate_size=37,
17 | )
18 | feature_extractor_name = "google/vit-base-patch16-224-in21k"
19 |
20 |
21 | method_tests = generate_method_tests(ViTAdapterTestBase, not_supported=["ConfigUnion", "Embeddings", "Composition"])
22 | for test_class_name, test_class in method_tests.items():
23 | globals()[test_class_name] = test_class
24 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_whisper.py:
--------------------------------------------------------------------------------
1 | from transformers import WhisperConfig
2 |
3 | from .base import AudioAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class WhisperAdapterTestBase(AudioAdapterTestBase):
9 | config_class = WhisperConfig
10 | config = make_config(
11 | WhisperConfig,
12 | d_model=32,
13 | encoder_layers=2,
14 | decoder_layers=2,
15 | encoder_attention_heads=4,
16 | decoder_attention_heads=4,
17 | encoder_ffn_dim=4,
18 | decoder_ffn_dim=4,
19 | vocab_size=51865,
20 | )
21 | tokenizer_name = "openai/whisper-small"
22 | sampling_rate = 16000
23 | decoder_start_token_id = 50257
24 |
25 | def test_parallel_training_lora(self):
26 | self.skipTest("Not supported for Whisper")
27 |
28 |
29 | method_tests = generate_method_tests(WhisperAdapterTestBase, not_supported=["PromptTuning"])
30 | for test_class_name, test_class in method_tests.items():
31 | globals()[test_class_name] = test_class
32 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_xlm_roberta.py:
--------------------------------------------------------------------------------
1 | from transformers import XLMRobertaConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class XLMRobertaAdapterTestBase(TextAdapterTestBase):
9 | config_class = XLMRobertaConfig
10 | config = make_config(
11 | XLMRobertaConfig,
12 | hidden_size=32,
13 | num_hidden_layers=4,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | vocab_size=250002,
17 | )
18 | tokenizer_name = "xlm-roberta-base"
19 |
20 |
21 | method_tests = generate_method_tests(XLMRobertaAdapterTestBase, redundant=["ConfigUnion", "Embeddings"])
22 | for test_class_name, test_class in method_tests.items():
23 | globals()[test_class_name] = test_class
24 |
--------------------------------------------------------------------------------
/tests/test_methods/test_on_xmod.py:
--------------------------------------------------------------------------------
1 | from transformers import XmodConfig
2 |
3 | from .base import TextAdapterTestBase
4 | from .generator import generate_method_tests
5 | from .method_test_impl.utils import make_config
6 |
7 |
8 | class XmodAdapterTestBase(TextAdapterTestBase):
9 | config_class = XmodConfig
10 | config = make_config(
11 | XmodConfig,
12 | hidden_size=32,
13 | num_hidden_layers=4,
14 | num_attention_heads=4,
15 | intermediate_size=37,
16 | vocab_size=250002,
17 | max_position_embeddings=512,
18 | default_language="en_XX",
19 | )
20 | tokenizer_name = "xlm-roberta-base"
21 |
22 |
23 | method_tests = generate_method_tests(XmodAdapterTestBase, not_supported=["ConfigUnion", "Embeddings"])
24 | for test_class_name, test_class in method_tests.items():
25 | globals()[test_class_name] = test_class
26 |
--------------------------------------------------------------------------------
/tests/test_misc/test_adapter_fusion_config.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from dataclasses import FrozenInstanceError
3 |
4 | from adapters import ADAPTERFUSION_CONFIG_MAP, AdapterFusionConfig
5 | from transformers.testing_utils import require_torch
6 |
7 |
8 | @require_torch
9 | class AdapterFusionConfigTest(unittest.TestCase):
10 |
11 | config_names = ADAPTERFUSION_CONFIG_MAP.keys()
12 |
13 | def test_config_load(self):
14 | for config_name in self.config_names:
15 | with self.subTest(config_name=config_name):
16 | config = AdapterFusionConfig.load(config_name, temperature=True)
17 | self.assertTrue(isinstance(config, AdapterFusionConfig))
18 | self.assertEqual(config.temperature, True)
19 |
20 | def test_config_immutable(self):
21 | def set_attr(config: AdapterFusionConfig):
22 | config.temperature = True
23 |
24 | for config in ADAPTERFUSION_CONFIG_MAP.values():
25 | with self.subTest(config=config.__class__.__name__):
26 | self.assertRaises(FrozenInstanceError, lambda: set_attr(config))
27 |
28 | def test_custom_attr(self):
29 | for config in ADAPTERFUSION_CONFIG_MAP.values():
30 | with self.subTest(config=config.__class__.__name__):
31 | config.dummy_attr = "test_value"
32 | self.assertEqual(config.dummy_attr, "test_value")
33 |
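
A sketch of the load path the first test exercises ("dynamic" is assumed to be one of the preset names in ADAPTERFUSION_CONFIG_MAP):

from adapters import AdapterFusionConfig

# Loads a named preset and overrides one field; the result is a frozen
# dataclass, so later attribute assignment raises FrozenInstanceError.
config = AdapterFusionConfig.load("dynamic", temperature=True)
print(config.temperature)  # True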
--------------------------------------------------------------------------------
/tests/test_misc/test_adapter_trainer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_misc/test_adapter_trainer/__init__.py
--------------------------------------------------------------------------------
/tests/test_models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adapter-hub/adapters/26140fcca9f48c9c2c684b63ebfc6e43c05b5fd6/tests/test_models/__init__.py
--------------------------------------------------------------------------------
/tests/test_models/base.py:
--------------------------------------------------------------------------------
1 | from transformers.testing_utils import require_torch
2 |
3 |
4 | @require_torch
5 | class AdapterModelTesterMixin:
6 | @property
7 | def all_generative_model_classes(self):
8 | return tuple() # AdapterModel classes are not generative as is (i.e. without a LM head)
9 |
10 | def test_training(self):
11 | self.skipTest("Not applicable.")
12 |
13 | def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
14 | self.skipTest("Not applicable.")
15 |
16 | def test_training_gradient_checkpointing(self):
17 | self.skipTest("Not applicable.")
18 |
19 | def test_correct_missing_keys(self):
20 | self.skipTest("Not applicable.")
21 |
22 | def test_generation_tester_mixin_inheritance(self):
23 | self.skipTest("Not applicable.") # AdapterModel classes are not generative as is (i.e. without a LM head)
24 |
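
The comments above allude to the head mechanism: an *AdapterModel only becomes generative once a language-model head is attached. A sketch (checkpoint and head name are illustrative):

from adapters import BertAdapterModel

model = BertAdapterModel.from_pretrained("bert-base-uncased")
model.add_causal_lm_head("lm")  # with a LM head attached, generation becomes meaningful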
--------------------------------------------------------------------------------
/tests/test_models/test_albert_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import AlbertAdapterModel
3 | from hf_transformers.tests.models.albert.test_modeling_albert import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class AlbertAdapterModelTest(AdapterModelTesterMixin, AlbertModelTest):
11 | all_model_classes = (AlbertAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_bart_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import BartAdapterModel
3 | from hf_transformers.tests.models.bart.test_modeling_bart import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class BartAdapterModelTest(AdapterModelTesterMixin, BartModelTest):
11 | all_model_classes = (BartAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_beit_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import BeitAdapterModel
3 | from hf_transformers.tests.models.beit.test_modeling_beit import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class BeitAdapterModelTest(AdapterModelTesterMixin, BeitModelTest):
11 | all_model_classes = (BeitAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_bert_generation_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import BertGenerationAdapterModel
3 | from hf_transformers.tests.models.bert_generation.test_modeling_bert_generation import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class BertGenerationAdapterModelTest(AdapterModelTesterMixin, BertGenerationEncoderTest):
11 | all_model_classes = (BertGenerationAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_bert_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import BertAdapterModel
3 | from hf_transformers.tests.models.bert.test_modeling_bert import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class BertAdapterModelTest(AdapterModelTesterMixin, BertModelTest):
11 | all_model_classes = (BertAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_debertaV2_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import DebertaV2AdapterModel
3 | from hf_transformers.tests.models.deberta_v2.test_modeling_deberta_v2 import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class DebertaV2AdapterModelTest(AdapterModelTesterMixin, DebertaV2ModelTest):
11 | all_model_classes = (DebertaV2AdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_deberta_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import DebertaAdapterModel
3 | from hf_transformers.tests.models.deberta.test_modeling_deberta import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class DebertaAdapterModelTest(AdapterModelTesterMixin, DebertaModelTest):
11 | all_model_classes = (DebertaAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_distilbert_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import DistilBertAdapterModel
3 | from hf_transformers.tests.models.distilbert.test_modeling_distilbert import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class DistilBertAdapterModelTest(AdapterModelTesterMixin, DistilBertModelTest):
11 | all_model_classes = (DistilBertAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_electra_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import ElectraAdapterModel
3 | from hf_transformers.tests.models.electra.test_modeling_electra import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class ElectraAdapterModelTest(AdapterModelTesterMixin, ElectraModelTest):
11 | all_model_classes = (ElectraAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_encoder_decoder_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from hf_transformers.tests.models.encoder_decoder.test_modeling_encoder_decoder import * # Imported to execute model tests
3 |
--------------------------------------------------------------------------------
/tests/test_models/test_gpt2_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import GPT2AdapterModel
3 | from hf_transformers.tests.models.gpt2.test_modeling_gpt2 import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class GPT2AdapterModelTest(AdapterModelTesterMixin, GPT2ModelTest):
11 | all_model_classes = (GPT2AdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_gptj_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import GPTJAdapterModel
3 | from hf_transformers.tests.models.gptj.test_modeling_gptj import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class GPTJAdapterModelTest(AdapterModelTesterMixin, GPTJModelTest):
11 | all_model_classes = (GPTJAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_llama_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import LlamaAdapterModel
3 | from hf_transformers.tests.models.llama.test_modeling_llama import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class LlamaAdapterModelTest(AdapterModelTesterMixin, LlamaModelTest):
11 | all_model_classes = (LlamaAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_mbart_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import MBartAdapterModel
3 | from hf_transformers.tests.models.mbart.test_modeling_mbart import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class MBartAdapterModelTest(AdapterModelTesterMixin, MBartModelTest):
11 | all_model_classes = (MBartAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_mistral_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import MistralAdapterModel
3 | from hf_transformers.tests.models.mistral.test_modeling_mistral import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class MistralAdapterModelTest(AdapterModelTesterMixin, MistralModelTest):
11 | all_model_classes = (MistralAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_mt5_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import MT5AdapterModel
3 | from hf_transformers.tests.models.mt5.test_modeling_mt5 import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class MT5AdapterModelTest(AdapterModelTesterMixin, MT5IntegrationTest):
11 | all_model_classes = (MT5AdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_plbart_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import PLBartAdapterModel
3 | from hf_transformers.tests.models.plbart.test_modeling_plbart import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class PLBartAdapterModelTest(AdapterModelTesterMixin, PLBartModelTest):
11 | all_model_classes = (PLBartAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_roberta_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import RobertaAdapterModel
3 | from hf_transformers.tests.models.roberta.test_modeling_roberta import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class RobertaAdapterModelTest(AdapterModelTesterMixin, RobertaModelTest):
11 | all_model_classes = (RobertaAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_t5_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import T5AdapterModel
3 | from hf_transformers.tests.models.t5.test_modeling_t5 import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class T5AdapterModelTest(AdapterModelTesterMixin, T5ModelTest):
11 | all_model_classes = (T5AdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_vit_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import ViTAdapterModel
3 | from hf_transformers.tests.models.vit.test_modeling_vit import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class ViTAdapterModelTest(AdapterModelTesterMixin, ViTModelTest):
11 | all_model_classes = (ViTAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_whisper_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import WhisperAdapterModel
3 | from hf_transformers.tests.models.whisper.test_modeling_whisper import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class WhisperAdapterModelTest(AdapterModelTesterMixin, WhisperModelTest):
11 | all_model_classes = (WhisperAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/tests/test_models/test_xlm_roberta_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from hf_transformers.tests.models.xlm_roberta.test_modeling_xlm_roberta import * # Imported to execute model tests
3 |
--------------------------------------------------------------------------------
/tests/test_models/test_xmod_model.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: F403,F405
2 | from adapters import XmodAdapterModel
3 | from hf_transformers.tests.models.xmod.test_modeling_xmod import *
4 | from transformers.testing_utils import require_torch
5 |
6 | from .base import AdapterModelTesterMixin
7 |
8 |
9 | @require_torch
10 | class XmodAdapterModelTest(AdapterModelTesterMixin, XmodModelTest):
11 | all_model_classes = (XmodAdapterModel,)
12 | fx_compatible = False
13 |
--------------------------------------------------------------------------------
/utils/back_comp/README.md:
--------------------------------------------------------------------------------
1 | # Backwards Compatibility Tests
2 |
3 | ## Motivation
4 |
5 | This directory contains a set of tests that can be run to ensure that newly introduced changes or refactorings do not break existing functionality. These tests verify model output consistency between two branches; here, we use the names `dev` and `main` for demonstration purposes, but the comparison can be run between any two branches that contain the `back_comp` test directory.
6 | To this end, the test script performs a forward pass for each supported model and compares the outputs between `dev` and `main` to identify any differences.
7 |
8 | ## Requirements
9 |
10 | To execute these tests, you must meet the following requirements:
11 |
12 | - The ability to run bash scripts (built in on Linux/macOS; on Windows, consider using third-party software such as [GNU Bash](https://www.gnu.org/software/bash/)).
13 | - Git as the version control system to switch between branches.
14 | - The ability to check out the desired branch. If the branch is from another fork, you may need to add that repository as a remote (see the example at the end of this README). Refer to [GitHub's instructions](https://docs.github.com/en/get-started/getting-started-with-git/managing-remote-repositories) for details.
15 | - A Python virtual environment, so that the installed version of the `adapters` package can be switched between branches.
16 |
17 | ## Procedure
18 |
19 | To perform the compatibility tests, follow these steps:
20 |
21 | 1. Determine a directory where you want to save the model outputs generated by the tests and assign its path to the variable `SaveDir` in the shell script `compare.sh` (see the example values at the end of this README). Careful: choose a directory OUTSIDE of the repository; otherwise, the saved model outputs are no longer accessible after switching branches.
22 | 2. Select the branch you want to compare with `main` and assign its name to the variable `Branch`.
23 | 3. Make sure you have checked out `main` before starting the test script.
24 | 4. In your command line, navigate to the `back_comp` directory and execute the script by running `sh compare.sh`.
25 |
26 | The results are printed to the command line for inspection.
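27 |
28 | For illustration, the two variables at the top of `compare.sh` could be set as follows (these values are placeholders, not defaults shipped with the script):
29 |
30 | ```sh
31 | SaveDir="/tmp/back_comp_outputs"  # any writable directory outside the repository
32 | Branch="dev"                      # the branch to compare against
33 | ```
34 |
35 | If that branch lives in another fork, it can be fetched into a local branch first, e.g.:
36 |
37 | ```sh
38 | git remote add fork https://github.com/<user>/adapters.git
39 | git fetch fork dev:dev
40 | ```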
--------------------------------------------------------------------------------
/utils/back_comp/compare.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script performs backward compatibility tests by comparing the adapters versions of two branches.
4 | # The goal is to check whether the model output produced under identical conditions is the same on both branches.
5 | # To do this, we need to determine a directory path to save the reference output produced by the current branch.
6 | # It's important that this directory is outside the adapters repository to remain accessible when switching branches.
7 |
8 | # Select a directory to save the reference outputs (must be outside the repository!)
9 | SaveDir=""
10 |
11 | # Now, determine the branch you want to compare against.
12 | Branch=""
13 |
14 | # After setting these variables, you can execute this script from the back_comp directory using the command: `sh compare.sh`
15 |
16 |
17 | cd ..
18 | pip install -e ".[dev]"  # Ensure that the adapters version of the current branch is installed
19 | cd back_comp
20 |
21 | echo "Creating reference outputs..."
22 | python create_outputs.py --path="$SaveDir"
23 | cd ..
24 |
25 |
26 | git checkout "$Branch"  # Switch to the branch under comparison
27 | pip install -e ".[dev]"  # Install the adapters version of that branch
28 |
29 | cd back_comp
30 | echo "Comparing to reference outputs..."
31 | python compare_outputs.py --path="$SaveDir"
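32 |
33 | # Note: the script finishes while $Branch is still checked out; if needed, switch back
34 | # manually afterwards, e.g. with `git checkout main` (illustrative, your base branch may differ).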
--------------------------------------------------------------------------------
/utils/back_comp/compare_outputs.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | from Utils import (
5 | compare_lists_close,
6 | convert_tensors_to_list,
7 | create_output,
8 | fix_seeds,
9 | get_model_names,
10 | get_new_adapter_config_strings,
11 | load_model,
12 | restore_from_jsonl,
13 | )
14 |
15 |
16 | parser = argparse.ArgumentParser()
17 | parser.add_argument("--path", type=str)
18 | args = parser.parse_args()
19 |
20 |
21 | # Create the root path
22 | base_dir = os.path.join(args.path, "model_outputs")
23 | fix_seeds()
24 |
25 | for model_name in get_model_names():
26 | # Load the reference model
27 | print(f"Model = {model_name}")
28 | model_dir = os.path.join(base_dir, model_name)
29 | model = load_model(model_name, os.path.join(model_dir, "model_weights"))
30 |
31 | for adapter_config in get_new_adapter_config_strings():
32 | # Create a new model output
33 | adapter_name = model.load_adapter(os.path.join(model_dir, "weights_" + adapter_config))
34 | model.set_active_adapters(adapter_name)
35 | model_output = create_output(model, model_name)
36 |
37 | # Compare the model output to the reference output
38 | model_output_n, last_hidden_state = convert_tensors_to_list(model_output)
39 | ref_output = restore_from_jsonl(config=adapter_config, file_path=os.path.join(model_dir, "output.jsonl"))
40 |         is_equal = compare_lists_close(ref_output, model_output_n, rtol=1e-05, atol=1e-08)  # element-wise comparison with relative and absolute tolerances
41 | print(f"Adapter: {adapter_config} -> {is_equal}")
42 |
43 | model.delete_adapter(adapter_name)
44 |
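45 | # Illustrative invocation (example path, which must match the one passed to create_outputs.py):
46 | #   python compare_outputs.py --path=/tmp/back_comp_outputs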
--------------------------------------------------------------------------------
/utils/back_comp/create_outputs.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | from adapters import AutoAdapterModel, CompacterConfig, CompacterPlusPlusConfig
5 | from Utils import (
6 | convert_tensors_to_list,
7 | create_model,
8 | create_output,
9 | fix_seeds,
10 | get_model_names,
11 | get_new_adapter_config_strings,
12 | load_model,
13 | save_to_jsonl,
14 | )
15 |
16 |
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument("--path", type=str)
19 | args = parser.parse_args()
20 |
21 |
22 | # Create the root path
23 | base_dir = os.path.join(args.path, "model_outputs")
24 | fix_seeds()
25 |
26 | for model_name in get_model_names():
27 | print(f"Model = {model_name}")
28 |     # Create the directory that will contain the model and adapter weights as well as the model outputs
29 | model_dir = os.path.join(base_dir, model_name)
30 | os.makedirs(model_dir, exist_ok=True)
31 |
32 | model = create_model(model_name=model_name, model_class=AutoAdapterModel)
33 | # Save the model weights to reuse later
34 | model_save_dir = os.path.join(model_dir, "model_weights")
35 | os.makedirs(model_save_dir, exist_ok=True)
36 |     model.save_pretrained(model_save_dir)  # save the base model weights for later reuse
37 |
38 | for config in get_new_adapter_config_strings():
39 |         # Reload the saved base model so that each adapter config starts from identical weights
40 | model = load_model(model_name, os.path.join(model_dir, "model_weights"))
41 |
42 |         # Add the adapter to be tested
43 |         # For Compacter-style adapters, phm_dim and reduction_factor are set manually so that the bottleneck dimension (hidden_size / reduction_factor) is divisible by phm_dim, e.g. 768 / 8 = 96, which is divisible by phm_dim=2
44 | if config == "compacter++":
45 | adapter_config = CompacterPlusPlusConfig(phm_dim=2, reduction_factor=8)
46 | elif config == "compacter":
47 | adapter_config = CompacterConfig(phm_dim=2, reduction_factor=8)
48 | else:
49 | adapter_config = config
50 | adapter_name = "weights_" + config
51 | model.add_adapter(adapter_name, config=adapter_config)
52 | model.set_active_adapters(adapter_name)
53 |
54 | model_output = create_output(model, model_name)
55 |
56 | # Process and save the output
57 | model_output_n, last_hidden_state = convert_tensors_to_list(model_output)
58 | save_to_jsonl(model_output_n, config, os.path.join(model_dir, "output.jsonl"))
59 |
60 | # Save the adapter weights
61 | adapter_save_dir = os.path.join(model_dir, adapter_name)
62 | os.makedirs(adapter_save_dir, exist_ok=True)
63 | model.save_adapter(save_directory=adapter_save_dir, adapter_name=adapter_name)
64 | model.delete_adapter(adapter_name)
65 |
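66 | # Illustrative invocation (example path; model weights, adapter weights, and reference outputs
67 | # are written below it):
68 | #   python create_outputs.py --path=/tmp/back_comp_outputs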
--------------------------------------------------------------------------------
/utils/check_inits.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | import sys
3 | from os.path import abspath, dirname, join
4 |
5 |
6 | sys.path.insert(1, abspath(join(dirname(dirname(__file__)), "hf_transformers")))  # make the local hf_transformers checkout importable (it provides the upstream utils package)
7 |
8 | import utils
9 | from utils.check_inits import check_all_inits
10 |
11 |
12 | utils.check_inits.PATH_TO_TRANSFORMERS = "src/adapters"
13 | check_all_inits()
14 |
--------------------------------------------------------------------------------
/utils/custom_init_isort.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | import argparse
3 | import sys
4 | from os.path import abspath, dirname, join
5 |
6 |
7 | sys.path.insert(1, abspath(join(dirname(dirname(__file__)), "hf_transformers")))  # make the local hf_transformers checkout importable (it provides the upstream utils package)
8 |
9 | import utils
10 | from utils.custom_init_isort import sort_imports_in_all_inits
11 |
12 |
13 | utils.custom_init_isort.PATH_TO_TRANSFORMERS = "src/adapters"
14 |
15 |
16 | if __name__ == "__main__":
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
19 | args = parser.parse_args()
20 |
21 | sort_imports_in_all_inits(check_only=args.check_only)
22 |
--------------------------------------------------------------------------------
/utils/sort_auto_mappings.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | import argparse
3 | import sys
4 | from os.path import abspath, dirname, join
5 |
6 |
7 | sys.path.insert(1, abspath(join(dirname(dirname(__file__)), "hf_transformers")))  # make the local hf_transformers checkout importable (it provides the upstream utils package)
8 |
9 | import utils
10 | from utils.sort_auto_mappings import sort_all_auto_mappings
11 |
12 |
13 | utils.sort_auto_mappings.PATH_TO_AUTO_MODULE = "src/adapters/models/auto"
14 |
15 |
16 | if __name__ == "__main__":
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
19 | args = parser.parse_args()
20 |
21 |     sort_all_auto_mappings(not args.check_only)  # the upstream utility takes an overwrite flag, hence the negation
22 |
--------------------------------------------------------------------------------