├── .gitattributes ├── .github └── workflows │ ├── check_code_quality.yml │ ├── check_templates.yml │ └── show_new_templates.yml ├── .gitignore ├── API_DOCUMENTATION.md ├── CITATION.cff ├── CODEOWNERS ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── assets ├── PromptSource ACL Demo Figure.png └── promptsource_app.png ├── promptsource ├── __init__.py ├── app.py ├── session.py ├── templates.py ├── templates │ ├── Zaid │ │ ├── coqa_expanded │ │ │ └── templates.yaml │ │ └── quac_expanded │ │ │ └── templates.yaml │ ├── acronym_identification │ │ └── templates.yaml │ ├── ade_corpus_v2 │ │ ├── Ade_corpus_v2_classification │ │ │ └── templates.yaml │ │ ├── Ade_corpus_v2_drug_ade_relation │ │ │ └── templates.yaml │ │ └── Ade_corpus_v2_drug_dosage_relation │ │ │ └── templates.yaml │ ├── adversarial_qa │ │ ├── adversarialQA │ │ │ └── templates.yaml │ │ ├── dbert │ │ │ └── templates.yaml │ │ ├── dbidaf │ │ │ └── templates.yaml │ │ └── droberta │ │ │ └── templates.yaml │ ├── aeslc │ │ └── templates.yaml │ ├── ag_news │ │ └── templates.yaml │ ├── ai2_arc │ │ ├── ARC-Challenge │ │ │ └── templates.yaml │ │ └── ARC-Easy │ │ │ └── templates.yaml │ ├── amazon_polarity │ │ └── templates.yaml │ ├── amazon_reviews_multi │ │ └── en │ │ │ └── templates.yaml │ ├── amazon_us_reviews │ │ └── Wireless_v1_00 │ │ │ └── templates.yaml │ ├── ambig_qa │ │ └── light │ │ │ └── templates.yaml │ ├── anli │ │ └── templates.yaml │ ├── app_reviews │ │ └── templates.yaml │ ├── aqua_rat │ │ └── raw │ │ │ └── templates.yaml │ ├── art │ │ └── templates.yaml │ ├── asnq │ │ └── templates.yaml │ ├── asset │ │ ├── ratings │ │ │ └── templates.yaml │ │ └── simplification │ │ │ └── templates.yaml │ ├── banking77 │ │ └── templates.yaml │ ├── billsum │ │ └── templates.yaml │ ├── bing_coronavirus_query_set │ │ └── templates.yaml │ ├── biosses │ │ └── templates.yaml │ ├── blbooksgenre │ │ └── title_genre_classifiction │ │ │ └── templates.yaml │ ├── blended_skill_talk │ │ └── templates.yaml │ ├── cbt │ │ ├── CN │ │ │ └── templates.yaml │ │ ├── NE │ │ │ └── templates.yaml │ │ ├── P │ │ │ └── templates.yaml │ │ ├── V │ │ │ └── templates.yaml │ │ └── raw │ │ │ └── templates.yaml │ ├── cc_news │ │ └── templates.yaml │ ├── circa │ │ └── templates.yaml │ ├── climate_fever │ │ └── templates.yaml │ ├── cnn_dailymail │ │ └── 3.0.0 │ │ │ └── templates.yaml │ ├── codah │ │ ├── codah │ │ │ └── templates.yaml │ │ ├── fold_0 │ │ │ └── templates.yaml │ │ ├── fold_1 │ │ │ └── templates.yaml │ │ ├── fold_2 │ │ │ └── templates.yaml │ │ ├── fold_3 │ │ │ └── templates.yaml │ │ └── fold_4 │ │ │ └── templates.yaml │ ├── code_x_glue_tc_text_to_code │ │ └── templates.yaml │ ├── common_gen │ │ └── templates.yaml │ ├── commonsense_qa │ │ └── templates.yaml │ ├── conv_ai │ │ └── templates.yaml │ ├── conv_ai_2 │ │ └── templates.yaml │ ├── conv_ai_3 │ │ └── templates.yaml │ ├── coqa │ │ └── templates.yaml │ ├── cord19 │ │ └── metadata │ │ │ └── templates.yaml │ ├── cos_e │ │ ├── v1.0 │ │ │ └── templates.yaml │ │ └── v1.11 │ │ │ └── templates.yaml │ ├── cosmos_qa │ │ └── templates.yaml │ ├── covid_qa_castorini │ │ └── templates.yaml │ ├── craffel │ │ └── openai_lambada │ │ │ └── templates.yaml │ ├── craigslist_bargains │ │ └── templates.yaml │ ├── crows_pairs │ │ └── templates.yaml │ ├── dbpedia_14 │ │ └── templates.yaml │ ├── discofuse │ │ ├── discofuse-sport │ │ │ └── templates.yaml │ │ └── discofuse-wikipedia │ │ │ └── templates.yaml │ ├── discovery │ │ └── discovery │ │ │ └── templates.yaml │ ├── docred │ │ └── templates.yaml │ ├── dream │ │ └── 
templates.yaml │ ├── drop │ │ └── templates.yaml │ ├── duorc │ │ ├── ParaphraseRC │ │ │ └── templates.yaml │ │ └── SelfRC │ │ │ └── templates.yaml │ ├── e2e_nlg_cleaned │ │ └── templates.yaml │ ├── ecthr_cases │ │ └── alleged-violation-prediction │ │ │ └── templates.yaml │ ├── emo │ │ └── templates.yaml │ ├── emotion │ │ └── templates.yaml │ ├── enriched_web_nlg │ │ └── en │ │ │ └── templates.yaml │ ├── esnli │ │ └── templates.yaml │ ├── evidence_infer_treatment │ │ ├── 1.1 │ │ │ └── templates.yaml │ │ └── 2.0 │ │ │ └── templates.yaml │ ├── fever │ │ ├── v1.0 │ │ │ └── templates.yaml │ │ └── v2.0 │ │ │ └── templates.yaml │ ├── financial_phrasebank │ │ └── sentences_allagree │ │ │ └── templates.yaml │ ├── freebase_qa │ │ └── templates.yaml │ ├── generated_reviews_enth │ │ └── templates.yaml │ ├── gigaword │ │ └── templates.yaml │ ├── glue │ │ ├── ax │ │ │ └── templates.yaml │ │ ├── cola │ │ │ └── templates.yaml │ │ ├── mnli │ │ │ └── templates.yaml │ │ ├── mnli_matched │ │ │ └── templates.yaml │ │ ├── mnli_mismatched │ │ │ └── templates.yaml │ │ ├── mrpc │ │ │ └── templates.yaml │ │ ├── qnli │ │ │ └── templates.yaml │ │ ├── qqp │ │ │ └── templates.yaml │ │ ├── rte │ │ │ └── templates.yaml │ │ ├── sst2 │ │ │ └── templates.yaml │ │ ├── stsb │ │ │ └── templates.yaml │ │ └── wnli │ │ │ └── templates.yaml │ ├── google_wellformed_query │ │ └── templates.yaml │ ├── great_code │ │ └── templates.yaml │ ├── guardian_authorship │ │ ├── cross_genre_1 │ │ │ └── templates.yaml │ │ ├── cross_topic_1 │ │ │ └── templates.yaml │ │ ├── cross_topic_4 │ │ │ └── templates.yaml │ │ └── cross_topic_7 │ │ │ └── templates.yaml │ ├── gutenberg_time │ │ └── templates.yaml │ ├── hans │ │ └── templates.yaml │ ├── hate_speech18 │ │ └── templates.yaml │ ├── head_qa │ │ └── en │ │ │ └── templates.yaml │ ├── health_fact │ │ └── templates.yaml │ ├── hellaswag │ │ └── templates.yaml │ ├── hlgd │ │ └── templates.yaml │ ├── hotpot_qa │ │ ├── distractor │ │ │ └── templates.yaml │ │ └── fullwiki │ │ │ └── templates.yaml │ ├── humicroedit │ │ ├── subtask-1 │ │ │ └── templates.yaml │ │ └── subtask-2 │ │ │ └── templates.yaml │ ├── hyperpartisan_news_detection │ │ ├── byarticle │ │ │ └── templates.yaml │ │ └── bypublisher │ │ │ └── templates.yaml │ ├── imdb │ │ └── templates.yaml │ ├── jfleg │ │ └── templates.yaml │ ├── jigsaw_unintended_bias │ │ └── templates.yaml │ ├── kelm │ │ └── templates.yaml │ ├── kilt_tasks │ │ ├── hotpotqa │ │ │ └── templates.yaml │ │ └── nq │ │ │ └── templates.yaml │ ├── lama │ │ └── trex │ │ │ └── templates.yaml │ ├── lambada │ │ └── templates.yaml │ ├── liar │ │ └── templates.yaml │ ├── limit │ │ └── templates.yaml │ ├── math_dataset │ │ ├── algebra__linear_1d │ │ │ └── templates.yaml │ │ ├── algebra__linear_1d_composed │ │ │ └── templates.yaml │ │ ├── algebra__linear_2d │ │ │ └── templates.yaml │ │ └── algebra__linear_2d_composed │ │ │ └── templates.yaml │ ├── math_qa │ │ └── templates.yaml │ ├── mc_taco │ │ └── templates.yaml │ ├── mdd │ │ ├── task1_qa │ │ │ └── templates.yaml │ │ ├── task2_recs │ │ │ └── templates.yaml │ │ └── task3_qarecs │ │ │ └── templates.yaml │ ├── medal │ │ └── templates.yaml │ ├── medical_questions_pairs │ │ └── templates.yaml │ ├── meta_woz │ │ └── dialogues │ │ │ └── templates.yaml │ ├── mocha │ │ └── templates.yaml │ ├── movie_rationales │ │ └── templates.yaml │ ├── multi_news │ │ └── templates.yaml │ ├── multi_nli │ │ └── templates.yaml │ ├── multi_x_science_sum │ │ └── templates.yaml │ ├── mwsc │ │ └── templates.yaml │ ├── narrativeqa │ │ └── templates.yaml │ ├── 
ncbi_disease │ │ └── templates.yaml │ ├── neural_code_search │ │ └── evaluation_dataset │ │ │ └── templates.yaml │ ├── newspop │ │ └── templates.yaml │ ├── nlu_evaluation_data │ │ └── templates.yaml │ ├── nq_open │ │ └── templates.yaml │ ├── numer_sense │ │ └── templates.yaml │ ├── onestop_english │ │ └── templates.yaml │ ├── openai_humaneval │ │ └── templates.yaml │ ├── openbookqa │ │ ├── additional │ │ │ └── templates.yaml │ │ └── main │ │ │ └── templates.yaml │ ├── paws-x │ │ └── en │ │ │ └── templates.yaml │ ├── paws │ │ ├── labeled_final │ │ │ └── templates.yaml │ │ ├── labeled_swap │ │ │ └── templates.yaml │ │ └── unlabeled_final │ │ │ └── templates.yaml │ ├── piqa │ │ └── templates.yaml │ ├── poem_sentiment │ │ └── templates.yaml │ ├── pubmed_qa │ │ └── pqa_labeled │ │ │ └── templates.yaml │ ├── qa_srl │ │ └── templates.yaml │ ├── qa_zre │ │ └── templates.yaml │ ├── qasc │ │ └── templates.yaml │ ├── qed │ │ └── templates.yaml │ ├── quac │ │ └── templates.yaml │ ├── quail │ │ └── templates.yaml │ ├── quarel │ │ └── templates.yaml │ ├── quartz │ │ └── templates.yaml │ ├── quora │ │ └── templates.yaml │ ├── quoref │ │ └── templates.yaml │ ├── race │ │ ├── all │ │ │ └── templates.yaml │ │ ├── high │ │ │ └── templates.yaml │ │ └── middle │ │ │ └── templates.yaml │ ├── riddle_sense │ │ └── templates.yaml │ ├── ropes │ │ └── templates.yaml │ ├── rotten_tomatoes │ │ └── templates.yaml │ ├── samsum │ │ └── templates.yaml │ ├── scan │ │ ├── addprim_jump │ │ │ └── templates.yaml │ │ ├── addprim_turn_left │ │ │ └── templates.yaml │ │ ├── filler_num0 │ │ │ └── templates.yaml │ │ ├── filler_num1 │ │ │ └── templates.yaml │ │ ├── filler_num2 │ │ │ └── templates.yaml │ │ ├── filler_num3 │ │ │ └── templates.yaml │ │ ├── length │ │ │ └── templates.yaml │ │ ├── simple │ │ │ └── templates.yaml │ │ ├── template_around_right │ │ │ └── templates.yaml │ │ ├── template_jump_around_right │ │ │ └── templates.yaml │ │ ├── template_opposite_right │ │ │ └── templates.yaml │ │ └── template_right │ │ │ └── templates.yaml │ ├── scicite │ │ └── templates.yaml │ ├── scientific_papers │ │ ├── arxiv │ │ │ └── templates.yaml │ │ └── pubmed │ │ │ └── templates.yaml │ ├── sciq │ │ └── templates.yaml │ ├── scitail │ │ ├── snli_format │ │ │ └── templates.yaml │ │ └── tsv_format │ │ │ └── templates.yaml │ ├── scitldr │ │ └── Abstract │ │ │ └── templates.yaml │ ├── selqa │ │ └── answer_selection_analysis │ │ │ └── templates.yaml │ ├── sem_eval_2010_task_8 │ │ └── templates.yaml │ ├── sem_eval_2014_task_1 │ │ └── templates.yaml │ ├── sent_comp │ │ └── templates.yaml │ ├── sick │ │ └── templates.yaml │ ├── sms_spam │ │ └── templates.yaml │ ├── snips_built_in_intents │ │ └── templates.yaml │ ├── snli │ │ └── templates.yaml │ ├── social_i_qa │ │ └── templates.yaml │ ├── species_800 │ │ └── templates.yaml │ ├── squad │ │ └── templates.yaml │ ├── squad_adversarial │ │ └── AddSent │ │ │ └── templates.yaml │ ├── squad_v2 │ │ └── templates.yaml │ ├── squadshifts │ │ ├── amazon │ │ │ └── templates.yaml │ │ ├── new_wiki │ │ │ └── templates.yaml │ │ └── nyt │ │ │ └── templates.yaml │ ├── sst │ │ └── default │ │ │ └── templates.yaml │ ├── story_cloze │ │ └── 2016 │ │ │ └── templates.yaml │ ├── stsb_multi_mt │ │ └── en │ │ │ └── templates.yaml │ ├── subjqa │ │ ├── books │ │ │ └── templates.yaml │ │ ├── electronics │ │ │ └── templates.yaml │ │ ├── grocery │ │ │ └── templates.yaml │ │ ├── movies │ │ │ └── templates.yaml │ │ ├── restaurants │ │ │ └── templates.yaml │ │ └── tripadvisor │ │ │ └── templates.yaml │ ├── super_glue │ │ ├── axb │ │ │ 
└── templates.yaml │ │ ├── axg │ │ │ └── templates.yaml │ │ ├── boolq │ │ │ └── templates.yaml │ │ ├── cb │ │ │ └── templates.yaml │ │ ├── copa │ │ │ └── templates.yaml │ │ ├── multirc │ │ │ └── templates.yaml │ │ ├── record │ │ │ └── templates.yaml │ │ ├── rte │ │ │ └── templates.yaml │ │ ├── wic │ │ │ └── templates.yaml │ │ └── wsc.fixed │ │ │ └── templates.yaml │ ├── swag │ │ └── regular │ │ │ └── templates.yaml │ ├── tab_fact │ │ └── tab_fact │ │ │ └── templates.yaml │ ├── tmu_gfm_dataset │ │ └── templates.yaml │ ├── trec │ │ └── templates.yaml │ ├── trivia_qa │ │ └── unfiltered │ │ │ └── templates.yaml │ ├── turk │ │ └── templates.yaml │ ├── tweet_eval │ │ ├── emoji │ │ │ └── templates.yaml │ │ ├── emotion │ │ │ └── templates.yaml │ │ ├── hate │ │ │ └── templates.yaml │ │ ├── irony │ │ │ └── templates.yaml │ │ ├── offensive │ │ │ └── templates.yaml │ │ ├── sentiment │ │ │ └── templates.yaml │ │ ├── stance_abortion │ │ │ └── templates.yaml │ │ ├── stance_atheism │ │ │ └── templates.yaml │ │ ├── stance_climate │ │ │ └── templates.yaml │ │ ├── stance_feminist │ │ │ └── templates.yaml │ │ └── stance_hillary │ │ │ └── templates.yaml │ ├── tydiqa │ │ ├── primary_task │ │ │ └── templates.yaml │ │ └── secondary_task │ │ │ └── templates.yaml │ ├── web_questions │ │ └── templates.yaml │ ├── wiki_bio │ │ └── templates.yaml │ ├── wiki_hop │ │ ├── masked │ │ │ └── templates.yaml │ │ └── original │ │ │ └── templates.yaml │ ├── wiki_qa │ │ └── templates.yaml │ ├── wiki_split │ │ └── templates.yaml │ ├── wino_bias │ │ ├── type1_anti │ │ │ └── templates.yaml │ │ ├── type1_pro │ │ │ └── templates.yaml │ │ ├── type2_anti │ │ │ └── templates.yaml │ │ └── type2_pro │ │ │ └── templates.yaml │ ├── winograd_wsc │ │ ├── wsc273 │ │ │ └── templates.yaml │ │ └── wsc285 │ │ │ └── templates.yaml │ ├── winogrande │ │ ├── winogrande_debiased │ │ │ └── templates.yaml │ │ ├── winogrande_l │ │ │ └── templates.yaml │ │ ├── winogrande_m │ │ │ └── templates.yaml │ │ ├── winogrande_s │ │ │ └── templates.yaml │ │ ├── winogrande_xl │ │ │ └── templates.yaml │ │ └── winogrande_xs │ │ │ └── templates.yaml │ ├── wiqa │ │ └── templates.yaml │ ├── xnli │ │ └── en │ │ │ └── templates.yaml │ ├── xquad │ │ └── xquad.en │ │ │ └── templates.yaml │ ├── xquad_r │ │ └── en │ │ │ └── templates.yaml │ ├── xsum │ │ └── templates.yaml │ ├── yahoo_answers_qa │ │ └── templates.yaml │ ├── yahoo_answers_topics │ │ └── templates.yaml │ ├── yelp_polarity │ │ └── templates.yaml │ ├── yelp_review_full │ │ └── templates.yaml │ └── zest │ │ └── templates.yaml └── utils.py ├── setup.cfg ├── setup.py └── test ├── show_templates.py └── test_templates.py /.gitattributes: -------------------------------------------------------------------------------- 1 | assets/PromptSource[[:space:]]ACL[[:space:]]Demo[[:space:]]Figure.png filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.github/workflows/check_code_quality.yml: -------------------------------------------------------------------------------- 1 | name: check_code_quality 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Set up Python 3.7 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: '3.7' 18 | - name: Install dependencies 19 | run: | 20 | python -m pip install --upgrade pip 21 | pip install black isort flake8 22 | - run: black --check --line-length 119 --target-version 
py38 promptsource 23 | - run: isort --check-only promptsource 24 | - run: flake8 promptsource --max-line-length 119 --per-file-ignores="__init__.py:F401" 25 | -------------------------------------------------------------------------------- /.github/workflows/check_templates.yml: -------------------------------------------------------------------------------- 1 | name: check_templates 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Set up Python 3.7 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: '3.7' 18 | - name: Install dependencies 19 | run: | 20 | python -m pip install --upgrade pip 21 | pip install . 22 | - name: Check templates 23 | run: | 24 | pytest test/test_templates.py 25 | -------------------------------------------------------------------------------- /.github/workflows/show_new_templates.yml: -------------------------------------------------------------------------------- 1 | name: show_new_templates 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | workflow_dispatch: 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | timeout-minutes: 60 14 | steps: 15 | - uses: actions/checkout@v2 16 | - name: Set up Python 3.7 17 | uses: actions/setup-python@v2 18 | with: 19 | python-version: '3.7' 20 | - name: Install dependencies 21 | run: | 22 | python -m pip install --upgrade pip 23 | pip install . 24 | pip install black isort flake8 25 | - id: files 26 | uses: jitterbit/get-changed-files@v1 27 | continue-on-error: true 28 | - name: Prompt outputs 29 | run: | 30 | for changed_file in ${{ steps.files.outputs.all }}; do 31 | python test/show_templates.py ${changed_file} 32 | done 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # OS X 2 | .DS_Store 3 | 4 | # PyCharm 5 | .idea 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | pip-wheel-metadata/ 30 | share/python-wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | db.sqlite3-journal 69 | 70 | # Flask stuff: 71 | instance/ 72 | .webassets-cache 73 | 74 | # Scrapy stuff: 75 | .scrapy 76 | 77 | # Sphinx documentation 78 | docs/_build/ 79 | 80 | # PyBuilder 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 101 | __pypackages__/ 102 | 103 | # Celery stuff 104 | celerybeat-schedule 105 | celerybeat.pid 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # Environments 111 | .env 112 | .venv 113 | env/ 114 | venv/ 115 | ENV/ 116 | env.bak/ 117 | venv.bak/ 118 | 119 | # Spyder project settings 120 | .spyderproject 121 | .spyproject 122 | 123 | # Rope project settings 124 | .ropeproject 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | 137 | # Locked files 138 | *.lock 139 | -------------------------------------------------------------------------------- /API_DOCUMENTATION.md: -------------------------------------------------------------------------------- 1 | # Manipulating prompts 2 | PromptSource implements 4 classes to store, manipulate and use prompts and their metadata: `Template`, `Metadata`, `DatasetTemplates` and `TemplateCollection`. All of them are implemented in [`templates.py`](promptsource/templates.py) 3 | 4 | ## Class `Template` and `Metadata` 5 | `Template` is a class that wraps a prompt, its associated metadata, and implements the helper functions to use the prompt. 6 | 7 | Instances of `Template` have the following main methods that will come handy: 8 | * `apply(example, truncate=True, highlight_variables=False)`: Create a prompted example by applying the template to the given example 9 | - `example` (Dict): the dataset example to create a prompt for 10 | - `truncate` (Bool, default to `True`): if True, example fields will be truncated to `TEXT_VAR_LENGTH` chars 11 | - `highlight_variables`(Bool, default to `False`): highlight the added variables (internal use for the app rendering) 12 | * `get_id()`: Get the uuid of the prompt 13 | * `get_name()`: Get the name of the prompt 14 | * `get_reference()`: Get any additional information about the prompt (such as bibliographic reference) 15 | * `get_answer_choices_list(example)`: If applicable, returns a list of answer choices for a given example. 
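Below is a minimal, editorial sketch (not part of the original file) of how these `Template` methods might fit together. It assumes the Hugging Face `datasets` library is installed, uses `ag_news` purely as an illustrative dataset (any dataset with a `templates.yaml` folder in this repo would do), and looks prompts up by name through the `DatasetTemplates` class described in the next section.

```python
# Hedged usage sketch: apply an existing PromptSource prompt to one dataset example.
# Assumes `pip install datasets promptsource`; "ag_news" is only an illustrative choice.
from datasets import load_dataset
from promptsource.templates import DatasetTemplates

example = load_dataset("ag_news", split="train")[0]   # a single dataset example (a dict)

prompts = DatasetTemplates("ag_news")                  # see the `DatasetTemplates` section below
template = prompts[prompts.all_template_names[0]]      # pick any existing prompt by its name

print(template.get_name(), template.get_id(), template.get_reference())

result = template.apply(example)                       # rendered prompt pieces: [input, target]
print("INPUT:", result[0])
print("TARGET:", result[1])

# For classification-style prompts this returns the verbalized label options; otherwise None.
print(template.get_answer_choices_list(example))
```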
16 | 17 | Each `Template` also has a `metadata` attribute, an instance of the class `Metadata` that encapsulates the following 3 attributes: 18 | * `original_task`: If True, this prompt asks a model to perform the original task designed for this dataset. 19 | * `choices_in_prompt`: If True, the answer choices are included in the templates such that models see those choices in the input. Only applicable to classification tasks. 20 | * `metrics`: List of strings denoting metrics to use for evaluation 21 | 22 | ## Class `DatasetTemplates` 23 | `DatasetTemplates` is a class that wraps all the prompts (each of which is an instance of `Template`) for a specific dataset/subset and implements all the helper functions necessary to read/write to the YAML file in which the prompts are saved. 24 | 25 | You will likely mainly be interested in getting the existing prompts and their names for a given dataset. You can do that with the following instantiation: 26 | ```python 27 | >>> template_key = f"{dataset_name}/{subset_name}" if subset_name is not None else dataset_name 28 | >>> prompts = DatasetTemplates(template_key) 29 | >>> len(prompts) # Returns the number of prompts for the given dataset 30 | >>> prompts.all_template_names # Returns a sorted list of all template names for this dataset 31 | ``` 32 | 33 | ## Class `TemplateCollection` 34 | `TemplateCollection` is a class that encapsulates all the prompts available under PromptSource by wrapping the `DatasetTemplates` class. It initializes the `DatasetTemplates` for all existing template folders, gives access to each `DatasetTemplates`, and provides aggregated counts over all `DatasetTemplates`. 35 | 36 | The main methods are: 37 | * `get_dataset(dataset_name, subset_name)`: Return the `DatasetTemplates` object corresponding to the dataset name 38 | - `dataset_name` (Str): name of the dataset to get 39 | - `subset_name` (Str, defaults to `None`): name of the subset 40 | * `get_templates_count()`: Return the overall template count over all datasets. NB: datasets are not broken down into subsets for this count, i.e., subset counts are included in the dataset count 41 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: "0.2.2" 2 | date-released: 2022-02 3 | message: "If you use this software, please cite it using these metadata." 4 | title: "PromptSource" 5 | url: "https://github.com/bigscience-workshop/promptsource" 6 | authors: 7 | - family-names: Bach 8 | given-names: "Stephen H." 9 | - family-names: Sanh 10 | given-names: Victor 11 | - family-names: Yong 12 | given-names: Zheng-Xin 13 | - family-names: Webson 14 | given-names: Albert 15 | - family-names: Raffel 16 | given-names: Colin 17 | - family-names: Nayak 18 | given-names: "Nihal V."
19 | - family-names: Sharma 20 | given-names: Abheesht 21 | - family-names: Kim 22 | given-names: Taewoon 23 | - family-names: Bari 24 | given-names: "M Saiful" 25 | - family-names: Fevry 26 | given-names: Thibault 27 | - family-names: Alyafeaiu 28 | given-names: Zaid 29 | - family-names: Dey 30 | given-names: Manan 31 | - family-names: Santilli 32 | given-names: Andrea 33 | - family-names: Sun 34 | given-names: Zhiqing 35 | - family-names: Ben-David 36 | given-names: Srulik 37 | - family-names: Xu 38 | given-names: Canwen 39 | - family-names: Chhablani 40 | given-names: Gunjan 41 | - family-names: Wang 42 | given-names: Han 43 | - family-names: Fries 44 | given-names: "Jason Alan" 45 | - family-names: Al-shaibani 46 | given-names: "Maged S." 47 | - family-names: Sharma 48 | given-names: Shanya 49 | - family-names: Thakker 50 | given-names: Urmish 51 | - family-names: Almubarak 52 | given-names: Khalid 53 | - family-names: Tang 54 | given-names: Xiangru 55 | - family-names: Tian-Jian 56 | given-names: Mike 57 | - family-names: Rush 58 | given-names: "Alexander M." 59 | preferred-citation: 60 | type: article 61 | authors: 62 | - family-names: Bach 63 | given-names: "Stephen H." 64 | - family-names: Sanh 65 | given-names: Victor 66 | - family-names: Yong 67 | given-names: Zheng-Xin 68 | - family-names: Webson 69 | given-names: Albert 70 | - family-names: Raffel 71 | given-names: Colin 72 | - family-names: Nayak 73 | given-names: "Nihal V." 74 | - family-names: Sharma 75 | given-names: Abheesht 76 | - family-names: Kim 77 | given-names: Taewoon 78 | - family-names: Bari 79 | given-names: "M Saiful" 80 | - family-names: Fevry 81 | given-names: Thibault 82 | - family-names: Alyafeaiu 83 | given-names: Zaid 84 | - family-names: Dey 85 | given-names: Manan 86 | - family-names: Santilli 87 | given-names: Andrea 88 | - family-names: Sun 89 | given-names: Zhiqing 90 | - family-names: Ben-David 91 | given-names: Srulik 92 | - family-names: Xu 93 | given-names: Canwen 94 | - family-names: Chhablani 95 | given-names: Gunjan 96 | - family-names: Wang 97 | given-names: Han 98 | - family-names: Fries 99 | given-names: "Jason Alan" 100 | - family-names: Al-shaibani 101 | given-names: "Maged S." 102 | - family-names: Sharma 103 | given-names: Shanya 104 | - family-names: Thakker 105 | given-names: Urmish 106 | - family-names: Almubarak 107 | given-names: Khalid 108 | - family-names: Tang 109 | given-names: Xiangru 110 | - family-names: Tian-Jian 111 | given-names: Mike 112 | - family-names: Rush 113 | given-names: "Alexander M." 
114 | title: "PromptSource: An Integrated Development Environment and Repository for Natural Language Prompts" 115 | year: 2022 116 | publisher: "arXiv" 117 | url: "https://arxiv.org/abs/2202.01279" 118 | address: "Online" 119 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | @bigscience-workshop/promptsource-codeowners 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: quality style 2 | 3 | check_dirs := promptsource 4 | 5 | # Check that source code meets quality standards 6 | 7 | quality: 8 | black --check --line-length 119 --target-version py38 $(check_dirs) 9 | isort --check-only $(check_dirs) 10 | flake8 $(check_dirs) --max-line-length 119 11 | 12 | # Format source code automatically 13 | 14 | style: 15 | black --line-length 119 --target-version py38 $(check_dirs) 16 | isort $(check_dirs) 17 | -------------------------------------------------------------------------------- /assets/PromptSource ACL Demo Figure.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:55f0805843a41274c819ca2a90658985d03cb026bfeaf82c929bbd2da0132a16 3 | size 3540753 4 | -------------------------------------------------------------------------------- /assets/promptsource_app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bigscience-workshop/promptsource/7dab96a3eeb3717cea633705135ebc488885d709/assets/promptsource_app.png -------------------------------------------------------------------------------- /promptsource/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | 4 | DEFAULT_PROMPTSOURCE_CACHE_HOME = str(Path("~/.cache/promptsource").expanduser()) 5 | -------------------------------------------------------------------------------- /promptsource/session.py: -------------------------------------------------------------------------------- 1 | # 2 | # Code for managing session state, which is needed for multi-input forms 3 | # See https://github.com/streamlit/streamlit/issues/1557 4 | # 5 | # This code is taken from 6 | # https://gist.github.com/okld/0aba4869ba6fdc8d49132e6974e2e662 7 | # 8 | from streamlit.hashing import _CodeHasher 9 | from streamlit.report_thread import get_report_ctx 10 | from streamlit.server.server import Server 11 | 12 | 13 | class _SessionState: 14 | def __init__(self, session, hash_funcs): 15 | """Initialize SessionState instance.""" 16 | self.__dict__["_state"] = { 17 | "data": {}, 18 | "hash": None, 19 | "hasher": _CodeHasher(hash_funcs), 20 | "is_rerun": False, 21 | "session": session, 22 | } 23 | 24 | def __call__(self, **kwargs): 25 | """Initialize state data once.""" 26 | for item, value in kwargs.items(): 27 | if item not in self._state["data"]: 28 | self._state["data"][item] = value 29 | 30 | def __getitem__(self, item): 31 | """Return a saved state value, None if item is undefined.""" 32 | return self._state["data"].get(item, None) 33 | 34 | def __getattr__(self, item): 35 | """Return a saved state value, None if item is undefined.""" 36 | return self._state["data"].get(item, None) 37 | 38 | def __setitem__(self, item, value): 39 | """Set state value.""" 40 | 
self._state["data"][item] = value 41 | 42 | def __setattr__(self, item, value): 43 | """Set state value.""" 44 | self._state["data"][item] = value 45 | 46 | def clear(self): 47 | """Clear session state and request a rerun.""" 48 | self._state["data"].clear() 49 | self._state["session"].request_rerun(None) 50 | 51 | def sync(self): 52 | """ 53 | Rerun the app with all state values up to date from the beginning to 54 | fix rollbacks. 55 | """ 56 | data_to_bytes = self._state["hasher"].to_bytes(self._state["data"], None) 57 | 58 | # Ensure to rerun only once to avoid infinite loops 59 | # caused by a constantly changing state value at each run. 60 | # 61 | # Example: state.value += 1 62 | if self._state["is_rerun"]: 63 | self._state["is_rerun"] = False 64 | 65 | elif self._state["hash"] is not None: 66 | if self._state["hash"] != data_to_bytes: 67 | self._state["is_rerun"] = True 68 | self._state["session"].request_rerun(None) 69 | 70 | self._state["hash"] = data_to_bytes 71 | 72 | 73 | def _get_session(): 74 | session_id = get_report_ctx().session_id 75 | session_info = Server.get_current()._get_session_info(session_id) 76 | 77 | if session_info is None: 78 | raise RuntimeError("Couldn't get your Streamlit Session object.") 79 | 80 | return session_info.session 81 | 82 | 83 | def _get_state(hash_funcs=None): 84 | session = _get_session() 85 | 86 | if not hasattr(session, "_custom_session_state"): 87 | session._custom_session_state = _SessionState(session, hash_funcs) 88 | 89 | return session._custom_session_state 90 | -------------------------------------------------------------------------------- /promptsource/templates/Zaid/quac_expanded/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: Zaid/quac_expanded 2 | templates: 3 | 01d8c949-89a7-4a44-9a39-6cf2ac3e0a7b: !Template 4 | answer_choices: null 5 | id: 01d8c949-89a7-4a44-9a39-6cf2ac3e0a7b 6 | jinja: "What is the answer to the last question in the dialogue below? If there\ 7 | \ is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\n\ 8 | Q: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" 9 | metadata: !TemplateMetadata 10 | choices_in_prompt: false 11 | languages: 12 | - en 13 | metrics: 14 | - Other 15 | original_task: true 16 | name: What is the answer 17 | reference: 'Metric: F1' 18 | 1484c6e6-bf42-47ca-9ea7-c3c552a24de1: !Template 19 | answer_choices: null 20 | id: 1484c6e6-bf42-47ca-9ea7-c3c552a24de1 21 | jinja: "{{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" 22 | metadata: !TemplateMetadata 23 | choices_in_prompt: false 24 | languages: 25 | - en 26 | metrics: 27 | - Other 28 | original_task: true 29 | name: GPT-3 Style 30 | reference: 'Brown et al. NeurIPS 2020. Metric: F1' 31 | 2bca0532-01a3-4a64-a228-a57ae0965719: !Template 32 | answer_choices: null 33 | id: 2bca0532-01a3-4a64-a228-a57ae0965719 34 | jinja: "Below is a passage, followed by a series of questions and answers about\ 35 | \ the passage. Answer the last question based on the information contained in\ 36 | \ the passage. 
If there is no answer in the passage, say \"unknown\".\n\nPassage:\ 37 | \ {{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" 38 | metadata: !TemplateMetadata 39 | choices_in_prompt: false 40 | languages: 41 | - en 42 | metrics: 43 | - Other 44 | original_task: true 45 | name: Verbose instructions 46 | reference: 'Metric: F1' 47 | 4abd0379-dbc0-4f71-901b-dd0af3581157: !Template 48 | answer_choices: null 49 | id: 4abd0379-dbc0-4f71-901b-dd0af3581157 50 | jinja: "Answer the last question based on the information contained in the passage.\ 51 | \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\ 52 | \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" 53 | metadata: !TemplateMetadata 54 | choices_in_prompt: false 55 | languages: 56 | - en 57 | metrics: 58 | - Other 59 | original_task: true 60 | name: Answer the last question 61 | reference: 'Metric: F1' 62 | 8ebbd098-b40c-4e69-8cbb-0ffecf0fe2a6: !Template 63 | answer_choices: null 64 | id: 8ebbd098-b40c-4e69-8cbb-0ffecf0fe2a6 65 | jinja: "Complete the dialogue based on the information contained in the passage.\ 66 | \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\ 67 | \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" 68 | metadata: !TemplateMetadata 69 | choices_in_prompt: false 70 | languages: 71 | - en 72 | metrics: 73 | - Other 74 | original_task: true 75 | name: Complete the dialogue 76 | reference: 'Metric: F1' 77 | e624695b-5d26-47cc-bdb4-ac2bee4ddaea: !Template 78 | answer_choices: null 79 | id: e624695b-5d26-47cc-bdb4-ac2bee4ddaea 80 | jinja: "Help me complete the dialogue about this passage. If there is no answer\ 81 | \ in the passage, say \"unknown\".\n\nPassage: {{context}}\n\nQ: {{question}}\ 82 | \ \nA: ||| {{answer[\"texts\"][0]}}" 83 | metadata: !TemplateMetadata 84 | choices_in_prompt: false 85 | languages: 86 | - en 87 | metrics: 88 | - Other 89 | original_task: true 90 | name: Help me 91 | reference: 'Metric: F1' 92 | -------------------------------------------------------------------------------- /promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: ade_corpus_v2 2 | subset: Ade_corpus_v2_classification 3 | templates: 4 | 56bd12a8-b8ee-464e-98cc-5f586ba9f74d: !Template 5 | answer_choices: No ||| Yes 6 | id: 56bd12a8-b8ee-464e-98cc-5f586ba9f74d 7 | jinja: 'Please answer the below Yes / No question. 8 | 9 | 10 | Is "{{text}}" related to adverse drug effect (ADE)? ||| {{answer_choices[label]}}' 11 | metadata: !TemplateMetadata 12 | choices_in_prompt: true 13 | languages: 14 | - en 15 | metrics: 16 | - Accuracy 17 | original_task: true 18 | name: binary-classification 19 | reference: '' 20 | 78c4ce65-dd66-46ed-878d-11f4eca5e544: !Template 21 | answer_choices: No ||| Yes 22 | id: 78c4ce65-dd66-46ed-878d-11f4eca5e544 23 | jinja: "Read the below text and answer the question.\n\nText: {{text}} \n\nQuestion:\ 24 | \ Is the above text related to adverse drug effect (ADE)? 
Your answer should\ 25 | \ be either \"Yes\" or \"No\".\n\n|||\n{{answer_choices[label]}}" 26 | metadata: !TemplateMetadata 27 | choices_in_prompt: true 28 | languages: 29 | - en 30 | metrics: 31 | - Accuracy 32 | original_task: true 33 | name: verbose-binary-classification 34 | reference: '' 35 | dabc0337-5bd3-4150-98b3-794a15ce1a3a: !Template 36 | answer_choices: null 37 | id: dabc0337-5bd3-4150-98b3-794a15ce1a3a 38 | jinja: "{% if label==1 %}\nPlease write a short medical report that is related\ 39 | \ to adverse drug effect (ADE). \n{% else %}\nWrite a medical report that is\ 40 | \ not related to adverse drug effect (ADE). \n{% endif %}\n|||\n{{text}}" 41 | metadata: !TemplateMetadata 42 | choices_in_prompt: false 43 | languages: 44 | - en 45 | metrics: 46 | - BLEU 47 | - ROUGE 48 | original_task: false 49 | name: label-to-text 50 | reference: '' 51 | -------------------------------------------------------------------------------- /promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: ade_corpus_v2 2 | subset: Ade_corpus_v2_drug_ade_relation 3 | templates: 4 | 0ec35408-652d-4ebc-9478-5a0d330c24c8: !Template 5 | answer_choices: null 6 | id: 0ec35408-652d-4ebc-9478-5a0d330c24c8 7 | jinja: 'Read the below text and answer the question. 8 | 9 | 10 | Text: {{text}} 11 | 12 | 13 | Question: What drug has an effect of {{effect}}? 14 | 15 | ||| 16 | 17 | {{drug}}' 18 | metadata: !TemplateMetadata 19 | choices_in_prompt: false 20 | languages: 21 | - en 22 | metrics: 23 | - Accuracy 24 | original_task: true 25 | name: find-drug 26 | reference: '' 27 | 2682a789-a435-4976-b34f-f376991c842a: !Template 28 | answer_choices: null 29 | id: 2682a789-a435-4976-b34f-f376991c842a 30 | jinja: '{{drug}} has an effect of {{effect}}. Please write a short medical report 31 | about this. 32 | 33 | ||| 34 | 35 | {{text}}' 36 | metadata: !TemplateMetadata 37 | choices_in_prompt: false 38 | languages: 39 | - en 40 | metrics: 41 | - ROUGE 42 | - BLEU 43 | original_task: false 44 | name: drug-and-effect-to-text 45 | reference: '' 46 | 61ba3622-72bc-4fd8-acfc-826bc2a93aa5: !Template 47 | answer_choices: null 48 | id: 61ba3622-72bc-4fd8-acfc-826bc2a93aa5 49 | jinja: 'Read the below text and answer the question. 50 | 51 | 52 | Text: {{text}} 53 | 54 | 55 | Question: What effect does {{drug}} have? 56 | 57 | ||| 58 | 59 | {{effect}}' 60 | metadata: !TemplateMetadata 61 | choices_in_prompt: false 62 | languages: 63 | - en 64 | metrics: 65 | - Accuracy 66 | original_task: true 67 | name: find-effect 68 | reference: '' 69 | 6acf3588-baa1-4ff6-87c4-4c2356855464: !Template 70 | answer_choices: null 71 | id: 6acf3588-baa1-4ff6-87c4-4c2356855464 72 | jinja: 'Read the below text and answer the question. 73 | 74 | 75 | Text: {{text}} 76 | 77 | 78 | Question: What are the drug and its effect of the above text? 79 | 80 | 81 | You should answer in the "drug" and "effect" format (e.g., alcohol and high 82 | blood pressure) 83 | 84 | ||| 85 | 86 | {{drug}} and {{effect}}.' 87 | metadata: !TemplateMetadata 88 | choices_in_prompt: false 89 | languages: 90 | - en 91 | metrics: 92 | - Accuracy 93 | original_task: true 94 | name: find-drug-and-effect 95 | reference: '' 96 | db68e609-ba92-40ae-b161-8b7710124142: !Template 97 | answer_choices: null 98 | id: db68e609-ba92-40ae-b161-8b7710124142 99 | jinja: 'Read the below text and answer the two following questions. 
100 | 101 | 102 | Text: {{text}} 103 | 104 | 105 | Question 1: What is the drug in the above text? 106 | 107 | 108 | Question 2: What is the effect of it? 109 | 110 | 111 | You should answer in the "drug" and "effect" format (e.g., alcohol and high 112 | blood pressure) 113 | 114 | ||| 115 | 116 | {{drug}} and {{effect}}.' 117 | metadata: !TemplateMetadata 118 | choices_in_prompt: false 119 | languages: 120 | - en 121 | metrics: 122 | - Accuracy 123 | original_task: true 124 | name: find-drug-and-effect-two-questions 125 | reference: '' 126 | -------------------------------------------------------------------------------- /promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: ade_corpus_v2 2 | subset: Ade_corpus_v2_drug_dosage_relation 3 | templates: 4 | 1de6d411-ed0a-4d48-806e-cad009f07a65: !Template 5 | answer_choices: null 6 | id: 1de6d411-ed0a-4d48-806e-cad009f07a65 7 | jinja: 'Read the below text and answer the question. 8 | 9 | 10 | Text: {{text}} 11 | 12 | 13 | Question: What drug has a dosage of {{dosage}}? 14 | 15 | ||| 16 | 17 | {{drug}}' 18 | metadata: !TemplateMetadata 19 | choices_in_prompt: false 20 | languages: 21 | - en 22 | metrics: 23 | - Accuracy 24 | original_task: true 25 | name: find-drug 26 | reference: '' 27 | 1e719388-59c9-4b0a-9ed9-dd02b6ddd0a6: !Template 28 | answer_choices: null 29 | id: 1e719388-59c9-4b0a-9ed9-dd02b6ddd0a6 30 | jinja: '{{dosage}} of {{drug}} was given to a patient. Please write a short medical 31 | report about this. 32 | 33 | ||| 34 | 35 | {{text}}' 36 | metadata: !TemplateMetadata 37 | choices_in_prompt: false 38 | languages: 39 | - en 40 | metrics: 41 | - BLEU 42 | - ROUGE 43 | original_task: false 44 | name: drug-and-dosage-to-text 45 | reference: '' 46 | 2bed0f04-8249-4248-86ea-e3a1971b2e1b: !Template 47 | answer_choices: null 48 | id: 2bed0f04-8249-4248-86ea-e3a1971b2e1b 49 | jinja: 'Read the below text and answer the two following questions. 50 | 51 | 52 | Text: {{text}} 53 | 54 | 55 | 56 | Question 1: What is the drug in the above text? 57 | 58 | 59 | Question 2: What is the dosage of it? 60 | 61 | 62 | You should answer in the "drug" and "dosage" format (e.g., Aspirin and 500mg) 63 | 64 | ||| 65 | 66 | {{drug}} and {{dosage}}.' 67 | metadata: !TemplateMetadata 68 | choices_in_prompt: false 69 | languages: 70 | - en 71 | metrics: 72 | - Accuracy 73 | original_task: true 74 | name: find-drug-and-dosage-two-questions 75 | reference: '' 76 | ca175bed-d046-40e7-9dbb-1e50fde7e603: !Template 77 | answer_choices: null 78 | id: ca175bed-d046-40e7-9dbb-1e50fde7e603 79 | jinja: 'Read the below text and answer the question. 80 | 81 | 82 | Text: {{text}} 83 | 84 | 85 | Question: What is the dosage of {{drug}}? 86 | 87 | ||| 88 | 89 | {{dosage}}' 90 | metadata: !TemplateMetadata 91 | choices_in_prompt: false 92 | languages: 93 | - en 94 | metrics: 95 | - Accuracy 96 | original_task: true 97 | name: find-dosage 98 | reference: '' 99 | ce5208ac-6b4c-4a35-8738-e20232df1917: !Template 100 | answer_choices: null 101 | id: ce5208ac-6b4c-4a35-8738-e20232df1917 102 | jinja: "Read the below text and answer the question.\n\nText: {{text}}\n\nQuestion:\ 103 | \ What are the drug and its dosage of the above text? \n\nYou should answer\ 104 | \ in the \"drug\" and \"dosage\" format (e.g., Aspirin and 500mg)\n|||\n{{drug}}\ 105 | \ and {{dosage}}." 
106 | metadata: !TemplateMetadata 107 | choices_in_prompt: false 108 | languages: 109 | - en 110 | metrics: 111 | - Accuracy 112 | original_task: true 113 | name: find-drug-and-dosage 114 | reference: '' 115 | -------------------------------------------------------------------------------- /promptsource/templates/adversarial_qa/adversarialQA/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: adversarial_qa 2 | subset: adversarialQA 3 | templates: 4 | 00755780-f3c0-44b4-b159-8f3873cdb16c: !Template 5 | answer_choices: null 6 | id: 00755780-f3c0-44b4-b159-8f3873cdb16c 7 | jinja: 'I want to test the ability of students to read a passage and answer questions 8 | about it. Could you please come up with a good question for the passage "{{context}}"? 9 | ||| 10 | 11 | {{question}}' 12 | metadata: !TemplateMetadata 13 | choices_in_prompt: false 14 | languages: 15 | - en 16 | metrics: 17 | - BLEU 18 | - ROUGE 19 | original_task: false 20 | name: generate_question 21 | reference: 'Input: Context, Output: Question (generate a question)' 22 | 3b2459cc-6600-443c-abf8-8f60c34cd998: !Template 23 | answer_choices: null 24 | id: 3b2459cc-6600-443c-abf8-8f60c34cd998 25 | jinja: '{% if metadata.split != "test" %} 26 | 27 | I know that the answer to the question "{{question}}" is in "{{context}}". Can 28 | you tell me what it is? ||| 29 | 30 | 31 | {{answers.text | choice}} 32 | 33 | {% endif %}' 34 | metadata: !TemplateMetadata 35 | choices_in_prompt: false 36 | languages: 37 | - en 38 | metrics: 39 | - Squad 40 | original_task: true 41 | name: tell_what_it_is 42 | reference: 'Input: QC, Output: A (rephrase)' 43 | 5bdb1815-5c6f-49a3-ad1d-367344420701: !Template 44 | answer_choices: null 45 | id: 5bdb1815-5c6f-49a3-ad1d-367344420701 46 | jinja: '{% if metadata.split != "test" %} 47 | 48 | Question: "{{question}}" 49 | 50 | 51 | Context: "{{context}}" 52 | 53 | 54 | Answer: 55 | 56 | ||| 57 | 58 | {{answers.text | choice}} 59 | 60 | {% endif %}' 61 | metadata: !TemplateMetadata 62 | choices_in_prompt: false 63 | languages: 64 | - en 65 | metrics: 66 | - Squad 67 | original_task: true 68 | name: question_context_answer 69 | reference: 'Input: QC, Output: Answer (short form)' 70 | a0872cde-2f19-4ae6-919a-868da47bfbcb: !Template 71 | answer_choices: null 72 | id: a0872cde-2f19-4ae6-919a-868da47bfbcb 73 | jinja: '{% if metadata.split != "test" %} 74 | 75 | Extract the answer to the question from the following context. 76 | 77 | Question: {{question}} 78 | 79 | Context: {{context}}||| 80 | 81 | {{answers.text | choice}} 82 | 83 | {% endif %}' 84 | metadata: !TemplateMetadata 85 | choices_in_prompt: false 86 | languages: 87 | - en 88 | metrics: 89 | - Squad 90 | original_task: true 91 | name: based_on 92 | reference: '' 93 | a64d5a15-68e2-4d1c-b30a-ca8250c860f9: !Template 94 | answer_choices: null 95 | id: a64d5a15-68e2-4d1c-b30a-ca8250c860f9 96 | jinja: '{% if metadata.split != "test" %} 97 | 98 | Given the following passage 99 | 100 | 101 | "{{context}}", 102 | 103 | 104 | answer the following question. Note that the answer is present within the text. 
105 | 106 | 107 | Question: {{question}} ||| 108 | 109 | {{answers.text | choice}} 110 | 111 | {% endif %}' 112 | metadata: !TemplateMetadata 113 | choices_in_prompt: false 114 | languages: 115 | - en 116 | metrics: 117 | - Squad 118 | original_task: true 119 | name: answer_the_following_q 120 | reference: 'Input: QC, Output: Answer' 121 | -------------------------------------------------------------------------------- /promptsource/templates/adversarial_qa/dbert/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: adversarial_qa 2 | subset: dbert 3 | templates: 4 | 00755780-f3c0-44b4-b159-8f3873cdb16a: !Template 5 | answer_choices: null 6 | id: 00755780-f3c0-44b4-b159-8f3873cdb16a 7 | jinja: 'I want to test the ability of students to read a passage and answer questions 8 | about it. Could you please come up with a good question for the passage "{{context}}"? 9 | ||| 10 | 11 | {{question}}' 12 | metadata: !TemplateMetadata 13 | choices_in_prompt: false 14 | languages: 15 | - en 16 | metrics: 17 | - BLEU 18 | - ROUGE 19 | original_task: false 20 | name: generate_question 21 | reference: 'Input: Context, Output: Question (generate a question)' 22 | 3b2459cc-6600-443c-abf8-8f60c34cd99a: !Template 23 | answer_choices: null 24 | id: 3b2459cc-6600-443c-abf8-8f60c34cd99a 25 | jinja: '{% if metadata.split != "test" %} 26 | 27 | I know that the answer to the question "{{question}}" is in "{{context}}". Can 28 | you tell me what it is? ||| 29 | 30 | 31 | {{answers.text | choice}} 32 | 33 | {% endif %}' 34 | metadata: !TemplateMetadata 35 | choices_in_prompt: false 36 | languages: 37 | - en 38 | metrics: 39 | - Squad 40 | original_task: true 41 | name: tell_what_it_is 42 | reference: 'Input: QC, Output: A (rephrase)' 43 | 5bdb1815-5c6f-49a3-ad1d-36734442070a: !Template 44 | answer_choices: null 45 | id: 5bdb1815-5c6f-49a3-ad1d-36734442070a 46 | jinja: '{% if metadata.split != "test" %} 47 | 48 | Question: "{{question}}" 49 | 50 | 51 | Context: "{{context}}" 52 | 53 | 54 | Answer: 55 | 56 | ||| 57 | 58 | {{answers.text | choice}} 59 | 60 | {% endif %}' 61 | metadata: !TemplateMetadata 62 | choices_in_prompt: false 63 | languages: 64 | - en 65 | metrics: 66 | - Squad 67 | original_task: true 68 | name: question_context_answer 69 | reference: 'Input: QC, Output: Answer (short form)' 70 | a0872cde-2f19-4ae6-919a-868da47bfbca: !Template 71 | answer_choices: null 72 | id: a0872cde-2f19-4ae6-919a-868da47bfbca 73 | jinja: '{% if metadata.split != "test" %} 74 | 75 | Extract the answer to the question from the following context. 76 | 77 | Question: {{question}} 78 | 79 | Context: {{context}}||| 80 | 81 | {{answers.text | choice}} 82 | 83 | {% endif %}' 84 | metadata: !TemplateMetadata 85 | choices_in_prompt: false 86 | languages: 87 | - en 88 | metrics: 89 | - Squad 90 | original_task: true 91 | name: based_on 92 | reference: '' 93 | a64d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template 94 | answer_choices: null 95 | id: a64d5a15-68e2-4d1c-b30a-ca8250c860fa 96 | jinja: '{% if metadata.split != "test" %} 97 | 98 | Given the following passage 99 | 100 | 101 | "{{context}}", 102 | 103 | 104 | answer the following question. Note that the answer is present within the text. 
105 | 106 | 107 | Question: {{question}} ||| 108 | 109 | {{answers.text | choice}} 110 | 111 | {% endif %}' 112 | metadata: !TemplateMetadata 113 | choices_in_prompt: false 114 | languages: 115 | - en 116 | metrics: 117 | - Squad 118 | original_task: true 119 | name: answer_the_following_q 120 | reference: 'Input: QC, Output: Answer' 121 | -------------------------------------------------------------------------------- /promptsource/templates/adversarial_qa/dbidaf/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: adversarial_qa 2 | subset: dbidaf 3 | templates: 4 | 41f28b31-d0fc-4f20-a0a2-ff21813e298e: !Template 5 | answer_choices: null 6 | id: 41f28b31-d0fc-4f20-a0a2-ff21813e298e 7 | jinja: '{% if metadata.split != "test" %} 8 | 9 | Extract the answer to the question from the following context. 10 | 11 | Question: {{question}} 12 | 13 | Context: {{context}}||| 14 | 15 | {{answers.text | choice}} 16 | 17 | {% endif %}' 18 | metadata: !TemplateMetadata 19 | choices_in_prompt: false 20 | languages: 21 | - en 22 | metrics: 23 | - Squad 24 | original_task: true 25 | name: based_on 26 | reference: '' 27 | a64d5a15-68e2-4d1c-b30a-ca8250c860d9: !Template 28 | answer_choices: null 29 | id: a64d5a15-68e2-4d1c-b30a-ca8250c860d9 30 | jinja: '{% if metadata.split != "test" %} 31 | 32 | Given the following passage 33 | 34 | 35 | "{{context}}", 36 | 37 | 38 | answer the following question. Note that the answer is present within the text. 39 | 40 | 41 | Question: {{question}} ||| 42 | 43 | {{answers.text | choice}} 44 | 45 | {% endif %}' 46 | metadata: !TemplateMetadata 47 | choices_in_prompt: false 48 | languages: 49 | - en 50 | metrics: 51 | - Squad 52 | original_task: true 53 | name: answer_the_following_q 54 | reference: 'Input: QC, Output: Answer' 55 | c7a80603-d610-4999-98a7-815b2f84592d: !Template 56 | answer_choices: null 57 | id: c7a80603-d610-4999-98a7-815b2f84592d 58 | jinja: 'I want to test the ability of students to read a passage and answer questions 59 | about it. Could you please come up with a good question for the passage "{{context}}"? 60 | ||| 61 | 62 | {{question}}' 63 | metadata: !TemplateMetadata 64 | choices_in_prompt: false 65 | languages: 66 | - en 67 | metrics: 68 | - BLEU 69 | - ROUGE 70 | original_task: false 71 | name: generate_question 72 | reference: 'Input: Context, Output: Question (generate a question)' 73 | ce9bc00a-567b-4c4e-aad7-df6f5d5d57bb: !Template 74 | answer_choices: null 75 | id: ce9bc00a-567b-4c4e-aad7-df6f5d5d57bb 76 | jinja: '{% if metadata.split != "test" %} 77 | 78 | I know that the answer to the question "{{question}}" is in "{{context}}". Can 79 | you tell me what it is? 
||| 80 | 81 | 82 | {{answers.text | choice}} 83 | 84 | {% endif %}' 85 | metadata: !TemplateMetadata 86 | choices_in_prompt: false 87 | languages: 88 | - en 89 | metrics: 90 | - Squad 91 | original_task: true 92 | name: tell_what_it_is 93 | reference: 'Input: QC, Output: A (rephrase)' 94 | fa185424-6ebe-49b8-b4ed-7632ca33c361: !Template 95 | answer_choices: null 96 | id: fa185424-6ebe-49b8-b4ed-7632ca33c361 97 | jinja: '{% if metadata.split != "test" %} 98 | 99 | Question: "{{question}}" 100 | 101 | 102 | Context: "{{context}}" 103 | 104 | 105 | Answer: 106 | 107 | ||| 108 | 109 | {{answers.text | choice}} 110 | 111 | {% endif %}' 112 | metadata: !TemplateMetadata 113 | choices_in_prompt: false 114 | languages: 115 | - en 116 | metrics: 117 | - Squad 118 | original_task: true 119 | name: question_context_answer 120 | reference: 'Input: QC, Output: Answer (short form)' 121 | -------------------------------------------------------------------------------- /promptsource/templates/adversarial_qa/droberta/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: adversarial_qa 2 | subset: droberta 3 | templates: 4 | 00755780-f3c0-44b4-b159-8f3873cdb163: !Template 5 | answer_choices: null 6 | id: 00755780-f3c0-44b4-b159-8f3873cdb163 7 | jinja: 'I want to test the ability of students to read a passage and answer questions 8 | about it. Could you please come up with a good question for the passage "{{context}}"? 9 | ||| 10 | 11 | {{question}}' 12 | metadata: !TemplateMetadata 13 | choices_in_prompt: false 14 | languages: 15 | - en 16 | metrics: 17 | - BLEU 18 | - ROUGE 19 | original_task: false 20 | name: generate_question 21 | reference: 'Input: Context, Output: Question (generate a question)' 22 | 3b2459cc-6600-443c-abf8-8f60c34cd993: !Template 23 | answer_choices: null 24 | id: 3b2459cc-6600-443c-abf8-8f60c34cd993 25 | jinja: '{% if metadata.split != "test" %} 26 | 27 | I know that the answer to the question "{{question}}" is in "{{context}}". Can 28 | you tell me what it is? ||| 29 | 30 | 31 | {{answers.text | choice}} 32 | 33 | {% endif %}' 34 | metadata: !TemplateMetadata 35 | choices_in_prompt: false 36 | languages: 37 | - en 38 | metrics: 39 | - Squad 40 | original_task: true 41 | name: tell_what_it_is 42 | reference: 'Input: QC, Output: A (rephrase)' 43 | 5bdb1815-5c6f-49a3-ad1d-367344420703: !Template 44 | answer_choices: null 45 | id: 5bdb1815-5c6f-49a3-ad1d-367344420703 46 | jinja: '{% if metadata.split != "test" %} 47 | 48 | Question: "{{question}}" 49 | 50 | 51 | Context: "{{context}}" 52 | 53 | 54 | Answer: 55 | 56 | ||| 57 | 58 | {{answers.text | choice}} 59 | 60 | {% endif %}' 61 | metadata: !TemplateMetadata 62 | choices_in_prompt: false 63 | languages: 64 | - en 65 | metrics: 66 | - Squad 67 | original_task: true 68 | name: question_context_answer 69 | reference: 'Input: QC, Output: Answer (short form)' 70 | a0872cde-2f19-4ae6-919a-868da47bfbc3: !Template 71 | answer_choices: null 72 | id: a0872cde-2f19-4ae6-919a-868da47bfbc3 73 | jinja: '{% if metadata.split != "test" %} 74 | 75 | Extract the answer to the question from the following context. 
76 | 77 | Question: {{question}} 78 | 79 | Context: {{context}}||| 80 | 81 | {{answers.text | choice}} 82 | 83 | {% endif %}' 84 | metadata: !TemplateMetadata 85 | choices_in_prompt: false 86 | languages: 87 | - en 88 | metrics: 89 | - Squad 90 | original_task: true 91 | name: based_on 92 | reference: '' 93 | a64d5a15-68e2-4d1c-b30a-ca8250c860f3: !Template 94 | answer_choices: null 95 | id: a64d5a15-68e2-4d1c-b30a-ca8250c860f3 96 | jinja: '{% if metadata.split != "test" %} 97 | 98 | Given the following passage 99 | 100 | 101 | "{{context}}", 102 | 103 | 104 | answer the following question. Note that the answer is present within the text. 105 | 106 | 107 | Question: {{question}} ||| 108 | 109 | {{answers.text | choice}} 110 | 111 | {% endif %}' 112 | metadata: !TemplateMetadata 113 | choices_in_prompt: false 114 | languages: 115 | - en 116 | metrics: 117 | - Squad 118 | original_task: true 119 | name: answer_the_following_q 120 | reference: 'Input: QC, Output: Answer' 121 | -------------------------------------------------------------------------------- /promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: amazon_us_reviews 2 | subset: Wireless_v1_00 3 | templates: 4 | 5feaa0d7-e4e0-46cc-8517-e00bfa7fd00e: !Template 5 | answer_choices: null 6 | id: 5feaa0d7-e4e0-46cc-8517-e00bfa7fd00e 7 | jinja: "Give a short sentence describing the following product review:\n{{review_body}}\ 8 | \ \n|||\n{{review_headline}}" 9 | metadata: !TemplateMetadata 10 | choices_in_prompt: false 11 | languages: 12 | - en 13 | metrics: 14 | - ROUGE 15 | - BLEU 16 | original_task: false 17 | name: Generate review headline based on review body 18 | reference: Generate review headline based on review body 19 | 9588a967-d698-4a33-9b96-a5254df9d260: !Template 20 | answer_choices: null 21 | id: 9588a967-d698-4a33-9b96-a5254df9d260 22 | jinja: Generate a {{star_rating}}-star review (1 being lowest and 5 being highest) 23 | about this product {{product_title}}. ||| {{review_body}} 24 | metadata: !TemplateMetadata 25 | choices_in_prompt: false 26 | languages: 27 | - en 28 | metrics: 29 | - BLEU 30 | - ROUGE 31 | original_task: false 32 | name: Generate review based on rating and category 33 | reference: Generate review based on rating and category 34 | 9a8b953d-2c68-4046-a7b7-8fd5f7469d10: !Template 35 | answer_choices: '1 ||| 2 ||| 3 ||| 4 ||| 5 ' 36 | id: 9a8b953d-2c68-4046-a7b7-8fd5f7469d10 37 | jinja: "Given the following review headline \n{{review_headline}}\npredict the\ 38 | \ the associated rating from the following choices\n- {{ answer_choices | join('\\\ 39 | n- ') }} \n(1 being lowest and 5 being highest)\n|||\n{{answer_choices[star_rating-1]}}" 40 | metadata: !TemplateMetadata 41 | choices_in_prompt: true 42 | languages: 43 | - en 44 | metrics: 45 | - Accuracy 46 | original_task: true 47 | name: Given the review headline return a categorical rating 48 | reference: 'Given the review headline, return a categorical rating. 
' 49 | e40e4a53-ca5d-4fc8-a7c3-be9adfe0dbec: !Template 50 | answer_choices: null 51 | id: e40e4a53-ca5d-4fc8-a7c3-be9adfe0dbec 52 | jinja: "Generate a {{star_rating}}-star review headline (1 being lowest and 5\ 53 | \ being highest) about this product: \n{{product_title}} \n||| \ 54 | \ \n{{review_headline}}" 55 | metadata: !TemplateMetadata 56 | choices_in_prompt: false 57 | languages: 58 | - en 59 | metrics: 60 | - BLEU 61 | - ROUGE 62 | original_task: false 63 | name: Generate review headline based on rating 64 | reference: 'Generate review headline based on rating. ' 65 | e6a1bbde-715d-4dad-9178-e2bcfaf5c646: !Template 66 | answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5 67 | id: e6a1bbde-715d-4dad-9178-e2bcfaf5c646 68 | jinja: "Given the following review:\n{{review_body}}\npredict the associated rating\ 69 | \ from the following choices (1 being lowest and 5 being highest)\n- {{ answer_choices\ 70 | \ | join('\\n- ') }} \n|||\n{{answer_choices[star_rating-1]}}" 71 | metadata: !TemplateMetadata 72 | choices_in_prompt: true 73 | languages: 74 | - en 75 | metrics: 76 | - Accuracy 77 | original_task: true 78 | name: Given the review body return a categorical rating 79 | reference: 'Given the review body, return a categorical rating. ' 80 | -------------------------------------------------------------------------------- /promptsource/templates/app_reviews/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: app_reviews 2 | templates: 3 | 2da8f134-58db-4f9d-b3b0-8c6b50693ab5: !Template 4 | answer_choices: Not at all ||| No ||| Maybe ||| Yes ||| Definitely 5 | id: 2da8f134-58db-4f9d-b3b0-8c6b50693ab5 6 | jinja: 'Given this review: "{{review}}" 7 | 8 | Would you recommend this app to a friend? {{answer_choices[0]}}, {{answer_choices[1]}}, 9 | {{answer_choices[2]}}, {{answer_choices[3]}}, or {{answer_choices[4]}}? 10 | 11 | ||| 12 | 13 | {{answer_choices[star-1]}}' 14 | metadata: !TemplateMetadata 15 | choices_in_prompt: true 16 | languages: 17 | - en 18 | metrics: 19 | - Accuracy 20 | - Spearman Correlation 21 | original_task: false 22 | name: categorize_rating_using_review 23 | reference: Given the review, return a categorical answer. 24 | 8086b434-a75e-45a4-87fb-4364601e2e05: !Template 25 | answer_choices: null 26 | id: 8086b434-a75e-45a4-87fb-4364601e2e05 27 | jinja: 'Generate a {{star}}-star review (1 being lowest and 5 being highest) about 28 | an app with package {{package_name}}. 29 | 30 | ||| 31 | 32 | {{review}}' 33 | metadata: !TemplateMetadata 34 | choices_in_prompt: null 35 | languages: 36 | - en 37 | metrics: 38 | - Accuracy 39 | - Spearman Correlation 40 | original_task: false 41 | name: generate_review 42 | reference: Generate a review from the rating. 43 | 9746ce4b-ac58-4dfb-9783-d77c95cb62cf: !Template 44 | answer_choices: "\u2605 ||| \u2605\u2605 ||| \u2605\u2605\u2605 ||| \u2605\u2605\ 45 | \u2605\u2605 ||| \u2605\u2605\u2605\u2605\u2605" 46 | id: 9746ce4b-ac58-4dfb-9783-d77c95cb62cf 47 | jinja: "What would be the \u2605-rating of this review (\u2605 being the lowest\ 48 | \ and \u2605\u2605\u2605\u2605\u2605 being the highest)? \"{{review}}\"\n|||\n\ 49 | {{answer_choices[star-1]}}" 50 | metadata: !TemplateMetadata 51 | choices_in_prompt: false 52 | languages: 53 | - en 54 | metrics: 55 | - Accuracy 56 | - Spearman Correlation 57 | original_task: false 58 | name: convert_to_star_rating 59 | reference: Given the review, generate a star rating. 
60 | d34e1413-2699-4701-baa2-05d931d012ba: !Template 61 | answer_choices: null 62 | id: d34e1413-2699-4701-baa2-05d931d012ba 63 | jinja: 'On a scale of 1-5 (with 1 being least favorable and 5 being most favorable), 64 | how would you rate this review? "{{review}}" 65 | 66 | ||| 67 | 68 | {{star}}' 69 | metadata: !TemplateMetadata 70 | choices_in_prompt: false 71 | languages: 72 | - en 73 | metrics: 74 | - Accuracy 75 | - Spearman Correlation 76 | original_task: false 77 | name: convert_to_rating 78 | reference: Convert review to rating 79 | -------------------------------------------------------------------------------- /promptsource/templates/bing_coronavirus_query_set/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: bing_coronavirus_query_set 2 | templates: 3 | 43332782-9e92-4bb2-94bf-28759f3fe181: !Template 4 | answer_choices: null 5 | id: 43332782-9e92-4bb2-94bf-28759f3fe181 6 | jinja: "This search query talks about the coronavirus and was published on {{Date}}.\ 7 | \ In what country was it issued? \n{{Query}}\n|||\n{{Country}}" 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - Accuracy 14 | original_task: false 15 | name: 'what_country ' 16 | reference: '' 17 | 68f9c063-1907-4866-ab1b-756cc57e5695: !Template 18 | answer_choices: implicit ||| explicit 19 | id: 68f9c063-1907-4866-ab1b-756cc57e5695 20 | jinja: "The user is searching for coronavirus results on Bing.com. Is the intent\ 21 | \ implicit or explicit? \n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\"\ 22 | \ %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1] }}\n{% endif %}" 23 | metadata: !TemplateMetadata 24 | choices_in_prompt: true 25 | languages: 26 | - en 27 | metrics: 28 | - Accuracy 29 | original_task: false 30 | name: 'is_implicit_or_explicit ' 31 | reference: '' 32 | 992d541f-9e0c-466d-b4c4-92e9e236f863: !Template 33 | answer_choices: implicit ||| explicit 34 | id: 992d541f-9e0c-466d-b4c4-92e9e236f863 35 | jinja: "This search query about coronavirus was issued in {{Country}} on {{Date}}.\ 36 | \ Is the intent implicit or explicit? \n{{Query}}\n|||\n{% if IsImplicitIntent\ 37 | \ == \"True\" %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1] }}\n\ 38 | {% endif %}" 39 | metadata: !TemplateMetadata 40 | choices_in_prompt: true 41 | languages: 42 | - en 43 | metrics: 44 | - Accuracy 45 | original_task: false 46 | name: 'is_explicit_country_date ' 47 | reference: '' 48 | df53652c-36dc-45fe-a015-d0781e32cd33: !Template 49 | answer_choices: Yes ||| No 50 | id: df53652c-36dc-45fe-a015-d0781e32cd33 51 | jinja: "Does this search engine query have an indirect relation to Covid-19? 
\n\ 52 | {{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\n{{answer_choices[0] }}\n\ 53 | {% else %}\n{{answer_choices[1] }}\n{% endif %}" 54 | metadata: !TemplateMetadata 55 | choices_in_prompt: false 56 | languages: 57 | - en 58 | metrics: 59 | - Accuracy 60 | original_task: false 61 | name: is_implicit_query 62 | reference: '' 63 | df7bc2ee-686c-4826-ad84-3a056a2da4d4: !Template 64 | answer_choices: No ||| Yes 65 | id: df7bc2ee-686c-4826-ad84-3a056a2da4d4 66 | jinja: "Does this search query on Bing.com talk about the coronavirus explicitly?\ 67 | \ \n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\n{{answer_choices[0]\ 68 | \ }}\n{% else %}\n{{answer_choices[1] }}\n{% endif %}" 69 | metadata: !TemplateMetadata 70 | choices_in_prompt: false 71 | languages: 72 | - en 73 | metrics: 74 | - Accuracy 75 | original_task: false 76 | name: is_explicit_query 77 | reference: '' 78 | -------------------------------------------------------------------------------- /promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: blbooksgenre 2 | subset: title_genre_classifiction 3 | templates: 4 | 0c3e83f4-7f4d-4eca-8f80-6b6bdd8eeedd: !Template 5 | answer_choices: Fiction ||| Non-fiction 6 | id: 0c3e83f4-7f4d-4eca-8f80-6b6bdd8eeedd 7 | jinja: "Given the title: {{title}}, which of the following genres is the book?\n\ 8 | (a) {{ answer_choices[0] }}\n(b) {{ answer_choices[1] }}\n|||\n {{ answer_choices[label]\ 9 | \ }}" 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: true 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | - AUC 17 | original_task: true 18 | name: multi-choice 19 | reference: '' 20 | 5564acb9-c911-4d71-ba4d-add444aaf1e3: !Template 21 | answer_choices: True ||| False 22 | id: 5564acb9-c911-4d71-ba4d-add444aaf1e3 23 | jinja: "{{title}} is the title of a fictional book, True or False?\nAnswer: \n\ 24 | |||\n{{ answer_choices[label] }}" 25 | metadata: !TemplateMetadata 26 | choices_in_prompt: true 27 | languages: 28 | - en 29 | metrics: 30 | - Accuracy 31 | - AUC 32 | original_task: true 33 | name: premise_context_first 34 | reference: '' 35 | afc18daa-999d-495f-908a-d99477f6f5ac: !Template 36 | answer_choices: True ||| False 37 | id: afc18daa-999d-495f-908a-d99477f6f5ac 38 | jinja: "The following is the title of a fictional book, True or False?\n{{title}}\n\ 39 | Answer: \n|||\n{{ answer_choices[label] }}" 40 | metadata: !TemplateMetadata 41 | choices_in_prompt: true 42 | languages: 43 | - en 44 | metrics: 45 | - Accuracy 46 | - AUC 47 | original_task: true 48 | name: premise 49 | reference: '' 50 | cf4b6ce0-ff87-4c7a-9b9e-ec7c4cf741d8: !Template 51 | answer_choices: Fiction ||| Non-fiction 52 | id: cf4b6ce0-ff87-4c7a-9b9e-ec7c4cf741d8 53 | jinja: The genre of the book "{{title}}" is ||| {{ answer_choices[label] }} 54 | metadata: !TemplateMetadata 55 | choices_in_prompt: false 56 | languages: 57 | - en 58 | metrics: 59 | - Accuracy 60 | - AUC 61 | original_task: true 62 | name: classify 63 | reference: '' 64 | -------------------------------------------------------------------------------- /promptsource/templates/blended_skill_talk/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: blended_skill_talk 2 | templates: 3 | 54f785e9-453a-4ffe-8181-28095e3f2b80: !Template 4 | answer_choices: null 5 | id: 54f785e9-453a-4ffe-8181-28095e3f2b80 6 | jinja: "Given the below conversation between two people, 
what would the listener\ 7 | \ say?\n\nA: {{previous_utterance[0]}}\n\nB: {{previous_utterance[1]}}\n{% for\ 8 | \ message_f, message_g in zip(free_messages[:-1], guided_messages[:-1]) %}\n\ 9 | A: {{message_f}}\n\nB: {{message_g}}\n{% endfor %} \nA: {{free_messages[-1]}}\n\ 10 | \nB: \n|||\n{{guided_messages[-1]}}" 11 | metadata: !TemplateMetadata 12 | choices_in_prompt: false 13 | languages: 14 | - en 15 | metrics: 16 | - BLEU 17 | - ROUGE 18 | original_task: false 19 | name: guess-last-utterance 20 | reference: '' 21 | 58f4e068-26fa-4843-a1d6-54bde324e780: !Template 22 | answer_choices: Yes ||| No 23 | id: 58f4e068-26fa-4843-a1d6-54bde324e780 24 | jinja: "Two people are having a conversation. Are the utterances in the correct\ 25 | \ order? \n\nYour answer should be either \"Yes\" or \"No\".\n{% if range(0,\ 26 | \ 2) | choice %}\nA: {{previous_utterance[0]}}\n\nB: {{previous_utterance[1]}}\n\ 27 | {% for message_f, message_g in zip(free_messages, guided_messages) %}\nA: {{message_f}}\n\ 28 | \nB: {{message_g}}\n{% endfor %} \n\n|||\nYes.\n{% else %}\nA: {{previous_utterance[1]}}\n\ 29 | \nB: {{previous_utterance[0]}}\n{% for message_f, message_g in zip(guided_messages,\ 30 | \ free_messages) %}\nA: {{message_f}}\n\nB: {{message_g}}\n{% endfor %} \n\n\ 31 | |||\nNo.\n{% endif %}" 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: true 34 | languages: 35 | - en 36 | metrics: 37 | - Accuracy 38 | original_task: false 39 | name: guess-correct-order 40 | reference: '' 41 | 8792b63e-7217-40fe-8130-7392baca3519: !Template 42 | answer_choices: null 43 | id: 8792b63e-7217-40fe-8130-7392baca3519 44 | jinja: "Two people are talking to each other. What do you think Person A said\ 45 | \ in the beginning?\n\nPerson B: {{previous_utterance[1]}}\n{% for message_f,\ 46 | \ message_g in zip(free_messages, guided_messages) %}\nPerson A: {{message_f}}\n\ 47 | \nPerson B: {{message_g}}\n{% endfor %} \n|||\n{{previous_utterance[0]}}\n" 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: false 50 | languages: 51 | - en 52 | metrics: 53 | - BLEU 54 | - ROUGE 55 | original_task: false 56 | name: guess-first-utterance 57 | reference: '' 58 | -------------------------------------------------------------------------------- /promptsource/templates/cbt/raw/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: cbt 2 | subset: raw 3 | templates: 4 | 4906fc72-c879-4f0a-b7ae-c6379a63e32c: !Template 5 | answer_choices: null 6 | id: 4906fc72-c879-4f0a-b7ae-c6379a63e32c 7 | jinja: 'Guess the author for the book: "{{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' 8 | '')}}" 9 | 10 | ||| 11 | 12 | {{title.split(''___'')[0]|replace(''_'','' '')}}' 13 | metadata: !TemplateMetadata 14 | choices_in_prompt: false 15 | languages: 16 | - en 17 | metrics: 18 | - Other 19 | original_task: false 20 | name: Given Title Guess Author 21 | reference: Given the title, guess the author of the book. 22 | 5172f015-f022-4c3b-89e9-607467e29012: !Template 23 | answer_choices: null 24 | id: 5172f015-f022-4c3b-89e9-607467e29012 25 | jinja: 'Suggest a book written by {{title.split(''___'')[0]|replace(''_'','' '')}}. 
26 | 27 | ||| 28 | 29 | {{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' '')}}' 30 | metadata: !TemplateMetadata 31 | choices_in_prompt: false 32 | languages: 33 | - en 34 | metrics: 35 | - Other 36 | original_task: false 37 | name: Given Author Recommend Book 38 | reference: Given the author name, recommend one of his books. 39 | 82c63934-1f33-4e6f-af59-af570b3e2e4c: !Template 40 | answer_choices: null 41 | id: 82c63934-1f33-4e6f-af59-af570b3e2e4c 42 | jinja: 'Who wrote "{{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' 43 | '')}}"? 44 | 45 | ||| 46 | 47 | {{title.split(''___'')[0]|replace(''_'','' '')}}' 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: false 50 | languages: 51 | - en 52 | metrics: 53 | - Other 54 | original_task: false 55 | name: Who wrote 56 | reference: Given the title, guess the author of the title. 57 | d407406e-ed5c-4f1f-bca8-b1f511e5fa53: !Template 58 | answer_choices: null 59 | id: d407406e-ed5c-4f1f-bca8-b1f511e5fa53 60 | jinja: '{{ content }} 61 | 62 | 63 | Guess the author for the above story. 64 | 65 | ||| 66 | 67 | {{title.split(''___'')[0]|replace(''_'','' '')}}' 68 | metadata: !TemplateMetadata 69 | choices_in_prompt: false 70 | languages: 71 | - en 72 | metrics: 73 | - Other 74 | original_task: false 75 | name: Given Story Guess Author 76 | reference: Given the story, guess the author. 77 | f4e1d9bb-a43e-4c75-aa5d-4711090dd628: !Template 78 | answer_choices: null 79 | id: f4e1d9bb-a43e-4c75-aa5d-4711090dd628 80 | jinja: '{{ content }} 81 | 82 | 83 | Write a title for the above story. 84 | 85 | ||| 86 | 87 | {{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' '')}}' 88 | metadata: !TemplateMetadata 89 | choices_in_prompt: false 90 | languages: 91 | - en 92 | metrics: 93 | - BLEU 94 | - ROUGE 95 | original_task: false 96 | name: Given Story Write Title 97 | reference: Given the story, write a title. 
98 | -------------------------------------------------------------------------------- /promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: code_x_glue_tc_text_to_code 2 | templates: 3 | eb965448-691e-4506-bb61-a54771c7014b: !Template 4 | answer_choices: null 5 | id: eb965448-691e-4506-bb61-a54771c7014b 6 | jinja: "{% set field_seperator = \"concode_field_sep\" %}\n{% set method_seperator\ 7 | \ = \"concode_elem_sep\" %}\n{% set ns = namespace(nl=\"\", fields=[], methods=[])\ 8 | \ %}\n{% if code | length > 0 %}\n\n {% for chunk_a in nl.split(field_seperator)\ 9 | \ %}\n {% set first_iter = loop.index0 == 0 %}\n {% for chunk_b\ 10 | \ in chunk_a.split(method_seperator) %}\n {% if loop.index0 == 0\ 11 | \ and first_iter %}\n {% set ns.nl = chunk_b %}\n \ 12 | \ {% elif loop.index0 == 0 %}\n {% set ns.fields = ns.fields\ 13 | \ + [chunk_b.strip()] %}\n {% else %}\n {% set ns.methods\ 14 | \ = ns.methods + [chunk_b.strip()] %}\n {% endif %}\n {% endfor\ 15 | \ %}\n {% endfor %}\n Method description:\n {{ns.nl}}\n\n Class\ 16 | \ fields:\n {{ns.fields | unique | join(\", \")}}\n\n Class methods:\n\ 17 | \ {{ns.methods | unique | join(\", \")}}\n\n Generate the method\n \ 18 | \ |||\n {{code}}\n{% endif %}" 19 | metadata: !TemplateMetadata 20 | choices_in_prompt: false 21 | languages: 22 | - en 23 | metrics: 24 | - Accuracy 25 | - BLEU 26 | original_task: true 27 | name: generate class member function given class environment 28 | reference: '' 29 | -------------------------------------------------------------------------------- /promptsource/templates/commonsense_qa/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: commonsense_qa 2 | templates: 3 | 1e1d0ce1-b0ea-4ad8-9971-b2b44948123b: !Template 4 | answer_choices: '{{choices.text | join("|||")}}' 5 | id: 1e1d0ce1-b0ea-4ad8-9971-b2b44948123b 6 | jinja: '{% if answerKey != "" %} 7 | 8 | Answer the following question: 9 | 10 | {{question}} ||| 11 | 12 | {{ answer_choices[choices[''label''].index(answerKey)] }} 13 | 14 | {% endif %}' 15 | metadata: !TemplateMetadata 16 | choices_in_prompt: false 17 | languages: 18 | - en 19 | metrics: 20 | - Accuracy 21 | original_task: true 22 | name: answer_given_question_without_options 23 | reference: '' 24 | 41188da5-c16a-4c6b-89af-6ce6815aedc6: !Template 25 | answer_choices: '{{choices.text | join("|||")}}' 26 | id: 41188da5-c16a-4c6b-89af-6ce6815aedc6 27 | jinja: '{% if answerKey != "" %} 28 | 29 | {{question}} 30 | 31 | 32 | - {{answer_choices | join("\n- ")}} ||| 33 | 34 | {{answer_choices[choices[''label''].index(answerKey)] }} 35 | 36 | {% endif %}' 37 | metadata: !TemplateMetadata 38 | choices_in_prompt: true 39 | languages: 40 | - en 41 | metrics: 42 | - Accuracy 43 | original_task: true 44 | name: question_answering 45 | reference: '' 46 | 42fca80b-b614-4288-aad2-2525360543cb: !Template 47 | answer_choices: A ||| B ||| C ||| D 48 | id: 42fca80b-b614-4288-aad2-2525360543cb 49 | jinja: '{% if answerKey != "" %} 50 | 51 | Given the following options, what do you think is the correct answer to the 52 | question below: 53 | 54 | 55 | {{question}} 56 | 57 | 58 | Options: 59 | 60 | {% for letter, t in zip(answer_choices, choices.text) %} 61 | 62 | - {{letter}}: {{t}} 63 | 64 | {% endfor %} ||| 65 | 66 | {{answerKey}} 67 | 68 | {% endif %}' 69 | metadata: !TemplateMetadata 70 | choices_in_prompt: true 71 | languages: 72 | - en 73 | metrics: 74 
| - Accuracy 75 | original_task: true 76 | name: question_to_answer_index 77 | reference: '' 78 | 8e3f63fa-9ae6-4105-bd51-874b5e1d6b8e: !Template 79 | answer_choices: '{{choices.text | join("|||")}}' 80 | id: 8e3f63fa-9ae6-4105-bd51-874b5e1d6b8e 81 | jinja: '{% if answerKey != "" %} 82 | 83 | Given the options below, select the most suitable answer for the following question: 84 | 85 | {{question}} 86 | 87 | Options: 88 | 89 | - {{answer_choices | join("\n- ")}}||| 90 | 91 | {{answer_choices[choices["label"].index(answerKey)]}} 92 | 93 | {% endif %}' 94 | metadata: !TemplateMetadata 95 | choices_in_prompt: true 96 | languages: 97 | - en 98 | metrics: 99 | - Accuracy 100 | original_task: true 101 | name: most_suitable_answer 102 | reference: '' 103 | bc718994-1d3e-4ae4-b65b-be307154b0a6: !Template 104 | answer_choices: null 105 | id: bc718994-1d3e-4ae4-b65b-be307154b0a6 106 | jinja: '{% if answerKey != "" %} 107 | 108 | Use the following options to predict a possible question for them: 109 | 110 | 111 | {% for i in range(choices[''text'']|length) %} 112 | 113 | - {{choices[''text''][i]}} 114 | 115 | {% endfor %} ||| 116 | 117 | {{question}} 118 | 119 | {% endif %}' 120 | metadata: !TemplateMetadata 121 | choices_in_prompt: false 122 | languages: 123 | - en 124 | metrics: 125 | - BLEU 126 | - ROUGE 127 | original_task: false 128 | name: answer_to_question 129 | reference: '' 130 | -------------------------------------------------------------------------------- /promptsource/templates/cord19/metadata/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: cord19 2 | subset: metadata 3 | templates: 4 | 10d78ae0-635d-4cf3-8e24-61c879fd6ae7: !Template 5 | answer_choices: null 6 | id: 10d78ae0-635d-4cf3-8e24-61c879fd6ae7 7 | jinja: 'Write a scientific title for the following abstract: {{abstract}} 8 | 9 | Title:||| 10 | 11 | {{ title }}' 12 | metadata: !TemplateMetadata 13 | choices_in_prompt: false 14 | languages: 15 | - en 16 | metrics: 17 | - BLEU 18 | - ROUGE 19 | original_task: false 20 | name: title_generation_from_following_abstract 21 | reference: '' 22 | 1821279d-37a7-42f0-ab0c-2a5589a2a7c3: !Template 23 | answer_choices: null 24 | id: 1821279d-37a7-42f0-ab0c-2a5589a2a7c3 25 | jinja: "Title: {{title}}\nGenerate a plausible scientific abstract for a scientific\ 26 | \ paper on Covid-19 with the previous title |||\n {{ abstract }}" 27 | metadata: !TemplateMetadata 28 | choices_in_prompt: false 29 | languages: 30 | - en 31 | metrics: 32 | - BLEU 33 | - ROUGE 34 | original_task: false 35 | name: abstract_generation_from_previous_title 36 | reference: '' 37 | 21fc3c51-5168-4abb-b969-81a115f2f568: !Template 38 | answer_choices: null 39 | id: 21fc3c51-5168-4abb-b969-81a115f2f568 40 | jinja: 'Write a scientific abstract for a paper on Covid-19 with the following 41 | title: {{title}} 42 | 43 | Abstract:||| 44 | 45 | {{ abstract }}' 46 | metadata: !TemplateMetadata 47 | choices_in_prompt: false 48 | languages: 49 | - en 50 | metrics: 51 | - BLEU 52 | - ROUGE 53 | original_task: false 54 | name: abstract_generation_from_following_title 55 | reference: '' 56 | 6a2ebf64-9db7-41f0-85a5-379270c54fa6: !Template 57 | answer_choices: null 58 | id: 6a2ebf64-9db7-41f0-85a5-379270c54fa6 59 | jinja: "Abstract: \n{{abstract}}\nWhat could a scientific title be for this abstract\ 60 | \ on Covid-19?\nTitle:|||\n{{ title }}" 61 | metadata: !TemplateMetadata 62 | choices_in_prompt: false 63 | languages: 64 | - en 65 | metrics: 66 | - BLEU 67 | - ROUGE 68 | 
original_task: false 69 | name: title_generation_from_previous_abstract 70 | reference: '' 71 | c895c4f1-d5e1-4a07-9ae9-0268c218e526: !Template 72 | answer_choices: null 73 | id: c895c4f1-d5e1-4a07-9ae9-0268c218e526 74 | jinja: 'Write a scientific abstract for a research paper on Coronavirus disease 75 | with the following title: {{title}} 76 | 77 | Abstract:||| 78 | 79 | {{ abstract }}' 80 | metadata: !TemplateMetadata 81 | choices_in_prompt: false 82 | languages: 83 | - en 84 | metrics: 85 | - BLEU 86 | - ROUGE 87 | original_task: false 88 | name: abstract_generation_on_coronavirus 89 | reference: '' 90 | -------------------------------------------------------------------------------- /promptsource/templates/covid_qa_castorini/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: covid_qa_castorini 2 | templates: 3 | 481dcd72-1674-4962-b711-0dbf146ae836: !Template 4 | answer_choices: null 5 | id: 481dcd72-1674-4962-b711-0dbf146ae836 6 | jinja: 'Generate a question whose answer could be found within the following papers 7 | (only titles have been provided): 8 | 9 | 10 | {{answers["title"]|join(", ")}} ||| 11 | 12 | {{question_query}}' 13 | metadata: !TemplateMetadata 14 | choices_in_prompt: false 15 | languages: 16 | - en 17 | metrics: 18 | - BLEU 19 | - ROUGE 20 | original_task: false 21 | name: papers_to_qn 22 | reference: '' 23 | 56915f43-ebd6-44dc-9aac-6098ec2d1b32: !Template 24 | answer_choices: null 25 | id: 56915f43-ebd6-44dc-9aac-6098ec2d1b32 26 | jinja: 'Provide the keyword form of the following query: 27 | 28 | 29 | {{question_query}} ||| 30 | 31 | {{keyword_query}}' 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: false 34 | languages: 35 | - en 36 | metrics: 37 | - BLEU 38 | - ROUGE 39 | original_task: false 40 | name: keyword_form 41 | reference: '' 42 | 665bfa4a-b83f-4431-acda-29855c89916b: !Template 43 | answer_choices: null 44 | id: 665bfa4a-b83f-4431-acda-29855c89916b 45 | jinja: 'Generate a possible question to the following answers: 46 | 47 | 48 | {{answers["exact_answer"]|join(", ")}} ||| 49 | 50 | {{question_query}}' 51 | metadata: !TemplateMetadata 52 | choices_in_prompt: false 53 | languages: 54 | - en 55 | metrics: 56 | - BLEU 57 | - ROUGE 58 | original_task: false 59 | name: answers_to_qn 60 | reference: '' 61 | -------------------------------------------------------------------------------- /promptsource/templates/craffel/openai_lambada/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: craffel/openai_lambada 2 | templates: 3 | 1ee5ddef-fffb-4b73-a2f7-f600ffac63cb: !Template 4 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 5 | id: 1ee5ddef-fffb-4b73-a2f7-f600ffac63cb 6 | jinja: '{{ text.split()[:-1] | join('' '') }}... 7 | 8 | 9 | What comes after the ellipses? ||| {{ text.split()[-1] }}' 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: false 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | original_task: true 17 | name: ellipses 18 | reference: '' 19 | 4f08e9d4-bcff-4bc0-9902-87c497625d17: !Template 20 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 21 | id: 4f08e9d4-bcff-4bc0-9902-87c497625d17 22 | jinja: 'Fill in the blank: 23 | 24 | 25 | {{ text.split()[:-1] | join('' '') }} ____. 
||| {{ text.split()[-1] }}' 26 | metadata: !TemplateMetadata 27 | choices_in_prompt: false 28 | languages: 29 | - en 30 | metrics: 31 | - Accuracy 32 | original_task: true 33 | name: GPT-3 style 34 | reference: Brown et al. 35 | 507de732-8298-4971-bac3-7d768d511a31: !Template 36 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 37 | id: 507de732-8298-4971-bac3-7d768d511a31 38 | jinja: '{{ text.split()[:-1] | join('' '') }} ____. 39 | 40 | 41 | Fill in the ____: ||| {{ text.split()[-1] }}' 42 | metadata: !TemplateMetadata 43 | choices_in_prompt: false 44 | languages: 45 | - en 46 | metrics: 47 | - Accuracy 48 | original_task: true 49 | name: fill in the ____ 50 | reference: '' 51 | 774b4349-0524-4a34-881b-b344f8f5c34e: !Template 52 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 53 | id: 774b4349-0524-4a34-881b-b344f8f5c34e 54 | jinja: 'This story got cut short. What comes next? 55 | 56 | 57 | {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' 58 | metadata: !TemplateMetadata 59 | choices_in_prompt: false 60 | languages: 61 | - en 62 | metrics: 63 | - Accuracy 64 | original_task: true 65 | name: what comes next 66 | reference: '' 67 | ef072a60-252e-4c52-aa8a-4152bb4dd83c: !Template 68 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 69 | id: ef072a60-252e-4c52-aa8a-4152bb4dd83c 70 | jinja: 'Please predict the next word after the following chunk of text. 71 | 72 | 73 | {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' 74 | metadata: !TemplateMetadata 75 | choices_in_prompt: false 76 | languages: 77 | - en 78 | metrics: 79 | - Accuracy 80 | original_task: true 81 | name: please next word 82 | reference: '' 83 | -------------------------------------------------------------------------------- /promptsource/templates/dream/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: dream 2 | templates: 3 | 024906f3-2503-451f-a0ce-2c9faf90e6c5: !Template 4 | answer_choices: null 5 | id: 024906f3-2503-451f-a0ce-2c9faf90e6c5 6 | jinja: 'Read the below conversation. 7 | 8 | 9 | {{dialogue[:-1] | join("\n\n")}} 10 | 11 | 12 | What would the listener say? 13 | 14 | ||| 15 | 16 | {{dialogue[-1]}}' 17 | metadata: !TemplateMetadata 18 | choices_in_prompt: false 19 | languages: 20 | - en 21 | metrics: 22 | - BLEU 23 | - ROUGE 24 | original_task: false 25 | name: generate-last-utterance 26 | reference: '' 27 | 5c53fe97-b8b9-4c91-bd75-b3f8e056bd01: !Template 28 | answer_choices: null 29 | id: 5c53fe97-b8b9-4c91-bd75-b3f8e056bd01 30 | jinja: 'Given the question "{{question}}" and the answer "{{answer}}", write a 31 | conversation that might have happened. 32 | 33 | ||| 34 | 35 | {{dialogue | join("\n\n")}}' 36 | metadata: !TemplateMetadata 37 | choices_in_prompt: false 38 | languages: 39 | - en 40 | metrics: 41 | - BLEU 42 | - ROUGE 43 | original_task: false 44 | name: answer-to-dialogue 45 | reference: '' 46 | 70865a35-1db3-45bc-8b08-baf1d9d0be9d: !Template 47 | answer_choices: null 48 | id: 70865a35-1db3-45bc-8b08-baf1d9d0be9d 49 | jinja: '{{dialogue[1:] | join("\n\n")}} 50 | 51 | 52 | What was said before this conversation? 
53 | 54 | ||| 55 | 56 | {{dialogue[0]}}' 57 | metadata: !TemplateMetadata 58 | choices_in_prompt: false 59 | languages: 60 | - en 61 | metrics: 62 | - BLEU 63 | - ROUGE 64 | original_task: false 65 | name: generate-first-utterance 66 | reference: '' 67 | 8f962580-1611-4982-b567-05939c5012ff: !Template 68 | answer_choices: '{{choice | join("|||")}}' 69 | id: 8f962580-1611-4982-b567-05939c5012ff 70 | jinja: "Dialogue:\n\n{{dialogue | join(\"\\n\\n\")}}\n\nQuestion: {{question}}\ 71 | \ \n\n- {{answer_choices[0]}}\n\n- {{answer_choices[1]}}\n\n- {{answer_choices[2]}}\n\ 72 | |||\n{{answer}}" 73 | metadata: !TemplateMetadata 74 | choices_in_prompt: true 75 | languages: 76 | - en 77 | metrics: 78 | - Accuracy 79 | original_task: true 80 | name: baseline 81 | reference: https://dataset.org/dream/ 82 | d4687975-664d-46ac-b13b-482a35a61ab3: !Template 83 | answer_choices: '{{choice | join("|||")}}' 84 | id: d4687975-664d-46ac-b13b-482a35a61ab3 85 | jinja: "Read the following conversation and answer the question.\n\n{{dialogue\ 86 | \ | join(\"\\n\\n\")}}\n\nQuestion: {{question}} \n\n- {{answer_choices[0]}}\n\ 87 | \n- {{answer_choices[1]}}\n\n- {{answer_choices[2]}}\n|||\n{{answer}}" 88 | metadata: !TemplateMetadata 89 | choices_in_prompt: true 90 | languages: 91 | - en 92 | metrics: 93 | - Accuracy 94 | original_task: true 95 | name: read_the_following_conversation_and_answer_the_question 96 | reference: '' 97 | -------------------------------------------------------------------------------- /promptsource/templates/drop/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: drop 2 | templates: 3 | 350e0c24-b10c-4156-9053-a0b2d4af4214: !Template 4 | answer_choices: null 5 | id: 350e0c24-b10c-4156-9053-a0b2d4af4214 6 | jinja: 'Question: {{question}} 7 | 8 | Answer based on the following passage. 9 | 10 | 11 | {{passage}} 12 | 13 | 14 | Answer: 15 | 16 | ||| {{ answers_spans.spans | join(", ") }}' 17 | metadata: !TemplateMetadata 18 | choices_in_prompt: false 19 | languages: 20 | - en 21 | metrics: 22 | - Accuracy 23 | - Other 24 | original_task: true 25 | name: question context answer 26 | reference: Reading Comprehension with KB 27 | 79c0d600-8d49-4628-b1c1-d472fb762fa2: !Template 28 | answer_choices: null 29 | id: 79c0d600-8d49-4628-b1c1-d472fb762fa2 30 | jinja: "I am trying to figure out the answer to the question, \"{{question}}\"\ 31 | \ I found the following text-snippet has the answer. 
Can you tell me the answer?\n\ 32 | \n{{passage}} \n\n||| {{ answers_spans.spans | join(\", \") }}\n" 33 | metadata: !TemplateMetadata 34 | choices_in_prompt: false 35 | languages: 36 | - en 37 | metrics: 38 | - Accuracy 39 | - Other 40 | original_task: true 41 | name: can you tell me 42 | reference: Reading Comprehension with KB 43 | ab58cc42-a558-4709-8a73-30194fcf9fa2: !Template 44 | answer_choices: null 45 | id: ab58cc42-a558-4709-8a73-30194fcf9fa2 46 | jinja: 'Passage: {{passage}} 47 | 48 | Question: {{question}} 49 | 50 | Answer: ||| {{ answers_spans.spans | join(", ") }}' 51 | metadata: !TemplateMetadata 52 | choices_in_prompt: false 53 | languages: 54 | - en 55 | metrics: 56 | - Accuracy 57 | - Other 58 | original_task: true 59 | name: DROP GPT3 60 | reference: Prompt format from GPT3 - Table G20 61 | ad649b92-59ad-44a9-b328-7bbab49b104f: !Template 62 | answer_choices: null 63 | id: ad649b92-59ad-44a9-b328-7bbab49b104f 64 | jinja: 'Generate a question from the following passage that has the answer, {{ 65 | answers_spans.spans | join(", ") }} 66 | 67 | Passage : {{passage}} 68 | 69 | Question : 70 | 71 | ||| 72 | 73 | {{question}}' 74 | metadata: !TemplateMetadata 75 | choices_in_prompt: false 76 | languages: 77 | - en 78 | metrics: 79 | - BLEU 80 | - ROUGE 81 | original_task: false 82 | name: generate_question_with_passage_and_answer 83 | reference: '' 84 | e9bba528-7782-4f2b-a431-7601f8258628: !Template 85 | answer_choices: null 86 | id: e9bba528-7782-4f2b-a431-7601f8258628 87 | jinja: 'Context: {{passage}} 88 | 89 | I am trying to figure out the answer to the question from the above context. Can 90 | you tell me the answer? 91 | 92 | Question: {{question}} 93 | 94 | Answer: 95 | 96 | ||| {{ answers_spans.spans | join(", ") }}' 97 | metadata: !TemplateMetadata 98 | choices_in_prompt: false 99 | languages: 100 | - en 101 | metrics: 102 | - Accuracy 103 | - Other 104 | original_task: true 105 | name: context question answer 106 | reference: Reading Comprehension with KB 107 | -------------------------------------------------------------------------------- /promptsource/templates/enriched_web_nlg/en/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: enriched_web_nlg 2 | subset: en 3 | templates: 4 | 3860d7fb-0b50-4275-a7ab-782ae86756e5: !Template 5 | answer_choices: null 6 | id: 3860d7fb-0b50-4275-a7ab-782ae86756e5 7 | jinja: 'Take the following triple set as part of a Data-to-Text task: {{modified_triple_sets.mtriple_set[0] 8 | | join(", ")}}. Make a lexicalization of the triple set into plain text. 
||| 9 | {{lex.text | choice}}' 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: false 12 | languages: 13 | - en 14 | metrics: 15 | - BLEU 16 | - ROUGE 17 | original_task: true 18 | name: Non-explicit Description 19 | reference: 'Triple set: lexicalization' 20 | 5c203187-70e4-4913-86af-8b00b5ca9e16: !Template 21 | answer_choices: null 22 | id: 5c203187-70e4-4913-86af-8b00b5ca9e16 23 | jinja: 'Verbalize the following triples separated by a comma: {{modified_triple_sets.mtriple_set[0] 24 | | join(", ")}} ||| {{lex.text | choice}}' 25 | metadata: !TemplateMetadata 26 | choices_in_prompt: false 27 | languages: 28 | - en 29 | metrics: 30 | - BLEU 31 | - ROUGE 32 | original_task: true 33 | name: Verbalize + Specify Commas 34 | reference: 'Instruction: verbalization' 35 | 715a885b-1022-43b0-bcfe-20fa432314da: !Template 36 | answer_choices: null 37 | id: 715a885b-1022-43b0-bcfe-20fa432314da 38 | jinja: 'Take the following graph comprising triple sets, where each element of 39 | a triple is separated by "|" and each triple set by ",": {{modified_triple_sets.mtriple_set[0] 40 | | join(", ")}}. Make a verbalization of the triple set into plain text. ||| 41 | {{lex.text | choice}}' 42 | metadata: !TemplateMetadata 43 | choices_in_prompt: false 44 | languages: 45 | - en 46 | metrics: 47 | - BLEU 48 | - ROUGE 49 | original_task: true 50 | name: Explicit Graph Description 51 | reference: 'Explicit Graph Description: verbalization.' 52 | e80f68dd-ebd0-4cbc-960d-bb28aff2d2d4: !Template 53 | answer_choices: null 54 | id: e80f68dd-ebd0-4cbc-960d-bb28aff2d2d4 55 | jinja: 'Take the following Table to text task comprising semantic triples (RDF 56 | triples), where each element of a triple is separated by "|" and each triple 57 | set by ",": {{modified_triple_sets.mtriple_set[0] | join(", ")}}. Make a verbalization 58 | of the triple set into plain text, which fully and accurately describes the 59 | Table. ||| {{lex.text | choice}}' 60 | metadata: !TemplateMetadata 61 | choices_in_prompt: false 62 | languages: 63 | - en 64 | metrics: 65 | - BLEU 66 | - ROUGE 67 | original_task: true 68 | name: Explicit Table-to-Text Description 69 | reference: 'Explicit Table description: verbalization.' 70 | -------------------------------------------------------------------------------- /promptsource/templates/fever/v1.0/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: fever 2 | subset: v1.0 3 | templates: 4 | 0870481e-e5d1-43a1-821e-b11c6bfd2483: !Template 5 | answer_choices: Yes|||No|||Not sure 6 | id: 0870481e-e5d1-43a1-821e-b11c6bfd2483 7 | jinja: "{{claim}} Is this true?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\ 8 | : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Not sure\"\n}[label]\n\ 9 | }}\n{% endif %}" 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: false 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | original_task: false 17 | name: cbqa_fever_postprompt 18 | reference: CBQA fever, prompt after claim 19 | 51c55af8-1996-4cb2-88a1-ca7ddb8f9e11: !Template 20 | answer_choices: Yes|||No|||Not Sure 21 | id: 51c55af8-1996-4cb2-88a1-ca7ddb8f9e11 22 | jinja: "I've heard that {{claim}} Is this correct? 
Yes, No or Not Sure?\n|||\n\ 23 | {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\ 24 | \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" 25 | metadata: !TemplateMetadata 26 | choices_in_prompt: true 27 | languages: 28 | - en 29 | metrics: 30 | - Accuracy 31 | original_task: false 32 | name: cbqa_fever_dialog_style_surrounded_all_class 33 | reference: CBQA fever, like a conversation, with prompts surrounding claim, all 34 | class included. 35 | 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdf: !Template 36 | answer_choices: Yes|||No|||Unsure 37 | id: 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdf 38 | jinja: "Is this statement correct? {{claim}} ||| \n{% if label != \"\" %}\n{{\n\ 39 | {\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Unsure\"\ 40 | \n}[label]\n}}\n{% endif %}" 41 | metadata: !TemplateMetadata 42 | choices_in_prompt: false 43 | languages: 44 | - en 45 | metrics: 46 | - Accuracy 47 | original_task: false 48 | name: cbqa_fever_preprompt 49 | reference: Closed-book QA from only the claim, prompt before the content 50 | 948f41ab-e6bb-4de6-af3e-7f0b5d5f39a8: !Template 51 | answer_choices: Yes|||No|||Maybe 52 | id: 948f41ab-e6bb-4de6-af3e-7f0b5d5f39a8 53 | jinja: "\"{{claim}}\" Yes, no, maybe?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\ 54 | : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Maybe\"\n}[label]\n\ 55 | }}\n{% endif %}\n" 56 | metadata: !TemplateMetadata 57 | choices_in_prompt: false 58 | languages: 59 | - en 60 | metrics: 61 | - Accuracy 62 | original_task: false 63 | name: cbqa_fever_short 64 | reference: CBQA fever, minimal 65 | b1d8f035-c3af-41a8-b0b8-1604f9dc00ff: !Template 66 | answer_choices: Yes|||No|||Not Sure 67 | id: b1d8f035-c3af-41a8-b0b8-1604f9dc00ff 68 | jinja: "\"{{claim}}\", I have heard. Is this Correct? Yes, No or Not Sure?\n|||\n\ 69 | {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\ 70 | \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" 71 | metadata: !TemplateMetadata 72 | choices_in_prompt: true 73 | languages: 74 | - en 75 | metrics: 76 | - Accuracy 77 | original_task: false 78 | name: cbqa_fever_dialog_style_postprompt_all_class 79 | reference: CBQA fever, like a conversation, prompt after output. Includes 3 class. 80 | -------------------------------------------------------------------------------- /promptsource/templates/fever/v2.0/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: fever 2 | subset: v2.0 3 | templates: 4 | 0870481e-e5d1-43a1-821e-b11c6bfd248a: !Template 5 | answer_choices: Yes|||No|||Not sure 6 | id: 0870481e-e5d1-43a1-821e-b11c6bfd248a 7 | jinja: "{{claim}} Is this true?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\ 8 | : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Not sure\"\n}[label]\n\ 9 | }}\n{% endif %}" 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: false 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | original_task: false 17 | name: cbqa_fever_postprompt 18 | reference: CBQA fever, prompt after claim 19 | 51c55af8-1996-4cb2-88a1-ca7ddb8f9e1b: !Template 20 | answer_choices: Yes|||No|||Not Sure 21 | id: 51c55af8-1996-4cb2-88a1-ca7ddb8f9e1b 22 | jinja: "I've heard that {{claim}} Is this correct? 
Yes, No or Not Sure?\n|||\n\ 23 | {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\ 24 | \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" 25 | metadata: !TemplateMetadata 26 | choices_in_prompt: true 27 | languages: 28 | - en 29 | metrics: 30 | - Accuracy 31 | original_task: false 32 | name: cbqa_fever_dialog_style_surrounded_all_class 33 | reference: CBQA fever, like a conversation, with prompts surrounding claim, all 34 | class included. 35 | 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdc: !Template 36 | answer_choices: Yes|||No|||Unsure 37 | id: 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdc 38 | jinja: "Is this statement correct? {{claim}} ||| \n{% if label != \"\" %}\n{{\n\ 39 | {\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Unsure\"\ 40 | \n}[label]\n}}\n{% endif %}" 41 | metadata: !TemplateMetadata 42 | choices_in_prompt: false 43 | languages: 44 | - en 45 | metrics: 46 | - Accuracy 47 | original_task: false 48 | name: cbqa_fever_preprompt 49 | reference: Closed-book QA from only the claim, prompt before the content 50 | 948f41ab-e6bb-4de6-af3e-7f0b5d5f39ad: !Template 51 | answer_choices: Yes|||No|||Maybe 52 | id: 948f41ab-e6bb-4de6-af3e-7f0b5d5f39ad 53 | jinja: "\"{{claim}}\" Yes, no, maybe?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\ 54 | : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Maybe\"\n}[label]\n\ 55 | }}\n{% endif %}\n" 56 | metadata: !TemplateMetadata 57 | choices_in_prompt: false 58 | languages: 59 | - en 60 | metrics: 61 | - Accuracy 62 | original_task: false 63 | name: cbqa_fever_short 64 | reference: CBQA fever, minimal 65 | b1d8f035-c3af-41a8-b0b8-1604f9dc00fe: !Template 66 | answer_choices: Yes|||No|||Not Sure 67 | id: b1d8f035-c3af-41a8-b0b8-1604f9dc00fe 68 | jinja: "\"{{claim}}\", I have heard. Is this Correct? Yes, No or Not Sure?\n|||\n\ 69 | {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\ 70 | \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" 71 | metadata: !TemplateMetadata 72 | choices_in_prompt: true 73 | languages: 74 | - en 75 | metrics: 76 | - Accuracy 77 | original_task: false 78 | name: cbqa_fever_dialog_style_postprompt_all_class 79 | reference: CBQA fever, like a conversation, prompt after output. Includes 3 class. 80 | -------------------------------------------------------------------------------- /promptsource/templates/generated_reviews_enth/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: generated_reviews_enth 2 | templates: 3 | 7f158fb6-bbdd-41b8-bed7-21508c9f3c80: !Template 4 | answer_choices: no ||| yes 5 | id: 7f158fb6-bbdd-41b8-bed7-21508c9f3c80 6 | jinja: Does "{{translation.en}}" seem like a positive review to you? 
||| {{answer_choices[0 7 | if review_star<3 else 1]}} 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - AUC 14 | - Accuracy 15 | original_task: true 16 | name: seem like a positive review 17 | reference: stsb_multi_mt_en 18 | 95136948-3402-4bd4-8a69-1aa7b85461cc: !Template 19 | answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5 20 | id: 95136948-3402-4bd4-8a69-1aa7b85461cc 21 | jinja: Rate the positivity of this review ({{"1"}} being the lowest and {{"5"}} 22 | the highest).\n"{{translation.en}}" ||| {{review_star}} 23 | metadata: !TemplateMetadata 24 | choices_in_prompt: false 25 | languages: 26 | - en 27 | metrics: 28 | - AUC 29 | - Accuracy 30 | original_task: true 31 | name: rate positive review 32 | reference: stsb_multi_mt 33 | ad12212f-a230-4750-a199-9791628856c4: !Template 34 | answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5 35 | id: ad12212f-a230-4750-a199-9791628856c4 36 | jinja: "How positive is the review \"{{translation.en}}\"? Give a score between\n\ 37 | \ {{\"0\"}} and {{\"5\"}}. ||| {{review_star}}" 38 | metadata: !TemplateMetadata 39 | choices_in_prompt: false 40 | languages: 41 | - en 42 | metrics: 43 | - Accuracy 44 | - AUC 45 | original_task: true 46 | name: how positive review 47 | reference: stsb_multi_mt_en 48 | cf8f4dcb-f527-4944-b9ec-a1a3e476c13f: !Template 49 | answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5 50 | id: cf8f4dcb-f527-4944-b9ec-a1a3e476c13f 51 | jinja: On a scale from {{"1"}} to {{"5"}}, how positive is the review "{{translation.en}}"? 52 | ||| {{review_star}} 53 | metadata: !TemplateMetadata 54 | choices_in_prompt: false 55 | languages: 56 | - en 57 | metrics: 58 | - AUC 59 | - Accuracy 60 | original_task: true 61 | name: scale of positive review 62 | reference: stsb_multi_mt_en 63 | e6c55d56-23d4-41a4-9908-e9366cc2e167: !Template 64 | answer_choices: no ||| yes 65 | id: e6c55d56-23d4-41a4-9908-e9366cc2e167 66 | jinja: Do you think "{{translation.en}}" is a positive review? ||| {{answer_choices[0 67 | if review_star < 3 else 1]}} 68 | metadata: !TemplateMetadata 69 | choices_in_prompt: false 70 | languages: 71 | - en 72 | metrics: 73 | - AUC 74 | - Accuracy 75 | original_task: true 76 | name: think positive review 77 | reference: stsb_multi_mt_en 78 | -------------------------------------------------------------------------------- /promptsource/templates/glue/cola/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: glue 2 | subset: cola 3 | templates: 4 | 1d3f5f15-8128-4445-8de5-92365b7e54a8: !Template 5 | answer_choices: no ||| yes 6 | id: 1d3f5f15-8128-4445-8de5-92365b7e54a8 7 | jinja: 'Does the following sentence make sense and use correct English? Please 8 | answer {{"yes"}} or {{"no"}}. 9 | 10 | {{sentence}} 11 | 12 | ||| 13 | 14 | {{ answer_choices[label] }}' 15 | metadata: !TemplateMetadata 16 | choices_in_prompt: true 17 | languages: 18 | - en 19 | metrics: 20 | - Accuracy 21 | original_task: true 22 | name: Make sense yes no 23 | reference: '' 24 | 39a701ff-bb4b-48ac-8c0a-8c61bf0d4b8d: !Template 25 | answer_choices: No ||| Yes 26 | id: 39a701ff-bb4b-48ac-8c0a-8c61bf0d4b8d 27 | jinja: '{{sentence}} 28 | 29 | Is this example grammatically correct and sensible? 
30 | 31 | ||| 32 | 33 | {{ answer_choices[label] }}' 34 | metadata: !TemplateMetadata 35 | choices_in_prompt: false 36 | languages: 37 | - en 38 | metrics: 39 | - Accuracy 40 | original_task: true 41 | name: is_this_correct 42 | reference: A sample glue template 43 | 6f49b860-9145-4fcb-b632-9faea39e254e: !Template 44 | answer_choices: no ||| yes 45 | id: 6f49b860-9145-4fcb-b632-9faea39e254e 46 | jinja: 'I''m copy-editing a story for publication. It has the following sentence 47 | in it: 48 | 49 | {{sentence}} 50 | 51 | Does this sentence make sense and is it grammatically correct? Please answer 52 | {{"yes or no"}}. 53 | 54 | ||| 55 | 56 | {{ answer_choices[label] }}' 57 | metadata: !TemplateMetadata 58 | choices_in_prompt: true 59 | languages: 60 | - en 61 | metrics: 62 | - Accuracy 63 | original_task: true 64 | name: editing 65 | reference: '' 66 | 79b4c04c-c0e2-4add-a600-d5572da192e7: !Template 67 | answer_choices: unacceptable ||| acceptable 68 | id: 79b4c04c-c0e2-4add-a600-d5572da192e7 69 | jinja: 'The following sentence is either "{{"acceptable"}}", meaning it is grammatically 70 | correct and makes sense, or "{{"unacceptable"}}". Which is it? 71 | 72 | {{sentence}} 73 | 74 | ||| 75 | 76 | {{ answer_choices[label] }}' 77 | metadata: !TemplateMetadata 78 | choices_in_prompt: true 79 | languages: 80 | - en 81 | metrics: 82 | - Accuracy 83 | original_task: true 84 | name: Following sentence acceptable 85 | reference: '' 86 | dd33f089-57a1-452b-8bd5-8f1fffd10b60: !Template 87 | answer_choices: no ||| yes 88 | id: dd33f089-57a1-452b-8bd5-8f1fffd10b60 89 | jinja: '{{sentence}} 90 | 91 | I''m worried that sentence didn''t make any sense, or was grammatically incorrect. 92 | Was it correct? 93 | 94 | ||| 95 | 96 | {{ answer_choices[label] }}' 97 | metadata: !TemplateMetadata 98 | choices_in_prompt: false 99 | languages: 100 | - en 101 | metrics: 102 | - Accuracy 103 | original_task: true 104 | name: Previous sentence acceptable 105 | reference: '' 106 | -------------------------------------------------------------------------------- /promptsource/templates/glue/qnli/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: glue 2 | subset: qnli 3 | templates: 4 | 50c3108c-b23c-4691-97be-72438606c840: !Template 5 | answer_choices: yes ||| no 6 | id: 50c3108c-b23c-4691-97be-72438606c840 7 | jinja: '{{sentence}} 8 | 9 | Does that sentence have all you need to answer the question "{{question}}"? 
10 | 11 | ||| 12 | 13 | {{answer_choices[label]}}' 14 | metadata: !TemplateMetadata 15 | choices_in_prompt: false 16 | languages: 17 | - en 18 | metrics: 19 | - Accuracy 20 | original_task: true 21 | name: have all you need 22 | reference: '' 23 | 5f0f24d9-14a7-4588-8dc2-494b4c693b81: !Template 24 | answer_choices: yes ||| no 25 | id: 5f0f24d9-14a7-4588-8dc2-494b4c693b81 26 | jinja: 'Can you answer the question "{{question}}" based only on the following: 27 | 28 | {{sentence}} 29 | 30 | ||| 31 | 32 | {{answer_choices[label]}}' 33 | metadata: !TemplateMetadata 34 | choices_in_prompt: false 35 | languages: 36 | - en 37 | metrics: 38 | - Accuracy 39 | original_task: true 40 | name: based only on 41 | reference: '' 42 | c626350d-6c0e-47be-b09e-c9ba1446b027: !Template 43 | answer_choices: yes ||| no 44 | id: c626350d-6c0e-47be-b09e-c9ba1446b027 45 | jinja: 'Does knowing that "{{sentence}}" imply that I know the answer to "{{question}}" 46 | 47 | ||| 48 | 49 | {{answer_choices[label]}}' 50 | metadata: !TemplateMetadata 51 | choices_in_prompt: false 52 | languages: 53 | - en 54 | metrics: 55 | - Accuracy 56 | original_task: true 57 | name: imply 58 | reference: '' 59 | f2403d55-21a7-44bc-8b4c-6921fd7b01f5: !Template 60 | answer_choices: yes ||| no 61 | id: f2403d55-21a7-44bc-8b4c-6921fd7b01f5 62 | jinja: 'I want to know the answer to the following question: 63 | 64 | {{question}} 65 | 66 | All the background I''m provided with is that "{{sentence}}". Is that enough 67 | to answer the question? 68 | 69 | ||| 70 | 71 | {{answer_choices[label]}}' 72 | metadata: !TemplateMetadata 73 | choices_in_prompt: false 74 | languages: 75 | - en 76 | metrics: 77 | - Accuracy 78 | original_task: true 79 | name: want to know 80 | reference: '' 81 | f44715c4-d787-484e-a912-5456cc2b6741: !Template 82 | answer_choices: yes ||| no 83 | id: f44715c4-d787-484e-a912-5456cc2b6741 84 | jinja: 'Consider the passage: 85 | 86 | {{sentence}} 87 | 88 | and the question: 89 | 90 | {{question}} 91 | 92 | Is it possible to answer this question based only on the information in the 93 | passage? {{"A) yes"}} or {{"B) no"}} 94 | 95 | ||| 96 | 97 | {{answer_choices[label]}}' 98 | metadata: !TemplateMetadata 99 | choices_in_prompt: true 100 | languages: 101 | - en 102 | metrics: 103 | - Accuracy 104 | original_task: true 105 | name: possible to answer 106 | reference: '' 107 | -------------------------------------------------------------------------------- /promptsource/templates/glue/qqp/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: glue 2 | subset: qqp 3 | templates: 4 | 8e711799-a57c-4941-833b-466bedfb80ad: !Template 5 | answer_choices: no ||| yes 6 | id: 8e711799-a57c-4941-833b-466bedfb80ad 7 | jinja: I'm an administrator on the website Quora. There are two posts, one that 8 | asks "{{question1}}" and another that asks "{{question2}}". I can merge questions 9 | if they are asking the same thing. Can I merge these two questions? ||| {{ answer_choices[label] 10 | }} 11 | metadata: !TemplateMetadata 12 | choices_in_prompt: false 13 | languages: 14 | - en 15 | metrics: 16 | - Accuracy 17 | original_task: true 18 | name: quora 19 | reference: '' 20 | 94972071-a726-42a3-a726-13f414b65e67: !Template 21 | answer_choices: not duplicates ||| duplicates 22 | id: 94972071-a726-42a3-a726-13f414b65e67 23 | jinja: '{{question1}} 24 | 25 | {{question2}} 26 | 27 | Pick one: These questions are "{{"duplicates"}}" or "{{"not duplicates"}}". 
28 | 29 | ||| 30 | 31 | {{ answer_choices[label] }}' 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: true 34 | languages: 35 | - en 36 | metrics: 37 | - Accuracy 38 | original_task: true 39 | name: duplicate or not 40 | reference: '' 41 | a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b: !Template 42 | answer_choices: no ||| yes 43 | id: a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b 44 | jinja: Are the questions "{{question1}}" and "{{question2}}" asking the same thing? 45 | ||| {{ answer_choices[label] }} 46 | metadata: !TemplateMetadata 47 | choices_in_prompt: false 48 | languages: 49 | - en 50 | metrics: 51 | - Accuracy 52 | original_task: true 53 | name: same thing 54 | reference: '' 55 | c0182cd1-c7ac-4abe-829f-4651536af951: !Template 56 | answer_choices: no ||| yes 57 | id: c0182cd1-c7ac-4abe-829f-4651536af951 58 | jinja: Can an answer to "{{question1}}" also be used to answer "{{question2}}"? 59 | ||| {{ answer_choices[label] }} 60 | metadata: !TemplateMetadata 61 | choices_in_prompt: false 62 | languages: 63 | - en 64 | metrics: 65 | - Accuracy 66 | original_task: false 67 | name: answer 68 | reference: '' 69 | c0724198-97e7-44a1-89d8-c51e97ce0b04: !Template 70 | answer_choices: No ||| Yes 71 | id: c0724198-97e7-44a1-89d8-c51e97ce0b04 72 | jinja: 'Question 1: {{question1}} 73 | 74 | Question 2: {{question2}} 75 | 76 | 77 | Do these two questions convey the same meaning? Yes or no? ||| {{answer_choices[label]}}' 78 | metadata: !TemplateMetadata 79 | choices_in_prompt: true 80 | languages: 81 | - en 82 | metrics: 83 | - Accuracy 84 | original_task: true 85 | name: meaning 86 | reference: '' 87 | fd244bd3-ca3b-4e4f-9722-fd006c50e157: !Template 88 | answer_choices: no ||| yes 89 | id: fd244bd3-ca3b-4e4f-9722-fd006c50e157 90 | jinja: I received the questions "{{question1}}" and "{{question2}}". Are they 91 | duplicates? ||| {{ answer_choices[label] }} 92 | metadata: !TemplateMetadata 93 | choices_in_prompt: false 94 | languages: 95 | - en 96 | metrics: 97 | - Accuracy 98 | original_task: true 99 | name: duplicate 100 | reference: '' 101 | -------------------------------------------------------------------------------- /promptsource/templates/glue/rte/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: glue 2 | subset: rte 3 | templates: 4 | 03a7ae07-5ddd-46c4-92f3-2152223d44ec: !Template 5 | answer_choices: yes ||| no 6 | id: 03a7ae07-5ddd-46c4-92f3-2152223d44ec 7 | jinja: '{{sentence1}} 8 | 9 | Does this mean that "{{sentence2}}" is true? {{"A) yes or B) no."}} 10 | 11 | ||| 12 | 13 | {{answer_choices[label]}}' 14 | metadata: !TemplateMetadata 15 | choices_in_prompt: true 16 | languages: 17 | - en 18 | metrics: 19 | - Accuracy 20 | original_task: true 21 | name: mean 22 | reference: '' 23 | 4ee6ff27-de63-4e7b-a9d4-82a17eba407a: !Template 24 | answer_choices: yes ||| no 25 | id: 4ee6ff27-de63-4e7b-a9d4-82a17eba407a 26 | jinja: 'Does the claim "{{sentence2}}" follow from the fact that "{{sentence1}}"? 27 | Please answer either {{"yes"}} or {{"no"}}. 
28 | 29 | ||| 30 | 31 | {{answer_choices[label]}}' 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: true 34 | languages: 35 | - en 36 | metrics: 37 | - Accuracy 38 | original_task: true 39 | name: "does the claim\u2026 follow the fact\u2026" 40 | reference: '' 41 | 9e2b4267-ec23-44c8-b82a-107e2c890fec: !Template 42 | answer_choices: entailment ||| not entailment 43 | id: 9e2b4267-ec23-44c8-b82a-107e2c890fec 44 | jinja: 'We say that one sentence "{{"entails"}}" another sentence when the first 45 | sentence implies the second sentence. Consider the following two sentences: 46 | 47 | {{sentence1}} 48 | 49 | {{sentence2}} 50 | 51 | Is the relationship from the first to the second sentence "{{"entailment"}}" 52 | or "{{"not entailment"}}"? 53 | 54 | ||| 55 | 56 | {{answer_choices[label]}}' 57 | metadata: !TemplateMetadata 58 | choices_in_prompt: true 59 | languages: 60 | - en 61 | metrics: 62 | - Accuracy 63 | original_task: true 64 | name: entailment explained 65 | reference: '' 66 | c8dfc879-40f2-412d-be1e-4cd70107f6e6: !Template 67 | answer_choices: yes ||| no 68 | id: c8dfc879-40f2-412d-be1e-4cd70107f6e6 69 | jinja: 'Does "{{sentence1}}" imply that "{{sentence2}}"? Please answer either 70 | {{"yes"}} or {{"no"}}. 71 | 72 | ||| 73 | 74 | {{answer_choices[label]}}' 75 | metadata: !TemplateMetadata 76 | choices_in_prompt: true 77 | languages: 78 | - en 79 | metrics: 80 | - Accuracy 81 | original_task: true 82 | name: imply 83 | reference: '' 84 | f56ffced-9b16-431a-8a17-501e63cddf73: !Template 85 | answer_choices: yes ||| no 86 | id: f56ffced-9b16-431a-8a17-501e63cddf73 87 | jinja: '{{sentence1}} 88 | 89 | Does this imply 90 | 91 | {{sentence2}} 92 | 93 | Please answer {{"A) yes or B) no."}} 94 | 95 | ||| 96 | 97 | {{answer_choices[label]}}' 98 | metadata: !TemplateMetadata 99 | choices_in_prompt: true 100 | languages: 101 | - en 102 | metrics: 103 | - Accuracy 104 | original_task: true 105 | name: imply separated 106 | reference: '' 107 | -------------------------------------------------------------------------------- /promptsource/templates/glue/sst2/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: glue 2 | subset: sst2 3 | templates: 4 | 11d1c505-9232-4c35-82a4-4c3642843e2e: !Template 5 | answer_choices: negative ||| positive 6 | id: 11d1c505-9232-4c35-82a4-4c3642843e2e 7 | jinja: '{{sentence}} 8 | 9 | Question: Was that sentence {{"positive"}} or {{"negative"}}? Answer: ||| {{ 10 | answer_choices[label] }}' 11 | metadata: !TemplateMetadata 12 | choices_in_prompt: true 13 | languages: 14 | - en 15 | metrics: 16 | - Accuracy 17 | original_task: true 18 | name: positive negative after 19 | reference: '' 20 | 228fcae7-7f4c-4e3c-9ac4-e49b26bc103d: !Template 21 | answer_choices: negative ||| positive 22 | id: 228fcae7-7f4c-4e3c-9ac4-e49b26bc103d 23 | jinja: 'I''m reading a review that says "{{sentence}}". 24 | 25 | 26 | Do you think the review is {{"positive"}} or {{"negative"}}? ||| {{ answer_choices[label] 27 | }}' 28 | metadata: !TemplateMetadata 29 | choices_in_prompt: true 30 | languages: 31 | - en 32 | metrics: 33 | - Accuracy 34 | original_task: true 35 | name: review 36 | reference: '' 37 | 5aa0cea9-0f8d-454d-b25b-b0d4cda273b8: !Template 38 | answer_choices: sad ||| happy 39 | id: 5aa0cea9-0f8d-454d-b25b-b0d4cda273b8 40 | jinja: 'Someone just said to me "{{sentence}}". 41 | 42 | 43 | Do you think they are {{"sad"}} or {{"happy"}}? 
||| {{ answer_choices[label] 44 | }}' 45 | metadata: !TemplateMetadata 46 | choices_in_prompt: true 47 | languages: 48 | - en 49 | metrics: 50 | - Accuracy 51 | original_task: true 52 | name: said 53 | reference: '' 54 | 63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a: !Template 55 | answer_choices: negative ||| positive 56 | id: 63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a 57 | jinja: 'Does the following sentence have a {{"positive"}} or {{"negative"}} sentiment? 58 | 59 | {{sentence}} 60 | 61 | ||| 62 | 63 | {{ answer_choices[label] }}' 64 | metadata: !TemplateMetadata 65 | choices_in_prompt: true 66 | languages: 67 | - en 68 | metrics: 69 | - Accuracy 70 | original_task: true 71 | name: following positive negative 72 | reference: '' 73 | 6dd74cd5-e074-4612-9e96-c17ca88c3bc4: !Template 74 | answer_choices: bad ||| good 75 | id: 6dd74cd5-e074-4612-9e96-c17ca88c3bc4 76 | jinja: Someone sent me an email with the sentence "{{sentence}}". Do you think 77 | they are feeling {{"good"}} or {{"bad"}}? ||| {{ answer_choices[label] }} 78 | metadata: !TemplateMetadata 79 | choices_in_prompt: true 80 | languages: 81 | - en 82 | metrics: 83 | - Accuracy 84 | original_task: true 85 | name: happy or mad 86 | reference: '' 87 | -------------------------------------------------------------------------------- /promptsource/templates/glue/wnli/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: glue 2 | subset: wnli 3 | templates: 4 | 10c354ee-6f4e-4b04-91e1-29e999a8f3e7: !Template 5 | answer_choices: not confident ||| very confident 6 | id: 10c354ee-6f4e-4b04-91e1-29e999a8f3e7 7 | jinja: 'If it''s true that 8 | 9 | {{sentence1}} 10 | 11 | how {{"confident"}} should I be that 12 | 13 | {{sentence2}} 14 | 15 | {{"very confident or not confident?"}} 16 | 17 | ||| 18 | 19 | {{answer_choices[label]}}' 20 | metadata: !TemplateMetadata 21 | choices_in_prompt: true 22 | languages: 23 | - en 24 | metrics: 25 | - Accuracy 26 | original_task: true 27 | name: confident 28 | reference: '' 29 | 3a0e46cb-0b96-4972-83f6-29a6c6a09ba9: !Template 30 | answer_choices: no ||| yes 31 | id: 3a0e46cb-0b96-4972-83f6-29a6c6a09ba9 32 | jinja: '{{"Entailment"}} means that the second sentence follows from the first 33 | sentence. Are the following two sentences an example of entailment? 34 | 35 | {{sentence1}} 36 | 37 | {{sentence2}} 38 | 39 | ||| 40 | 41 | {{answer_choices[label]}}' 42 | metadata: !TemplateMetadata 43 | choices_in_prompt: false 44 | languages: 45 | - en 46 | metrics: 47 | - Accuracy 48 | original_task: true 49 | name: entailment explained 50 | reference: '' 51 | 75f89b05-5a81-401b-8a04-8239211a9a95: !Template 52 | answer_choices: no ||| yes 53 | id: 75f89b05-5a81-401b-8a04-8239211a9a95 54 | jinja: 'Assume that the following is true: 55 | 56 | {{sentence1}} 57 | 58 | Does this mean that "{{sentence2}}"? 59 | 60 | ||| 61 | 62 | {{answer_choices[label]}}' 63 | metadata: !TemplateMetadata 64 | choices_in_prompt: false 65 | languages: 66 | - en 67 | metrics: 68 | - Accuracy 69 | original_task: true 70 | name: mean 71 | reference: '' 72 | a244158a-a248-4e34-bef7-66e269dd0815: !Template 73 | answer_choices: no ||| yes 74 | id: a244158a-a248-4e34-bef7-66e269dd0815 75 | jinja: 'Someone told me "{{sentence1}}" Now, I think that "{{sentence2}}" Am I 76 | justified in thinking this? 
77 | 78 | ||| 79 | 80 | {{answer_choices[label]}}' 81 | metadata: !TemplateMetadata 82 | choices_in_prompt: false 83 | languages: 84 | - en 85 | metrics: 86 | - Accuracy 87 | original_task: true 88 | name: justified 89 | reference: '' 90 | a2ce492b-dfd0-4f04-bc44-70c7867ba231: !Template 91 | answer_choices: no ||| yes 92 | id: a2ce492b-dfd0-4f04-bc44-70c7867ba231 93 | jinja: '{{sentence1}} 94 | 95 | {{sentence2}} 96 | 97 | Does the first sentence imply the second sentence? 98 | 99 | ||| 100 | 101 | {{answer_choices[label]}}' 102 | metadata: !TemplateMetadata 103 | choices_in_prompt: false 104 | languages: 105 | - en 106 | metrics: 107 | - Accuracy 108 | original_task: true 109 | name: imply 110 | reference: '' 111 | -------------------------------------------------------------------------------- /promptsource/templates/hate_speech18/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: hate_speech18 2 | templates: 3 | 3266f9d4-9c80-4e17-a8a6-1fe44ca8f3bf: !Template 4 | answer_choices: noHate ||| hate 5 | id: 3266f9d4-9c80-4e17-a8a6-1fe44ca8f3bf 6 | jinja: '{% if label in [0, 1] %} 7 | 8 | {{text}} Is the sentiment the sentence expresses {{answer_choices[1]}} 9 | speech or {{answer_choices[0]}} speech? ||| {{answer_choices[label]}} 10 | 11 | {% endif %}' 12 | metadata: !TemplateMetadata 13 | choices_in_prompt: true 14 | languages: 15 | - en 16 | metrics: 17 | - Accuracy 18 | original_task: true 19 | name: hate_or_noHate_sentiment_analysis 20 | reference: '' 21 | -------------------------------------------------------------------------------- /promptsource/templates/kilt_tasks/hotpotqa/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: kilt_tasks 2 | subset: hotpotqa 3 | templates: 4 | 1a123f3a-0507-41b9-904f-b18d9ce2b79e: !Template 5 | answer_choices: null 6 | id: 1a123f3a-0507-41b9-904f-b18d9ce2b79e 7 | jinja: '{% if output %} 8 | 9 | Here''s a complex question that requires someone to reason about the input, 10 | can you answer it?
11 | 12 | {{input}} 13 | 14 | ||| 15 | 16 | {{output | map(attribute="answer") | list | choice}} 17 | 18 | {% endif %}' 19 | metadata: !TemplateMetadata 20 | choices_in_prompt: false 21 | languages: 22 | - en 23 | metrics: 24 | - Squad 25 | original_task: false 26 | name: complex_question 27 | reference: '' 28 | 5531ce47-35ff-4bce-943d-5b2b86c44352: !Template 29 | answer_choices: null 30 | id: 5531ce47-35ff-4bce-943d-5b2b86c44352 31 | jinja: '{% if output %} 32 | 33 | Combine facts and answer this: {{input}} 34 | 35 | ||| 36 | 37 | {{output | map(attribute="answer") | list | choice}} 38 | 39 | {% endif %}' 40 | metadata: !TemplateMetadata 41 | choices_in_prompt: false 42 | languages: 43 | - en 44 | metrics: 45 | - Squad 46 | original_task: false 47 | name: combining_facts 48 | reference: '' 49 | 5ce9d659-4df8-4afd-a6e1-3e542df0035a: !Template 50 | answer_choices: null 51 | id: 5ce9d659-4df8-4afd-a6e1-3e542df0035a 52 | jinja: '{% if output %} 53 | 54 | Formulate an answer to this elaborate question: {{input}} 55 | 56 | ||| 57 | 58 | {{output | map(attribute="answer") | list | choice}} 59 | 60 | {% endif %}' 61 | metadata: !TemplateMetadata 62 | choices_in_prompt: false 63 | languages: 64 | - en 65 | metrics: 66 | - Squad 67 | original_task: false 68 | name: formulate 69 | reference: '' 70 | 9211f663-51f9-428e-ba27-158480eee083: !Template 71 | answer_choices: null 72 | id: 9211f663-51f9-428e-ba27-158480eee083 73 | jinja: '{% if output %} 74 | 75 | FINAL EXAM 76 | 77 | 78 | Question 1. {{input}} 79 | 80 | ||| 81 | 82 | {{output | map(attribute="answer") | list | choice}} 83 | 84 | {% endif %}' 85 | metadata: !TemplateMetadata 86 | choices_in_prompt: false 87 | languages: 88 | - en 89 | metrics: 90 | - Squad 91 | original_task: false 92 | name: final_exam 93 | reference: '' 94 | ac0545a1-9363-4c17-aada-f0eedf5a24b2: !Template 95 | answer_choices: null 96 | id: ac0545a1-9363-4c17-aada-f0eedf5a24b2 97 | jinja: '{% if output %} 98 | 99 | {{input}} 100 | 101 | ||| 102 | 103 | {{output | map(attribute="answer") | list | choice}} 104 | 105 | {% endif %}' 106 | metadata: !TemplateMetadata 107 | choices_in_prompt: false 108 | languages: 109 | - en 110 | metrics: 111 | - Squad 112 | original_task: false 113 | name: straighforward_qa 114 | reference: '' 115 | -------------------------------------------------------------------------------- /promptsource/templates/lama/trex/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: lama 2 | subset: trex 3 | templates: 4 | 27c2da31-bd1a-48d4-9e34-c530e42c9f00: !Template 5 | answer_choices: null 6 | id: 27c2da31-bd1a-48d4-9e34-c530e42c9f00 7 | jinja: '{{masked_sentence}} Fill the mask with the missing word. ||| {{obj_label}}' 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - Accuracy 14 | original_task: false 15 | name: fill_mask 16 | reference: '' 17 | 52469d4c-6c46-4686-a36b-7af1801e1eec: !Template 18 | answer_choices: null 19 | id: 52469d4c-6c46-4686-a36b-7af1801e1eec 20 | jinja: 'Given the following paragraph : {{ masked_sentence | replace("[MASK]",obj_label)}}. 21 | what is the relationship between {{obj_label}} and {{sub_label}} ? 
22 | 23 | ||| {{ template | replace("[X]",sub_label) | replace("[Y]", obj_surface)}} ' 24 | metadata: !TemplateMetadata 25 | choices_in_prompt: false 26 | languages: 27 | - en 28 | metrics: 29 | - Accuracy 30 | - BLEU 31 | - Other 32 | original_task: false 33 | name: find_relation 34 | reference: '' 35 | 8cb6ee9d-bcf7-4d82-9acf-b93072c7384b: !Template 36 | answer_choices: null 37 | id: 8cb6ee9d-bcf7-4d82-9acf-b93072c7384b 38 | jinja: 'Replace the mask with the correct word: {{masked_sentence}} ||| {{obj_label}}' 39 | metadata: !TemplateMetadata 40 | choices_in_prompt: false 41 | languages: 42 | - en 43 | metrics: 44 | - Accuracy 45 | original_task: false 46 | name: replace_mask 47 | reference: '' 48 | cc07e0dc-b970-4f9d-b76a-05e72a86490e: !Template 49 | answer_choices: null 50 | id: cc07e0dc-b970-4f9d-b76a-05e72a86490e 51 | jinja: "write the negation of the following statements : {{ template | replace(\"\ 52 | [X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} \n||| {{ template_negated\ 53 | \ | replace(\"[X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} " 54 | metadata: !TemplateMetadata 55 | choices_in_prompt: false 56 | languages: 57 | - en 58 | metrics: 59 | - Accuracy 60 | - BLEU 61 | - Other 62 | original_task: false 63 | name: write_negation 64 | reference: '' 65 | e054c5b2-56fd-451a-aba5-fcd105e70bce: !Template 66 | answer_choices: null 67 | id: e054c5b2-56fd-451a-aba5-fcd105e70bce 68 | jinja: "Negate the following statement : {{ template | replace(\"[X]\",sub_surface)\ 69 | \ | replace(\"[Y]\", obj_surface)}} \n||| {{ template_negated | replace(\"\ 70 | [X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} " 71 | metadata: !TemplateMetadata 72 | choices_in_prompt: false 73 | languages: 74 | - en 75 | metrics: 76 | - Accuracy 77 | - BLEU 78 | - Other 79 | original_task: false 80 | name: negate_sentence 81 | reference: '' 82 | -------------------------------------------------------------------------------- /promptsource/templates/lambada/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: lambada 2 | templates: 3 | 3747e80a-4182-44eb-944b-dee40095bb17: !Template 4 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 5 | id: 3747e80a-4182-44eb-944b-dee40095bb17 6 | jinja: 'Please predict the next word after the following chunk of text. 7 | 8 | 9 | {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: false 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | original_task: true 17 | name: please next word 18 | reference: '' 19 | 506765b8-17c0-4946-bbb0-b28288caacb3: !Template 20 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 21 | id: 506765b8-17c0-4946-bbb0-b28288caacb3 22 | jinja: '{{ text.split()[:-1] | join('' '') }} ____. 23 | 24 | 25 | Fill in the ____: ||| {{ text.split()[-1] }}' 26 | metadata: !TemplateMetadata 27 | choices_in_prompt: false 28 | languages: 29 | - en 30 | metrics: 31 | - Accuracy 32 | original_task: true 33 | name: fill in the ____ 34 | reference: '' 35 | 948664d5-2ea2-4245-b656-9283948dd5cd: !Template 36 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 37 | id: 948664d5-2ea2-4245-b656-9283948dd5cd 38 | jinja: '{{ text.split()[:-1] | join('' '') }}... 39 | 40 | 41 | What comes after the ellipses? 
||| {{ text.split()[-1] }}' 42 | metadata: !TemplateMetadata 43 | choices_in_prompt: false 44 | languages: 45 | - en 46 | metrics: 47 | - Accuracy 48 | original_task: true 49 | name: ellipses 50 | reference: '' 51 | acfe374c-60ce-4354-b285-e7b0717cffe5: !Template 52 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 53 | id: acfe374c-60ce-4354-b285-e7b0717cffe5 54 | jinja: 'This story got cut short. What comes next? 55 | 56 | 57 | {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' 58 | metadata: !TemplateMetadata 59 | choices_in_prompt: false 60 | languages: 61 | - en 62 | metrics: 63 | - Accuracy 64 | original_task: true 65 | name: what comes next 66 | reference: '' 67 | d5707bd9-d3cc-4535-b4c1-5c2aee8cb8c7: !Template 68 | answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}' 69 | id: d5707bd9-d3cc-4535-b4c1-5c2aee8cb8c7 70 | jinja: 'Fill in the blank: 71 | 72 | 73 | {{ text.split()[:-1] | join('' '') }} ____. ||| {{ text.split()[-1] }}' 74 | metadata: !TemplateMetadata 75 | choices_in_prompt: false 76 | languages: 77 | - en 78 | metrics: 79 | - Accuracy 80 | original_task: true 81 | name: GPT-3 style 82 | reference: Brown et al. 83 | -------------------------------------------------------------------------------- /promptsource/templates/mdd/task2_recs/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: mdd 2 | subset: task2_recs 3 | templates: 4 | 3596d528-12c6-440b-bdc0-d61076b108c5: !Template 5 | answer_choices: null 6 | id: 3596d528-12c6-440b-bdc0-d61076b108c5 7 | jinja: "Complete this movie-trivia-related dialogue between Speaker {{ dialogue_turns.speaker[0]\ 8 | \ }} and Speaker {{ dialogue_turns.speaker[1] }} by answering Speaker {{ dialogue_turns.speaker[0]\ 9 | \ }}'s question as Speaker {{ dialogue_turns.speaker[1] }}.\n\nSpeaker {{dialogue_turns.speaker[0]}}:\ 10 | \ {{dialogue_turns.utterance[0]}}\n\nSpeaker {{dialogue_turns.speaker[1]}}:\ 11 | \ \n|||\n{{dialogue_turns.utterance[1]}}" 12 | metadata: !TemplateMetadata 13 | choices_in_prompt: false 14 | languages: 15 | - en 16 | metrics: 17 | - Other 18 | original_task: true 19 | name: recommend_movies_speaker 20 | reference: Given likes, recomend a movie with speaker information. 21 | 6f0eb61c-d9f9-4e52-a317-3d7b8049eb9b: !Template 22 | answer_choices: null 23 | id: 6f0eb61c-d9f9-4e52-a317-3d7b8049eb9b 24 | jinja: '{{dialogue_turns.utterance[0]}} 25 | 26 | ||| 27 | 28 | {{dialogue_turns.utterance[1]}}' 29 | metadata: !TemplateMetadata 30 | choices_in_prompt: false 31 | languages: 32 | - en 33 | metrics: 34 | - Other 35 | original_task: true 36 | name: recommed_movies 37 | reference: Given the likes, recommend a movie. 
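Every jinja field in these files follows the same convention: the text before "|||" is the model input and the text after it is the target. As a rough illustration only, the lambada "please next word" template above can be rendered with plain jinja2 on a made-up record (promptsource normally renders templates through its own environment, so treat this as a sketch rather than the library's rendering path):

import jinja2

# jinja body of the lambada "please next word" template shown above
lambada_jinja = (
    "Please predict the next word after the following chunk of text.\n\n"
    "{{ text.split()[:-1] | join(' ') }} ||| {{ text.split()[-1] }}"
)

# made-up record with the single `text` field that lambada provides
rendered = jinja2.Template(lambada_jinja).render(
    text="she opened the door and stepped into the quiet garden"
)
prompt, target = [part.strip() for part in rendered.split("|||")]
print(prompt)  # ends with "...stepped into the quiet"
print(target)  # "garden"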
38 | 8948a52c-a422-4858-bbf7-19790597d278: !Template 39 | answer_choices: null 40 | id: 8948a52c-a422-4858-bbf7-19790597d278 41 | jinja: '{{ ["Someone said:", "He said:", "She said:", "They said:", "A friend 42 | asked me:", "A colleague asked me:"] | choice }} {{ dialogue_turns.utterance[0] 43 | }} 44 | 45 | ||| 46 | 47 | {{dialogue_turns.utterance[1]}}' 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: false 50 | languages: 51 | - en 52 | metrics: 53 | - Other 54 | original_task: false 55 | name: recommend_movies_dialogue 56 | reference: Given the likes, recommend a movie as a dialogue 57 | -------------------------------------------------------------------------------- /promptsource/templates/mwsc/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: mwsc 2 | templates: 3 | 66c3e53a-2f2f-4ab4-b17b-ca42535d4ea1: !Template 4 | answer_choices: '{{options | join(" ||| ")}}' 5 | id: 66c3e53a-2f2f-4ab4-b17b-ca42535d4ea1 6 | jinja: '{{ question|trim(''?'') }} in the sentence "{{ sentence|trim(''.'') }}"? 7 | ||| {{ answer }}' 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - Accuracy 14 | original_task: true 15 | name: in-the-sentence-question-first 16 | reference: '' 17 | 8d4f3463-d64b-43be-b0ed-2455cb99e017: !Template 18 | answer_choices: '{{options | join(" ||| ")}}' 19 | id: 8d4f3463-d64b-43be-b0ed-2455cb99e017 20 | jinja: If I were to say "{{sentence}}" and then ask you "{{ question }}", what 21 | do you think is the correct answer out of "{{ options|join('" and "')}}"? ||| 22 | {{ answer }} 23 | metadata: !TemplateMetadata 24 | choices_in_prompt: true 25 | languages: 26 | - en 27 | metrics: 28 | - Accuracy 29 | original_task: true 30 | name: what-think 31 | reference: '' 32 | a37a2745-c815-4f3a-8f78-3da2fceae7fe: !Template 33 | answer_choices: '{{options | join(" ||| ")}}' 34 | id: a37a2745-c815-4f3a-8f78-3da2fceae7fe 35 | jinja: In the sentence "{{ sentence|trim('.') }}", {{ question[0]|lower }}{{ question[1:] 36 | }} ||| {{ answer }} 37 | metadata: !TemplateMetadata 38 | choices_in_prompt: false 39 | languages: 40 | - en 41 | metrics: 42 | - Accuracy 43 | original_task: true 44 | name: in-the-sentence 45 | reference: '' 46 | ad4b74f6-6b2f-40a8-8189-4ada58d64fd4: !Template 47 | answer_choices: '{{options | join(" ||| ")}}' 48 | id: ad4b74f6-6b2f-40a8-8189-4ada58d64fd4 49 | jinja: '{{sentence}} {{ question }} Was it "{{options|join(''" or "'')}}"? 
||| 50 | {{ answer }}' 51 | metadata: !TemplateMetadata 52 | choices_in_prompt: true 53 | languages: 54 | - en 55 | metrics: 56 | - Accuracy 57 | original_task: true 58 | name: options-or 59 | reference: '' 60 | f0e01268-c83b-4785-b593-48eb4f9173cd: !Template 61 | answer_choices: Yes ||| No 62 | id: f0e01268-c83b-4785-b593-48eb4f9173cd 63 | jinja: '{{ sentence }} Would "{{ options[0] }}" be correct if I were to ask you 64 | {{question[0]|lower }}{{ question[1:] }} ||| {% if answer == options[0] %} {{answer_choices[0]}} 65 | {% else %} {{answer_choices[1]}} {% endif %} ' 66 | metadata: !TemplateMetadata 67 | choices_in_prompt: false 68 | languages: 69 | - en 70 | metrics: 71 | - Accuracy 72 | original_task: true 73 | name: is-correct 74 | reference: '' 75 | -------------------------------------------------------------------------------- /promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: neural_code_search 2 | subset: evaluation_dataset 3 | templates: 4 | 30858249-c732-46a6-85b5-466fe964c4d4: !Template 5 | answer_choices: null 6 | id: 30858249-c732-46a6-85b5-466fe964c4d4 7 | jinja: 'Description: 8 | 9 | {{ question }} 10 | 11 | 12 | Implementation: 13 | 14 | ||| 15 | 16 | {{ answer }} 17 | 18 | ' 19 | metadata: !TemplateMetadata 20 | choices_in_prompt: false 21 | languages: 22 | - en 23 | metrics: 24 | - BLEU 25 | - ROUGE 26 | - Other 27 | original_task: false 28 | name: generate code given a description 29 | reference: '' 30 | 34f4095d-0ce0-42d5-8070-1626dd51b987: !Template 31 | answer_choices: null 32 | id: 34f4095d-0ce0-42d5-8070-1626dd51b987 33 | jinja: 'Given the following code: 34 | 35 | {{ answer }} 36 | 37 | Describe it: 38 | 39 | ||| 40 | 41 | {{ question }}' 42 | metadata: !TemplateMetadata 43 | choices_in_prompt: false 44 | languages: 45 | - en 46 | metrics: 47 | - BLEU 48 | - ROUGE 49 | original_task: false 50 | name: generate a description given code 51 | reference: '' 52 | -------------------------------------------------------------------------------- /promptsource/templates/nq_open/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: nq_open 2 | templates: 3 | 05b8ac63-5aa1-4ce7-8257-ade0fca889ae: !Template 4 | answer_choices: null 5 | id: 05b8ac63-5aa1-4ce7-8257-ade0fca889ae 6 | jinja: 'The goal is to predict an English answer string for an input English question. 7 | All questions can be answered using the contents of English Wikipedia. 8 | 9 | Question: {{question}} 10 | 11 | Answer: 12 | 13 | ||| 14 | 15 | {{answer|choice}}' 16 | metadata: !TemplateMetadata 17 | choices_in_prompt: false 18 | languages: 19 | - en 20 | metrics: 21 | - Accuracy 22 | - Other 23 | original_task: true 24 | name: formal_description 25 | reference: Copied from the dataset description. 
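Filters like the `choice` in `{{answer|choice}}` above are not jinja2 built-ins, so these files are meant to be consumed through promptsource itself rather than raw jinja2. A minimal sketch of that, following the DatasetTemplates usage documented in this repo (the dataset split and record index below are only illustrative):

from datasets import load_dataset
from promptsource.templates import DatasetTemplates

# Collection of the nq_open prompts defined in this file, keyed by template name.
nq_open_prompts = DatasetTemplates("nq_open")
template = nq_open_prompts["question_answer"]

# apply() renders the jinja on one record and splits on "|||",
# returning the prompted input and the target.
example = load_dataset("nq_open", split="validation")[0]  # illustrative split/record
input_text, target_text = template.apply(example)
print(input_text)
print(target_text)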
26 | 0b23fe26-c659-4a84-834f-f19622d11412: !Template 27 | answer_choices: null 28 | id: 0b23fe26-c659-4a84-834f-f19622d11412 29 | jinja: 'Question : {{question}} 30 | 31 | Answer : 32 | 33 | ||| 34 | 35 | 36 | {{answer | choice}}' 37 | metadata: !TemplateMetadata 38 | choices_in_prompt: false 39 | languages: 40 | - en 41 | metrics: 42 | - Accuracy 43 | - Other 44 | original_task: true 45 | name: question_answer 46 | reference: Plain Question 47 | 35113036-4cb4-4db5-a92e-d208e1b48b7c: !Template 48 | answer_choices: null 49 | id: 35113036-4cb4-4db5-a92e-d208e1b48b7c 50 | jinja: 'Guess a question that has the answer "{{answer|choice}}" 51 | 52 | ||| 53 | 54 | {{question}}?' 55 | metadata: !TemplateMetadata 56 | choices_in_prompt: false 57 | languages: 58 | - en 59 | metrics: 60 | - BLEU 61 | - ROUGE 62 | original_task: false 63 | name: guess_question 64 | reference: Guess a question. It will show if model can evaluate entity in question. 65 | 5762f138-a3bf-4614-8dff-dcae7b5bd4a4: !Template 66 | answer_choices: null 67 | id: 5762f138-a3bf-4614-8dff-dcae7b5bd4a4 68 | jinja: 'I''ve always wondered: {{question}} 69 | 70 | ||| 71 | 72 | {{answer|choice}} ' 73 | metadata: !TemplateMetadata 74 | choices_in_prompt: false 75 | languages: 76 | - en 77 | metrics: 78 | - Accuracy 79 | - Other 80 | original_task: true 81 | name: first_person_context 82 | reference: Ask a question in first person 83 | cd157288-0211-46a8-a00c-ba0e07980e37: !Template 84 | answer_choices: null 85 | id: cd157288-0211-46a8-a00c-ba0e07980e37 86 | jinja: 'Search query: {{question}} 87 | 88 | Response: 89 | 90 | ||| 91 | 92 | {{answer|choice}}' 93 | metadata: !TemplateMetadata 94 | choices_in_prompt: false 95 | languages: 96 | - en 97 | metrics: 98 | - Accuracy 99 | original_task: true 100 | name: search query 101 | reference: '' 102 | cf937d15-48e0-4ae3-a4eb-9098cccc58ce: !Template 103 | answer_choices: null 104 | id: cf937d15-48e0-4ae3-a4eb-9098cccc58ce 105 | jinja: 'Answer the following question. 106 | 107 | {{question}} 108 | 109 | ||| 110 | 111 | {{answer|choice}}' 112 | metadata: !TemplateMetadata 113 | choices_in_prompt: false 114 | languages: 115 | - en 116 | metrics: 117 | - Accuracy 118 | - Other 119 | original_task: true 120 | name: question_with_instruction 121 | reference: Instruction before question. 122 | -------------------------------------------------------------------------------- /promptsource/templates/openai_humaneval/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: openai_humaneval 2 | templates: 3 | 4a108b1c-7514-488f-99ed-3ca5da70e103: !Template 4 | answer_choices: null 5 | id: 4a108b1c-7514-488f-99ed-3ca5da70e103 6 | jinja: '{{ prompt }} 7 | 8 | Given the following docstring, what is the function body? 
9 | 10 | ||| 11 | 12 | {{ canonical_solution }}' 13 | metadata: !TemplateMetadata 14 | choices_in_prompt: false 15 | languages: 16 | - en 17 | metrics: 18 | - Other 19 | original_task: true 20 | name: function body generation 21 | reference: '' 22 | 9c85c898-70fe-4a51-be37-5111be357762: !Template 23 | answer_choices: null 24 | id: 9c85c898-70fe-4a51-be37-5111be357762 25 | jinja: "{% set ns = namespace(tests=[])%}\n{% set lines = test.split('\\n') %}\n\ 26 | {% set test_ = \"\" %}\n{% set args = \"\" %}\n{% set return_val = \"\" %}\n\ 27 | \n{% for line in lines %}\n {% if line.strip().startswith('assert') and \"\ 28 | ==\" in line.strip() %}\n {% set ns.tests = ns.tests + [line.split('assert')[1]]\ 29 | \ %}\n {% endif %}\n{% endfor %}\n{% if (ns.tests | length) > 0 %}\n {%\ 30 | \ set test_ = ns.tests | choice %}\n\n {% set return_val = test_.split(\"\ 31 | ==\")[1].split(\", \\\"\")[0].strip() %}\n {% set args = (test_.split('(')[1:]\ 32 | \ | join(\"\")).split(\"==\")[0].strip() %}\n {{ prompt }}\n {{ canonical_solution\ 33 | \ }}\n {{entry_point}}({{args}} =\n |||\n {{ return_val }}\n{% endif\ 34 | \ %}\n" 35 | metadata: !TemplateMetadata 36 | choices_in_prompt: false 37 | languages: 38 | - en 39 | metrics: 40 | - Other 41 | original_task: false 42 | name: function call return value generation 43 | reference: '' 44 | -------------------------------------------------------------------------------- /promptsource/templates/quarel/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: quarel 2 | templates: 3 | 5904fd73-b1ee-4f89-b7bc-b0fe8cc07c66: !Template 4 | answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}' 5 | id: 5904fd73-b1ee-4f89-b7bc-b0fe8cc07c66 6 | jinja: 'Question: {{question}} 7 | 8 | 9 | Do not use {{"A"}} and {{"B"}} to answer the question but instead, choose between 10 | "{{answer_choices[0]}}" and "{{answer_choices[1]}}". 11 | 12 | ||| 13 | 14 | {{answer_choices[answer_index]}}' 15 | metadata: !TemplateMetadata 16 | choices_in_prompt: true 17 | languages: 18 | - en 19 | metrics: 20 | - Accuracy 21 | original_task: false 22 | name: do_not_use 23 | reference: '' 24 | 5b5f9d29-0ad5-4bb9-831a-11fcb115c10d: !Template 25 | answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}' 26 | id: 5b5f9d29-0ad5-4bb9-831a-11fcb115c10d 27 | jinja: 'Here''s a logic test: {{question}} 28 | 29 | 30 | Choose the answer between "{{answer_choices[0]}}" and "{{answer_choices[1]}}". 31 | 32 | ||| 33 | 34 | {{answer_choices[answer_index]}}' 35 | metadata: !TemplateMetadata 36 | choices_in_prompt: true 37 | languages: 38 | - en 39 | metrics: 40 | - Accuracy 41 | original_task: false 42 | name: logic_test 43 | reference: '' 44 | 63c58389-605a-42b9-85a6-a2586a954a92: !Template 45 | answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}' 46 | id: 63c58389-605a-42b9-85a6-a2586a954a92 47 | jinja: 'Here''s a short story: {{question}}. 48 | 49 | 50 | What is the most sensical answer between "{{answer_choices[0]}}" and "{{answer_choices[1]}}"? 
51 | 52 | ||| 53 | 54 | {{answer_choices[answer_index]}}' 55 | metadata: !TemplateMetadata 56 | choices_in_prompt: true 57 | languages: 58 | - en 59 | metrics: 60 | - Accuracy 61 | original_task: false 62 | name: heres_a_story 63 | reference: '' 64 | 73a7adbb-41b1-4b4d-b378-d7e17d030a6f: !Template 65 | answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}' 66 | id: 73a7adbb-41b1-4b4d-b378-d7e17d030a6f 67 | jinja: 'Choose between "{{answer_choices[0]}}" and "{{answer_choices[1]}}". 68 | 69 | Question: {{question}} 70 | 71 | ||| 72 | 73 | {{answer_choices[answer_index]}}' 74 | metadata: !TemplateMetadata 75 | choices_in_prompt: true 76 | languages: 77 | - en 78 | metrics: 79 | - Accuracy 80 | original_task: false 81 | name: choose_between 82 | reference: '' 83 | 92013fab-5387-44d4-bf0f-e29a31bcafb6: !Template 84 | answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}' 85 | id: 92013fab-5387-44d4-bf0f-e29a31bcafb6 86 | jinja: 'I am testing my students'' logic. 87 | 88 | What is the answer they should choose between "{{answer_choices[0]}}" and "{{answer_choices[1]}}"? 89 | 90 | Logic test: {{question}} 91 | 92 | ||| 93 | 94 | {{answer_choices[answer_index]}}' 95 | metadata: !TemplateMetadata 96 | choices_in_prompt: true 97 | languages: 98 | - en 99 | metrics: 100 | - Accuracy 101 | original_task: false 102 | name: testing_students 103 | reference: '' 104 | -------------------------------------------------------------------------------- /promptsource/templates/quora/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: quora 2 | templates: 3 | 2c780ebe-f8e6-44f0-a804-0a3e53eb8cce: !Template 4 | answer_choices: No ||| Yes 5 | id: 2c780ebe-f8e6-44f0-a804-0a3e53eb8cce 6 | jinja: Given the question "{{questions.text.0}}" would you consider "{{questions.text.1}}" 7 | as a duplicate?||| {{ answer_choices [is_duplicate] }} 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - Accuracy 14 | original_task: true 15 | name: are_two_questions_duplicate 16 | reference: '' 17 | 3331355a-4d69-4060-ae9e-cdb951335ed2: !Template 18 | answer_choices: No ||| Yes 19 | id: 3331355a-4d69-4060-ae9e-cdb951335ed2 20 | jinja: Is the following question "{{questions.text.0}}" the same as "{{questions.text.1}}"? 21 | ||| {{ answer_choices [is_duplicate] }} 22 | metadata: !TemplateMetadata 23 | choices_in_prompt: false 24 | languages: 25 | - en 26 | metrics: 27 | - Accuracy 28 | original_task: true 29 | name: are_two_questions_same 30 | reference: '' 31 | 397b1fb9-0cf1-455b-aaf2-efdb750014c5: !Template 32 | answer_choices: null 33 | id: 397b1fb9-0cf1-455b-aaf2-efdb750014c5 34 | jinja: '{% if is_duplicate == true%} Paraphrase the the following question: {% 35 | if questions.text.0|length < questions.text.1|length %} {{questions.text.0}} 36 | ||| {{questions.text.1}} {% else %} {{questions.text.1}} ||| {{questions.text.0}} 37 | {% endif %}{% endif %}' 38 | metadata: !TemplateMetadata 39 | choices_in_prompt: false 40 | languages: 41 | - en 42 | metrics: 43 | - BLEU 44 | - ROUGE 45 | original_task: false 46 | name: rephrase_given_question 47 | reference: '' 48 | 6de61945-992b-4191-9b3a-930e266769c9: !Template 49 | answer_choices: True ||| False 50 | id: 6de61945-992b-4191-9b3a-930e266769c9 51 | jinja: The question "{{questions.text.0}}" is different from "{{questions.text.1}}". 52 | {{"True"}} or {{"false"}}? 
||| {{ answer_choices [is_duplicate] }} 53 | metadata: !TemplateMetadata 54 | choices_in_prompt: true 55 | languages: 56 | - en 57 | metrics: 58 | - Accuracy 59 | original_task: true 60 | name: are_two_questions_different 61 | reference: '' 62 | 7c367d58-e34f-4899-9c09-64a6a00a04b1: !Template 63 | answer_choices: false ||| true 64 | id: 7c367d58-e34f-4899-9c09-64a6a00a04b1 65 | jinja: The question "{{questions.text.0}}" is the same as "{{questions.text.1}}". 66 | {{"True"}} or {{"false"}} ? ||| {{ answer_choices [is_duplicate] }} 67 | metadata: !TemplateMetadata 68 | choices_in_prompt: true 69 | languages: 70 | - en 71 | metrics: 72 | - Accuracy 73 | original_task: true 74 | name: true_or_false 75 | reference: '' 76 | 7cc5ba2c-215d-4834-b41e-3ef717f6ac8c: !Template 77 | answer_choices: No, they are different questions ||| Yes, they are the same question 78 | id: 7cc5ba2c-215d-4834-b41e-3ef717f6ac8c 79 | jinja: Two new posts asked on Quora are "{{questions.text.0}}" and "{{questions.text.1}}". 80 | I feel like they have asked the same question. Am I correct? ||| {{answer_choices[is_duplicate]}} 81 | metadata: !TemplateMetadata 82 | choices_in_prompt: false 83 | languages: 84 | - en 85 | metrics: 86 | - Accuracy 87 | original_task: true 88 | name: yes_or_no 89 | reference: '' 90 | -------------------------------------------------------------------------------- /promptsource/templates/riddle_sense/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: riddle_sense 2 | templates: 3 | 3df2405b-4a0e-4fcf-8600-b7f4843b945a: !Template 4 | answer_choices: null 5 | id: 3df2405b-4a0e-4fcf-8600-b7f4843b945a 6 | jinja: 'Use the following options to predict a possible question for them: 7 | 8 | 9 | {% for i in range(choices[''text'']|length) %} 10 | 11 | - {{choices[''text''][i]}} 12 | 13 | {% endfor %} ||| 14 | 15 | {{question}}' 16 | metadata: !TemplateMetadata 17 | choices_in_prompt: false 18 | languages: 19 | - en 20 | metrics: 21 | - BLEU 22 | - ROUGE 23 | original_task: false 24 | name: answer_to_question 25 | reference: '' 26 | 8b44338c-f635-47fc-86fb-3135ef2a76ae: !Template 27 | answer_choices: '{{choices.text | join("|||")}}' 28 | id: 8b44338c-f635-47fc-86fb-3135ef2a76ae 29 | jinja: 'Given the options below, select the most suitable answer for the following 30 | question: 31 | 32 | {{question}} 33 | 34 | Options: 35 | 36 | - {{answer_choices | join("\n- ")}}||| 37 | 38 | {% if answerKey != "" %} 39 | 40 | {{answer_choices[choices["label"].index(answerKey)]}} 41 | 42 | {% endif %}' 43 | metadata: !TemplateMetadata 44 | choices_in_prompt: true 45 | languages: 46 | - en 47 | metrics: 48 | - Accuracy 49 | original_task: true 50 | name: most_suitable_answer 51 | reference: '' 52 | ac002734-5b1f-4478-9ffc-f16d9ca2f70e: !Template 53 | answer_choices: '{{choices.text | join("|||")}}' 54 | id: ac002734-5b1f-4478-9ffc-f16d9ca2f70e 55 | jinja: 'Answer the following question: 56 | 57 | {{question}} ||| 58 | 59 | {% if answerKey != "" %} 60 | 61 | {{ answer_choices[choices[''label''].index(answerKey)] }} 62 | 63 | {% endif %}' 64 | metadata: !TemplateMetadata 65 | choices_in_prompt: false 66 | languages: 67 | - en 68 | metrics: 69 | - Accuracy 70 | original_task: true 71 | name: answer_given_question_without_options 72 | reference: '' 73 | bf3f7bd2-91c6-455c-b9f7-42ce265fa2db: !Template 74 | answer_choices: A ||| B ||| C ||| D ||| E 75 | id: bf3f7bd2-91c6-455c-b9f7-42ce265fa2db 76 | jinja: 'Given the following options, what do you think is the correct 
answer to 77 | the question below: 78 | 79 | 80 | {{question}} 81 | 82 | 83 | Options: 84 | 85 | {% for letter, t in zip(answer_choices, choices.text) %} 86 | 87 | - {{letter}}: {{t}} 88 | 89 | {% endfor %} ||| 90 | 91 | {% if answerKey != "" %} 92 | 93 | {{answerKey}} 94 | 95 | {% endif %}' 96 | metadata: !TemplateMetadata 97 | choices_in_prompt: true 98 | languages: 99 | - en 100 | metrics: 101 | - Accuracy 102 | original_task: true 103 | name: question_to_answer_index 104 | reference: '' 105 | bf7d7cbc-aa05-4aca-97ff-29eb34502019: !Template 106 | answer_choices: '{{choices.text | join("|||")}}' 107 | id: bf7d7cbc-aa05-4aca-97ff-29eb34502019 108 | jinja: '{{question}} 109 | 110 | 111 | - {{answer_choices | join("\n- ")}} ||| 112 | 113 | {% if answerKey != "" %} 114 | 115 | {{answer_choices[choices[''label''].index(answerKey)] }} 116 | 117 | {% endif %}' 118 | metadata: !TemplateMetadata 119 | choices_in_prompt: true 120 | languages: 121 | - en 122 | metrics: 123 | - Accuracy 124 | original_task: true 125 | name: question_answering 126 | reference: '' 127 | -------------------------------------------------------------------------------- /promptsource/templates/samsum/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: samsum 2 | templates: 3 | 01faf0cd-d9d8-4245-b86f-e7e13c2972ff: !Template 4 | answer_choices: null 5 | id: 01faf0cd-d9d8-4245-b86f-e7e13c2972ff 6 | jinja: 'Summarize this dialogue: {{dialogue}} ||| 7 | 8 | {{summary}}' 9 | metadata: !TemplateMetadata 10 | choices_in_prompt: false 11 | languages: 12 | - en 13 | metrics: 14 | - ROUGE 15 | original_task: true 16 | name: 'Summarize this dialogue:' 17 | reference: '' 18 | 182a251f-2f76-4b36-8d2e-417f8d43f729: !Template 19 | answer_choices: null 20 | id: 182a251f-2f76-4b36-8d2e-417f8d43f729 21 | jinja: '{{dialogue}} 22 | 23 | Given the above dialogue, write a summary. 
||| 24 | 25 | {{summary}}' 26 | metadata: !TemplateMetadata 27 | choices_in_prompt: false 28 | languages: 29 | - en 30 | metrics: 31 | - ROUGE 32 | original_task: true 33 | name: Given the above dialogue write a summary 34 | reference: '' 35 | 72eda731-894d-4260-9113-9e492822f80e: !Template 36 | answer_choices: null 37 | id: 72eda731-894d-4260-9113-9e492822f80e 38 | jinja: 'Summarize: {{dialogue}}||| 39 | 40 | {{summary}}' 41 | metadata: !TemplateMetadata 42 | choices_in_prompt: false 43 | languages: 44 | - en 45 | metrics: 46 | - ROUGE 47 | original_task: true 48 | name: 'Summarize:' 49 | reference: '' 50 | 7bd51f5b-5bac-429e-b8f9-dd6782b92a59: !Template 51 | answer_choices: null 52 | id: 7bd51f5b-5bac-429e-b8f9-dd6782b92a59 53 | jinja: '{{dialogue}} 54 | 55 | To sum up this dialog: 56 | 57 | |||{{summary}}' 58 | metadata: !TemplateMetadata 59 | choices_in_prompt: false 60 | languages: 61 | - en 62 | metrics: 63 | - ROUGE 64 | original_task: true 65 | name: To sum up this dialog 66 | reference: '' 67 | 8d829dcb-ea64-457d-b025-f16e31c2834a: !Template 68 | answer_choices: null 69 | id: 8d829dcb-ea64-457d-b025-f16e31c2834a 70 | jinja: 'Generate a summary for this dialogue: 71 | 72 | {{dialogue}} 73 | 74 | |||{{summary}}' 75 | metadata: !TemplateMetadata 76 | choices_in_prompt: false 77 | languages: 78 | - en 79 | metrics: 80 | - ROUGE 81 | original_task: true 82 | name: Generate a summary for this dialogue 83 | reference: '' 84 | 9f571a72-6813-4307-9aae-753ca0f737c5: !Template 85 | answer_choices: null 86 | id: 9f571a72-6813-4307-9aae-753ca0f737c5 87 | jinja: 'Write a dialogue that matches this summary: {{summary}} ||| 88 | 89 | {{dialogue}}' 90 | metadata: !TemplateMetadata 91 | choices_in_prompt: false 92 | languages: 93 | - en 94 | metrics: 95 | - ROUGE 96 | original_task: false 97 | name: Write a dialogue that match this summary 98 | reference: '' 99 | bd891653-49b6-40bb-968f-8e6632c75659: !Template 100 | answer_choices: null 101 | id: bd891653-49b6-40bb-968f-8e6632c75659 102 | jinja: "Sum up the following dialogue: \n{{dialogue}}\n|||{{summary}}" 103 | metadata: !TemplateMetadata 104 | choices_in_prompt: false 105 | languages: 106 | - en 107 | metrics: 108 | - ROUGE 109 | original_task: true 110 | name: Sum up the following dialogue 111 | reference: '' 112 | -------------------------------------------------------------------------------- /promptsource/templates/scitail/snli_format/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: scitail 2 | subset: snli_format 3 | templates: 4 | 90827988-2a8d-4ecb-b8c1-54ad6cd0ebfa: !Template 5 | answer_choices: yes ||| no 6 | id: 90827988-2a8d-4ecb-b8c1-54ad6cd0ebfa 7 | jinja: 'Given that {{sentence1}} Does it follow that {{sentence2}} 8 | 9 | {{ answer_choices | join('' or '') }}? 
10 | 11 | |||{% if gold_label == "entailment" %} 12 | 13 | {{answer_choices[0]}} 14 | 15 | {% else %} 16 | 17 | {{answer_choices[1]}} 18 | 19 | {% endif %}' 20 | metadata: !TemplateMetadata 21 | choices_in_prompt: true 22 | languages: 23 | - en 24 | metrics: 25 | - Accuracy 26 | original_task: true 27 | name: Another Yes/No Entailment Framing 28 | reference: '' 29 | -------------------------------------------------------------------------------- /promptsource/templates/scitail/tsv_format/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: scitail 2 | subset: tsv_format 3 | templates: 4 | 189ed384-c077-49ad-b606-ed08b66f8376: !Template 5 | answer_choices: true ||| false 6 | id: 189ed384-c077-49ad-b606-ed08b66f8376 7 | jinja: "{{premise}} Therefore, we are licensed to say that {{hypothesis}} {{\ 8 | \ answer_choices | join(' or ') }}|||\n{% if label == \"entails\" %} \n{{answer_choices[0]}}\n\ 9 | {% else %}\n{{answer_choices[1]}}\n{% endif %}" 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: true 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | original_task: true 17 | name: "\u2026 Therefore, we're licensed to say that\u2026" 18 | reference: '' 19 | 1ff92b02-fefc-49e0-b676-9391fab8f193: !Template 20 | answer_choices: neutral ||| entails 21 | id: 1ff92b02-fefc-49e0-b676-9391fab8f193 22 | jinja: Suppose {{premise}} Can we infer that {{hypothesis}}? ||| {{label}} 23 | metadata: !TemplateMetadata 24 | choices_in_prompt: false 25 | languages: 26 | - en 27 | metrics: 28 | - Accuracy 29 | original_task: true 30 | name: "Suppose\u2026 Can we infer that\u2026" 31 | reference: '' 32 | 5aa53544-73a6-4486-b8c8-623345353fa7: !Template 33 | answer_choices: yes ||| no 34 | id: 5aa53544-73a6-4486-b8c8-623345353fa7 35 | jinja: "{{premise}} Does the previous passage support the claim that {{hypothesis}}?\ 36 | \ |||{% if label == \"entails\" %} \n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\ 37 | {% endif %}" 38 | metadata: !TemplateMetadata 39 | choices_in_prompt: false 40 | languages: 41 | - en 42 | metrics: 43 | - Accuracy 44 | original_task: true 45 | name: "\u2026does the previous passage support the claim that" 46 | reference: '' 47 | 705fa099-0650-4de5-b72f-881aea0fa208: !Template 48 | answer_choices: yes ||| no 49 | id: 705fa099-0650-4de5-b72f-881aea0fa208 50 | jinja: "Given that {{premise}} Does it follow that {{hypothesis}} {{ answer_choices\ 51 | \ | join(' or ') }} |||\n{% if label == \"entails\" %} \n{{answer_choices[0]}}\n\ 52 | {% else %}\n{{answer_choices[1]}}\n{% endif %}" 53 | metadata: !TemplateMetadata 54 | choices_in_prompt: true 55 | languages: 56 | - en 57 | metrics: 58 | - Accuracy 59 | original_task: true 60 | name: "given\u2026 does it follow that\u2026 " 61 | reference: Another yes/no entailment framing 62 | 9aa89dee-6cef-43bc-bdf4-e38cdf0796a6: !Template 63 | answer_choices: yes ||| no 64 | id: 9aa89dee-6cef-43bc-bdf4-e38cdf0796a6 65 | jinja: "Sentence 1: {{premise}}\n\nSentence 2: {{hypothesis}}\n\nQuestion: Does\ 66 | \ Sentence 1 entail Sentence 2? {{ answer_choices | join(' or ') }} |||\n{%\ 67 | \ if label == \"entails\" %} \n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\ 68 | {% endif %}" 69 | metadata: !TemplateMetadata 70 | choices_in_prompt: true 71 | languages: 72 | - en 73 | metrics: 74 | - Accuracy 75 | original_task: true 76 | name: does S1 entail S2? 77 | reference: Adapted from Victor's prompts for XNLI. 
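The scitail prompts above also show how a label selects the target: answer_choices is a "|||"-separated list, and the jinja either indexes it with an integer label ({{answer_choices[label]}}) or branches on a string label as these entails/neutral templates do. The same logic in plain Python, with a made-up record (the split-and-strip mirrors how promptsource appears to parse the field):

# "yes ||| no" as written in the answer_choices field above
answer_choices = [choice.strip() for choice in "yes ||| no".split("|||")]

# made-up scitail-style record; labels in this subset are the strings "entails"/"neutral"
example = {
    "premise": "Plants make their own food through photosynthesis.",
    "hypothesis": "Photosynthesis lets plants produce food.",
    "label": "entails",
}

# mirrors the {% if label == "entails" %} branch used in the templates above
target = answer_choices[0] if example["label"] == "entails" else answer_choices[1]
print(target)  # "yes"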
78 | -------------------------------------------------------------------------------- /promptsource/templates/scitldr/Abstract/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: scitldr 2 | subset: Abstract 3 | templates: 4 | 01fb91ab-2c95-436e-9363-3dfcdb6c5ba6: !Template 5 | answer_choices: null 6 | id: 01fb91ab-2c95-436e-9363-3dfcdb6c5ba6 7 | jinja: "Generate a summary for the text: \n{{source | join(\" \")}}\n|||\n{{target|choice}}" 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - BLEU 14 | - ROUGE 15 | original_task: true 16 | name: basic_task_description_like 17 | reference: Assume there is only one choice 18 | 08b9e913-a305-46e2-aa43-f1126d76cf55: !Template 19 | answer_choices: null 20 | id: 08b9e913-a305-46e2-aa43-f1126d76cf55 21 | jinja: "Elaborate on the given summary: \n{{target |choice}}\n\nStart with following\ 22 | \ sentence: {{source[0]}}\n|||\n{{source | join(\" \")}}" 23 | metadata: !TemplateMetadata 24 | choices_in_prompt: false 25 | languages: 26 | - en 27 | metrics: 28 | - BLEU 29 | - ROUGE 30 | original_task: false 31 | name: reverse_generation 32 | reference: This template asks the model to hallucinate the abstract. 33 | 16faf5c0-a0c5-488a-89dd-2989622b01dc: !Template 34 | answer_choices: null 35 | id: 16faf5c0-a0c5-488a-89dd-2989622b01dc 36 | jinja: "Compress the abstract to one or two sentences. Make sure it captures the\ 37 | \ main point of the abstract. \nAbstract: {{source | join(\" \")}}\nSummary:\ 38 | \ \n|||\n{{target[0]}}\n\n" 39 | metadata: !TemplateMetadata 40 | choices_in_prompt: false 41 | languages: 42 | - en 43 | metrics: 44 | - BLEU 45 | - ROUGE 46 | original_task: true 47 | name: instructions_for_summary 48 | reference: Providing instructions on what a summary should look like 49 | 68502ad6-cb36-4137-9359-e6826731854a: !Template 50 | answer_choices: null 51 | id: 68502ad6-cb36-4137-9359-e6826731854a 52 | jinja: "Abstract: {{source | join(\" \")}}\nPlease summarize the abstract in one\ 53 | \ sentence: \n|||\n{{target|choice}}\n" 54 | metadata: !TemplateMetadata 55 | choices_in_prompt: false 56 | languages: 57 | - en 58 | metrics: 59 | - BLEU 60 | - ROUGE 61 | original_task: true 62 | name: summarize_in_sentence 63 | reference: Template asks the model to summarize in one sentence 64 | ab46a8f2-1e57-4ac9-b4ae-422c70689450: !Template 65 | answer_choices: null 66 | id: ab46a8f2-1e57-4ac9-b4ae-422c70689450 67 | jinja: '{{source| join(" ")}} 68 | 69 | TL;DR: ||| {{target[0]}} 70 | 71 | ' 72 | metadata: !TemplateMetadata 73 | choices_in_prompt: false 74 | languages: 75 | - en 76 | metrics: 77 | - BLEU 78 | - ROUGE 79 | original_task: true 80 | name: gpt_2_style 81 | reference: GPT 2 style template 82 | bac2ebcf-a54d-49a0-ac37-e7ad3f4878cb: !Template 83 | answer_choices: null 84 | id: bac2ebcf-a54d-49a0-ac37-e7ad3f4878cb 85 | jinja: "{{source | join(\" \")}}\nPlease summarize the above paragraph. 
\n|||\n\ 86 | {{target|choice}}" 87 | metadata: !TemplateMetadata 88 | choices_in_prompt: false 89 | languages: 90 | - en 91 | metrics: 92 | - BLEU 93 | - ROUGE 94 | original_task: true 95 | name: basic_with_choice_output 96 | reference: basic task like description with choice filter 97 | -------------------------------------------------------------------------------- /promptsource/templates/sick/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: sick 2 | templates: 3 | 2b5fcfdc-8dc4-4aed-9819-8a104230d0fa: !Template 4 | answer_choices: null 5 | id: 2b5fcfdc-8dc4-4aed-9819-8a104230d0fa 6 | jinja: 'How related are the following sentences? 7 | 8 | Give a score on a scale of 1 to 5. 9 | 10 | 11 | {{sentence_A}} 12 | 13 | 14 | {{sentence_B}} ||| 15 | 16 | 17 | {{(((5*relatedness_score)|round)/5)}} 18 | 19 | 20 | ' 21 | metadata: !TemplateMetadata 22 | choices_in_prompt: false 23 | languages: 24 | - en 25 | metrics: 26 | - Pearson Correlation 27 | - Spearman Correlation 28 | original_task: true 29 | name: sentences relation score 30 | reference: '' 31 | 566db154-818a-43c6-b66d-924a20fbbec2: !Template 32 | answer_choices: entail ||| is neutral ||| contradict 33 | id: 566db154-818a-43c6-b66d-924a20fbbec2 34 | jinja: 'Does sentence B entail, contradict, or is neutral with respect to sentence 35 | A? 36 | 37 | 38 | Sentence A: {{sentence_A}} 39 | 40 | 41 | Sentence B: {{sentence_B}} ||| 42 | 43 | {{ answer_choices[label] }}' 44 | metadata: !TemplateMetadata 45 | choices_in_prompt: true 46 | languages: 47 | - en 48 | metrics: 49 | - Accuracy 50 | original_task: true 51 | name: B entails_netural_contradict A? 52 | reference: '' 53 | 9a4d6bd4-bd67-46e4-ac70-6d46eff32b93: !Template 54 | answer_choices: entail ||| is neutral ||| contradict 55 | id: 9a4d6bd4-bd67-46e4-ac70-6d46eff32b93 56 | jinja: 'Does sentence A entail, contradict, or is neutral with respect to sentence 57 | B? 58 | 59 | 60 | Sentence A: {{sentence_A}} 61 | 62 | 63 | Sentence B: {{sentence_B}}||| 64 | 65 | {{ 66 | 67 | {"A_entails_B": answer_choices[0], "A_neutral_B": answer_choices[1], "A_contradicts_B": 68 | answer_choices[2]}[entailment_AB] 69 | 70 | }}' 71 | metadata: !TemplateMetadata 72 | choices_in_prompt: true 73 | languages: 74 | - en 75 | metrics: 76 | - Accuracy 77 | original_task: true 78 | name: A entails_neutral_contradict B? 79 | reference: '' 80 | a502cdc1-3bf0-4019-8b4c-b293d75a95ff: !Template 81 | answer_choices: Yes ||| No 82 | id: a502cdc1-3bf0-4019-8b4c-b293d75a95ff 83 | jinja: 'Does the sentence, "{{sentence_B}}", entail the sentence, "{{sentence_A}}" 84 | ? ||| 85 | 86 | {{ 87 | 88 | [answer_choices[0], answer_choices[1], answer_choices[1]][label] 89 | 90 | }}' 91 | metadata: !TemplateMetadata 92 | choices_in_prompt: false 93 | languages: 94 | - en 95 | metrics: 96 | - Accuracy 97 | original_task: true 98 | name: B entails A? 99 | reference: '' 100 | d96bfba4-3bf3-41db-84be-3d67126faf07: !Template 101 | answer_choices: Yes ||| No 102 | id: d96bfba4-3bf3-41db-84be-3d67126faf07 103 | jinja: 'Does the sentence, "{{sentence_A}}", entail the sentence, "{{sentence_B}}" 104 | ? ||| 105 | 106 | {{ 107 | 108 | {"A_entails_B": answer_choices[0], "A_neutral_B": answer_choices[1], "A_contradicts_B": 109 | answer_choices[1]}[entailment_AB] 110 | 111 | }}' 112 | metadata: !TemplateMetadata 113 | choices_in_prompt: false 114 | languages: 115 | - en 116 | metrics: 117 | - Accuracy 118 | original_task: true 119 | name: A entails B? 
120 | reference: '' 121 | -------------------------------------------------------------------------------- /promptsource/templates/sms_spam/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: sms_spam 2 | templates: 3 | 7bab221f-92fc-46b4-8c02-d5f401185f7e: !Template 4 | answer_choices: not spam ||| spam 5 | id: 7bab221f-92fc-46b4-8c02-d5f401185f7e 6 | jinja: "What is the spam label for the following sms message? {{sms}} \n|||\n\ 7 | {{ answer_choices [label] }}" 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: true 10 | languages: 11 | - en 12 | metrics: 13 | - Accuracy 14 | original_task: true 15 | name: spam_label 16 | reference: '' 17 | 84cdb14c-f129-461c-83cf-a0a48af3d2ce: !Template 18 | answer_choices: True ||| False 19 | id: 84cdb14c-f129-461c-83cf-a0a48af3d2ce 20 | jinja: "Is this sms message considered {{\"ham\"}} (i.e. not spam)? \n{{sms}}\n\ 21 | |||\n{{answer_choices[label]}}" 22 | metadata: !TemplateMetadata 23 | choices_in_prompt: false 24 | languages: 25 | - en 26 | metrics: 27 | - Accuracy 28 | original_task: true 29 | name: ham_True_False 30 | reference: '' 31 | 871415d2-552d-4798-a319-613c3c86d290: !Template 32 | answer_choices: ham ||| spam 33 | id: 871415d2-552d-4798-a319-613c3c86d290 34 | jinja: 'Is the label for the following sms message {{"ham"}} (not spam) or {{"spam"}}? 35 | {{sms}} 36 | 37 | ||| 38 | 39 | {{ answer_choices [label] }}' 40 | metadata: !TemplateMetadata 41 | choices_in_prompt: true 42 | languages: 43 | - en 44 | metrics: 45 | - Accuracy 46 | original_task: true 47 | name: is_the_label 48 | reference: '' 49 | a38996db-6f24-4412-ab78-fb9265bedd66: !Template 50 | answer_choices: not spam||| spam 51 | id: a38996db-6f24-4412-ab78-fb9265bedd66 52 | jinja: "The following sms message should be marked as \"spam\" or \"not spam\"\ 53 | ? {{sms}} \n|||\n{{ answer_choices [label] }}" 54 | metadata: !TemplateMetadata 55 | choices_in_prompt: true 56 | languages: 57 | - en 58 | metrics: 59 | - Accuracy 60 | original_task: true 61 | name: marked as 62 | reference: '' 63 | ef8c84e0-d45d-4e5d-b5e2-6ee3a94ce330: !Template 64 | answer_choices: False ||| True 65 | id: ef8c84e0-d45d-4e5d-b5e2-6ee3a94ce330 66 | jinja: "Is this sms message considered {{\"spam\"}}? 
\n{{sms}}\n|||\n{{answer_choices[label]}}" 67 | metadata: !TemplateMetadata 68 | choices_in_prompt: false 69 | languages: 70 | - en 71 | metrics: 72 | - Accuracy 73 | original_task: true 74 | name: spam_True_False 75 | reference: '' 76 | -------------------------------------------------------------------------------- /promptsource/templates/squad/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: squad 2 | templates: 3 | 3d85b5b0-51db-4d72-8ead-d0b3654025ee: !Template 4 | answer_choices: null 5 | id: 3d85b5b0-51db-4d72-8ead-d0b3654025ee 6 | jinja: 'Refer to the passage below and answer the following question: 7 | 8 | 9 | Passage: {{context}} 10 | 11 | 12 | Question: {{question}} 13 | 14 | ||| 15 | 16 | {{answers["text"][0]}}' 17 | metadata: !TemplateMetadata 18 | choices_in_prompt: false 19 | languages: 20 | - en 21 | metrics: 22 | - Squad 23 | original_task: true 24 | name: answer_question_given_context 25 | reference: '' 26 | 5a3c2d11-9469-46f6-88c2-f7e159a9742b: !Template 27 | answer_choices: null 28 | id: 5a3c2d11-9469-46f6-88c2-f7e159a9742b 29 | jinja: '{{context}} 30 | 31 | 32 | Q: {{question}} 33 | 34 | 35 | A: ||| {{answers["text"][0]}}' 36 | metadata: !TemplateMetadata 37 | choices_in_prompt: false 38 | languages: 39 | - en 40 | metrics: 41 | - Squad 42 | original_task: true 43 | name: given_context_answer_question_variation 44 | reference: '' 45 | 64ed14d6-c835-424d-a55d-ded1b1bd2546: !Template 46 | answer_choices: null 47 | id: 64ed14d6-c835-424d-a55d-ded1b1bd2546 48 | jinja: '{{context}} 49 | 50 | 51 | Generate a question from the above passage : ||| {{question}}' 52 | metadata: !TemplateMetadata 53 | choices_in_prompt: false 54 | languages: 55 | - en 56 | metrics: 57 | - BLEU 58 | - ROUGE 59 | original_task: false 60 | name: given_context_generate_question 61 | reference: '' 62 | 69041854-6e48-4902-92c2-adb46457bea3: !Template 63 | answer_choices: null 64 | id: 69041854-6e48-4902-92c2-adb46457bea3 65 | jinja: '{{context}} 66 | 67 | 68 | From the above passage, a reasonable question with "{{answers["text"][0]}}" 69 | as the answer would be: ||| {{question}}' 70 | metadata: !TemplateMetadata 71 | choices_in_prompt: false 72 | languages: 73 | - en 74 | metrics: 75 | - BLEU 76 | - ROUGE 77 | original_task: false 78 | name: jeopardy 79 | reference: jeopardy style- wiki_qa 80 | 7c13b5ba-abfc-4b68-9a36-5430a0b0e580: !Template 81 | answer_choices: null 82 | id: 7c13b5ba-abfc-4b68-9a36-5430a0b0e580 83 | jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\ 84 | \n{{answers.text[0]}}" 85 | metadata: !TemplateMetadata 86 | choices_in_prompt: false 87 | languages: 88 | - en 89 | metrics: 90 | - Squad 91 | original_task: true 92 | name: answer_the_question 93 | reference: '' 94 | e153c4cd-6757-487a-8fe6-da7e88ec3e47: !Template 95 | answer_choices: null 96 | id: e153c4cd-6757-487a-8fe6-da7e88ec3e47 97 | jinja: '{{context}} 98 | 99 | 100 | Q: {{question}} 101 | 102 | 103 | Referring to the passage above, the correct answer to the given question is 104 | ||| {{answers["text"][0]}}' 105 | metadata: !TemplateMetadata 106 | choices_in_prompt: false 107 | languages: 108 | - en 109 | metrics: 110 | - Squad 111 | original_task: true 112 | name: answer_given_context_and_question 113 | reference: '' 114 | -------------------------------------------------------------------------------- /promptsource/templates/squad_adversarial/AddSent/templates.yaml: 
-------------------------------------------------------------------------------- 1 | dataset: squad_adversarial 2 | subset: AddSent 3 | templates: 4 | 22a2f318-5302-479e-93be-215453060624: !Template 5 | answer_choices: null 6 | id: 22a2f318-5302-479e-93be-215453060624 7 | jinja: '{{context}} 8 | 9 | 10 | Q: {{question}} 11 | 12 | 13 | Referring to the passage above, the correct answer to the given question is 14 | ||| {{answers["text"][0]}}' 15 | metadata: !TemplateMetadata 16 | choices_in_prompt: false 17 | languages: 18 | - en 19 | metrics: 20 | - Squad 21 | original_task: true 22 | name: answer_given_context_and_question 23 | reference: '' 24 | 402adce7-4857-4524-8ad3-6270b66a5e0f: !Template 25 | answer_choices: null 26 | id: 402adce7-4857-4524-8ad3-6270b66a5e0f 27 | jinja: 'Refer to the passage below and answer the following question: 28 | 29 | 30 | Passage: {{context}} 31 | 32 | 33 | Question: {{question}} 34 | 35 | ||| 36 | 37 | {{answers["text"][0]}}' 38 | metadata: !TemplateMetadata 39 | choices_in_prompt: false 40 | languages: 41 | - en 42 | metrics: 43 | - Squad 44 | original_task: true 45 | name: answer_question_given_context 46 | reference: '' 47 | b4994c82-bfb2-4e0c-a5d7-081053830097: !Template 48 | answer_choices: null 49 | id: b4994c82-bfb2-4e0c-a5d7-081053830097 50 | jinja: '{{context}} 51 | 52 | 53 | From the above passage, a reasonable question with "{{answers["text"][0]}}" 54 | as the answer would be: ||| {{question}}' 55 | metadata: !TemplateMetadata 56 | choices_in_prompt: false 57 | languages: 58 | - en 59 | metrics: 60 | - BLEU 61 | - ROUGE 62 | original_task: false 63 | name: jeopardy 64 | reference: jeopardy style- wiki_qa 65 | b60cd43d-7026-434b-abf8-f67cc965316a: !Template 66 | answer_choices: null 67 | id: b60cd43d-7026-434b-abf8-f67cc965316a 68 | jinja: '{{context}} 69 | 70 | 71 | Generate a question from the above passage : ||| {{question}}' 72 | metadata: !TemplateMetadata 73 | choices_in_prompt: false 74 | languages: 75 | - en 76 | metrics: 77 | - BLEU 78 | - ROUGE 79 | original_task: false 80 | name: given_context_generate_question 81 | reference: '' 82 | dada0334-1dc2-4e39-a7e1-258ac622ab4f: !Template 83 | answer_choices: null 84 | id: dada0334-1dc2-4e39-a7e1-258ac622ab4f 85 | jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\ 86 | \n{{answers.text[0]}}" 87 | metadata: !TemplateMetadata 88 | choices_in_prompt: false 89 | languages: 90 | - en 91 | metrics: 92 | - Squad 93 | original_task: true 94 | name: answer_the_question 95 | reference: '' 96 | e638bc9e-5059-4ace-a6f9-4871f548342f: !Template 97 | answer_choices: null 98 | id: e638bc9e-5059-4ace-a6f9-4871f548342f 99 | jinja: '{{context}} 100 | 101 | 102 | Q: {{question}} 103 | 104 | 105 | A: ||| {{answers["text"][0]}}' 106 | metadata: !TemplateMetadata 107 | choices_in_prompt: false 108 | languages: 109 | - en 110 | metrics: 111 | - Squad 112 | original_task: true 113 | name: given_context_answer_question_variation 114 | reference: '' 115 | -------------------------------------------------------------------------------- /promptsource/templates/sst/default/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: sst 2 | subset: default 3 | templates: 4 | 23c231c1-672d-4420-a8ab-41ab930de317: !Template 5 | answer_choices: no ||| yes 6 | id: 23c231c1-672d-4420-a8ab-41ab930de317 7 | jinja: 'Does the movie review below make someone want to watch it? 
8 | 9 | 10 | {{sentence}} ||| 11 | 12 | {{answer_choices 13 | 14 | [0 if label < 0.5 else 1] 15 | 16 | }}' 17 | metadata: !TemplateMetadata 18 | choices_in_prompt: false 19 | languages: 20 | - en 21 | metrics: 22 | - Accuracy 23 | original_task: true 24 | name: sentiment_watch_movie 25 | reference: '' 26 | 5119a0b5-5d82-4401-900a-7fafc1d48ff6: !Template 27 | answer_choices: null 28 | id: 5119a0b5-5d82-4401-900a-7fafc1d48ff6 29 | jinja: 'How positive is the movie review below? 30 | 31 | Give a score on a scale from 0 to 1. 32 | 33 | 34 | {{sentence}} ||| 35 | 36 | {{''%0.1f''| format(label|float)}}' 37 | metadata: !TemplateMetadata 38 | choices_in_prompt: false 39 | languages: 40 | - en 41 | metrics: 42 | - Other 43 | original_task: true 44 | name: sentiment scoring scale 45 | reference: '' 46 | 647585d3-dac6-40c3-b6d0-f02d835ae4c4: !Template 47 | answer_choices: null 48 | id: 647585d3-dac6-40c3-b6d0-f02d835ae4c4 49 | jinja: 'How much does the movie review below make you want to watch it? 50 | 51 | Give a score on a scale from 0 to 1. 52 | 53 | 54 | {{sentence}} ||| 55 | 56 | {{''%0.1f''| format(label|float)}}' 57 | metadata: !TemplateMetadata 58 | choices_in_prompt: false 59 | languages: 60 | - en 61 | metrics: 62 | - Other 63 | original_task: true 64 | name: sentiment_watch_scale 65 | reference: '' 66 | 9453d08b-6144-4f36-a53d-232ed1dfcff4: !Template 67 | answer_choices: no ||| yes 68 | id: 9453d08b-6144-4f36-a53d-232ed1dfcff4 69 | jinja: 'Does it seem like the reviewer who wrote this review liked the movie? 70 | 71 | 72 | {{sentence}} ||| 73 | 74 | {{answer_choices[0 if label < 0.5 else 1]}}' 75 | metadata: !TemplateMetadata 76 | choices_in_prompt: false 77 | languages: 78 | - en 79 | metrics: 80 | - Accuracy 81 | original_task: true 82 | name: did_reviewer_like 83 | reference: '' 84 | b15994be-ca57-4924-9af7-fbaa6ee0124b: !Template 85 | answer_choices: no ||| yes 86 | id: b15994be-ca57-4924-9af7-fbaa6ee0124b 87 | jinja: 'Is the movie review below positive? 88 | 89 | 90 | {{sentence}} ||| 91 | 92 | {{answer_choices 93 | 94 | [0 if label < 0.5 else 1] 95 | 96 | }}' 97 | metadata: !TemplateMetadata 98 | choices_in_prompt: false 99 | languages: 100 | - en 101 | metrics: 102 | - Accuracy 103 | original_task: true 104 | name: sentiment_classification 105 | reference: '' 106 | -------------------------------------------------------------------------------- /promptsource/templates/stsb_multi_mt/en/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: stsb_multi_mt 2 | subset: en 3 | templates: 4 | 6c0bdf61-9baa-415a-bf03-fdb8789d1740: !Template 5 | answer_choices: null 6 | id: 6c0bdf61-9baa-415a-bf03-fdb8789d1740 7 | jinja: How similar are "{{sentence1}}" and "{{sentence2}}"? Give a score between 8 | {{"0.0"}} and {{"5.0"}}. ||| {{(((5*similarity_score)|round)/5)}} 9 | metadata: !TemplateMetadata 10 | choices_in_prompt: false 11 | languages: 12 | - en 13 | metrics: 14 | - Pearson Correlation 15 | original_task: true 16 | name: Similarity_how 17 | reference: '' 18 | 6df357b5-f8ea-49d2-b304-3541acb5271a: !Template 19 | answer_choices: no ||| yes 20 | id: 6df357b5-f8ea-49d2-b304-3541acb5271a 21 | jinja: Do you think "{{sentence1}}" and "{{sentence2}}" express the same thing? 
22 | ||| {{answer_choices[0 if similarity_score < 2.5 else 1]}} 23 | metadata: !TemplateMetadata 24 | choices_in_prompt: false 25 | languages: 26 | - en 27 | metrics: 28 | - Accuracy 29 | original_task: false 30 | name: Similarity_express_binary 31 | reference: sst2 32 | 775af665-d8a5-46b2-bfcf-2a21abc7e99c: !Template 33 | answer_choices: no ||| yes 34 | id: 775af665-d8a5-46b2-bfcf-2a21abc7e99c 35 | jinja: Do "{{sentence1}}" and "{{sentence2}}" seem similar to you ? ||| {{answer_choices[0 36 | if similarity_score < 2.5 else 1]}} 37 | metadata: !TemplateMetadata 38 | choices_in_prompt: false 39 | languages: 40 | - en 41 | metrics: 42 | - Accuracy 43 | original_task: false 44 | name: Similarity_seem_binary 45 | reference: '' 46 | 9cab340c-32ce-465d-be89-049e4a63af11: !Template 47 | answer_choices: null 48 | id: 9cab340c-32ce-465d-be89-049e4a63af11 49 | jinja: On a scale from {{"0.0"}} to {{"5.0"}}, how similar are "{{sentence1}}" 50 | and "{{sentence2}}"? ||| {{(((5*similarity_score)|round)/5)}} 51 | metadata: !TemplateMetadata 52 | choices_in_prompt: false 53 | languages: 54 | - en 55 | metrics: 56 | - Pearson Correlation 57 | original_task: true 58 | name: Similarity_scale 59 | reference: '' 60 | e0551bee-61f0-4c1e-9c3f-18c8b54439f8: !Template 61 | answer_choices: null 62 | id: e0551bee-61f0-4c1e-9c3f-18c8b54439f8 63 | jinja: "Rate the similarity of these two sentences: ({{\"0.0\"}} being the lowest\ 64 | \ and {{\"5.0\"}} the highest)\n\"{{sentence1}}\" and \"{{sentence2}}\" \n|||\n\ 65 | {{(((5*similarity_score)|round)/5)}}" 66 | metadata: !TemplateMetadata 67 | choices_in_prompt: false 68 | languages: 69 | - en 70 | metrics: 71 | - Pearson Correlation 72 | original_task: true 73 | name: Similarity_rate 74 | reference: '' 75 | -------------------------------------------------------------------------------- /promptsource/templates/trivia_qa/unfiltered/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: trivia_qa 2 | subset: unfiltered 3 | templates: 4 | 5946db1a-a068-4a31-a06f-74a7d976cb6d: !Template 5 | answer_choices: null 6 | id: 5946db1a-a068-4a31-a06f-74a7d976cb6d 7 | jinja: "{% if answer.aliases %} \n Guess a question that has the answer \"\ 8 | {{answer.aliases|choice}}\" \n ||| \n {{question}} \n{% endif %}" 9 | metadata: !TemplateMetadata 10 | choices_in_prompt: false 11 | languages: 12 | - en 13 | metrics: 14 | - BLEU 15 | - ROUGE 16 | original_task: false 17 | name: guess_question 18 | reference: Guess a question. 
19 | 7ada9605-6fd1-49a9-a56e-6d778d4a0eb6: !Template 20 | answer_choices: null 21 | id: 7ada9605-6fd1-49a9-a56e-6d778d4a0eb6 22 | jinja: "The goal is to predict an English answer string for an input English question.\ 23 | \ \nQuestion : {{question}}\nAnswer : \n||| \n{% if answer.aliases %} \n{{answer.aliases|choice}}\ 24 | \ \n{% endif %}" 25 | metadata: !TemplateMetadata 26 | choices_in_prompt: false 27 | languages: 28 | - en 29 | metrics: 30 | - Accuracy 31 | - Other 32 | original_task: true 33 | name: formal_description 34 | reference: '' 35 | 91d9f950-a25a-4557-a16f-952d74629584: !Template 36 | answer_choices: null 37 | id: 91d9f950-a25a-4557-a16f-952d74629584 38 | jinja: "Answer the following question.\n{{question}} \n|||\n{% if answer.aliases\ 39 | \ %} \n{{answer.aliases|choice}} \n{% endif %}" 40 | metadata: !TemplateMetadata 41 | choices_in_prompt: false 42 | languages: 43 | - en 44 | metrics: 45 | - Accuracy 46 | - Other 47 | original_task: true 48 | name: question_with_instruction 49 | reference: Instruction before question. 50 | bfec3d73-c024-492f-8878-64fdb6639a29: !Template 51 | answer_choices: null 52 | id: bfec3d73-c024-492f-8878-64fdb6639a29 53 | jinja: "I've always wondered: {{question}} \n||| \n{% if answer.aliases %} \n\ 54 | {{answer.aliases|choice}}\n{% endif %}" 55 | metadata: !TemplateMetadata 56 | choices_in_prompt: false 57 | languages: 58 | - en 59 | metrics: 60 | - Accuracy 61 | - Other 62 | original_task: true 63 | name: first_person_context 64 | reference: Ask a question in first person 65 | c29c7072-0535-4e38-ba0c-b7ac0acdacf8: !Template 66 | answer_choices: null 67 | id: c29c7072-0535-4e38-ba0c-b7ac0acdacf8 68 | jinja: "Question : {{question}}\nAnswer : \n||| \n{% if answer.aliases %} \n{{answer.aliases|choice}}\n\ 69 | {% endif %}" 70 | metadata: !TemplateMetadata 71 | choices_in_prompt: false 72 | languages: 73 | - en 74 | metrics: 75 | - Accuracy 76 | - Other 77 | original_task: true 78 | name: question_answer 79 | reference: Plain Question 80 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/emoji/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: emoji 3 | templates: 4 | 8c794abe-5364-430f-aa1e-eb3501443cec: !Template 5 | answer_choices: "\u2764|||\U0001F60D|||\U0001F602|||\U0001F495|||\U0001F525|||\U0001F60A\ 6 | |||\U0001F60E|||\u2728|||\U0001F499|||\U0001F618|||\U0001F4F7|||\U0001F1FA\U0001F1F8\ 7 | |||\u2600|||\U0001F49C|||\U0001F609|||\U0001F4AF|||\U0001F601|||\U0001F384|||\U0001F4F8\ 8 | |||\U0001F61C" 9 | id: 8c794abe-5364-430f-aa1e-eb3501443cec 10 | jinja: 'Which emoji among {{answer_choices | join(", ")}} best describes the sentiment 11 | of the following tweet? 
12 | 13 | 14 | {{text}} ||| 15 | 16 | {{answer_choices[label]}}' 17 | metadata: !TemplateMetadata 18 | choices_in_prompt: true 19 | languages: 20 | - en 21 | metrics: 22 | - Other 23 | original_task: true 24 | name: emoji_option 25 | reference: 'official metric: macroaveraged F1' 26 | c05f50e0-f708-44bc-98e7-ff7b3f9f5d93: !Template 27 | answer_choices: "\u2764|||\U0001F60D|||\U0001F602|||\U0001F495|||\U0001F525|||\U0001F60A\ 28 | |||\U0001F60E|||\u2728|||\U0001F499|||\U0001F618|||\U0001F4F7|||\U0001F1FA\U0001F1F8\ 29 | |||\u2600|||\U0001F49C|||\U0001F609|||\U0001F4AF|||\U0001F601|||\U0001F384|||\U0001F4F8\ 30 | |||\U0001F61C" 31 | id: c05f50e0-f708-44bc-98e7-ff7b3f9f5d93 32 | jinja: 'Which emoji among {{answer_choices | join(", ")}} would be the best comment 33 | to the following tweet? 34 | 35 | 36 | {{text}} ||| 37 | 38 | {{answer_choices[label]}}' 39 | metadata: !TemplateMetadata 40 | choices_in_prompt: true 41 | languages: 42 | - en 43 | metrics: 44 | - Other 45 | original_task: false 46 | name: emoji_reply 47 | reference: 'Metric: macroaveraged F1' 48 | d5c771d3-28e7-420e-af47-c077cfe0e7e5: !Template 49 | answer_choices: "\u2764|||\U0001F60D|||\U0001F602|||\U0001F495|||\U0001F525|||\U0001F60A\ 50 | |||\U0001F60E|||\u2728|||\U0001F499|||\U0001F618|||\U0001F4F7|||\U0001F1FA\U0001F1F8\ 51 | |||\u2600|||\U0001F49C|||\U0001F609|||\U0001F4AF|||\U0001F601|||\U0001F384|||\U0001F4F8\ 52 | |||\U0001F61C" 53 | id: d5c771d3-28e7-420e-af47-c077cfe0e7e5 54 | jinja: 'Which emoji best describes the sentiment of the following tweet? 55 | 56 | 57 | {{text}} ||| 58 | 59 | {{answer_choices[label]}}' 60 | metadata: !TemplateMetadata 61 | choices_in_prompt: false 62 | languages: 63 | - en 64 | metrics: 65 | - Other 66 | original_task: true 67 | name: emoji 68 | reference: 'official metric: macroaveraged F1' 69 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/emotion/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: emotion 3 | templates: 4 | 7c09c33e-31f2-414b-89a1-6b1dda92ef6f: !Template 5 | answer_choices: anger ||| joy ||| optimism ||| sadness 6 | id: 7c09c33e-31f2-414b-89a1-6b1dda92ef6f 7 | jinja: '{{text}} 8 | 9 | 10 | To get full credit in this exam, choose the correct emotion from the following 11 | choices: {{answer_choices | join(", ")}} 12 | 13 | ||| 14 | 15 | {{answer_choices[label]}}' 16 | metadata: !TemplateMetadata 17 | choices_in_prompt: true 18 | languages: 19 | - en 20 | metrics: 21 | - Accuracy 22 | original_task: true 23 | name: emotion_exam 24 | reference: exam style prompt 25 | 87db02f2-585e-4fd1-81c0-e94297607097: !Template 26 | answer_choices: anger ||| joy ||| optimism ||| sadness 27 | id: 87db02f2-585e-4fd1-81c0-e94297607097 28 | jinja: 'Which emotion among {{answer_choices | join(", ")}} best describes the 29 | feeling of the author of the following tweet? 30 | 31 | 32 | {{text}}||| 33 | 34 | {{answer_choices[label]}}' 35 | metadata: !TemplateMetadata 36 | choices_in_prompt: true 37 | languages: 38 | - en 39 | metrics: 40 | - Accuracy 41 | original_task: true 42 | name: author_emotion 43 | reference: '' 44 | 8bc3ebc5-77f1-4d55-bd96-c62429ebf093: !Template 45 | answer_choices: anger ||| joy ||| optimism ||| sadness 46 | id: 8bc3ebc5-77f1-4d55-bd96-c62429ebf093 47 | jinja: 'Which emotion is best represented by the following tweet? 
48 | 49 | {{text}} 50 | 51 | 52 | Possible emotions: {{answer_choices | join(", ")}} 53 | 54 | ||| 55 | 56 | {{answer_choices[label]}}' 57 | metadata: !TemplateMetadata 58 | choices_in_prompt: true 59 | languages: 60 | - en 61 | metrics: 62 | - Accuracy 63 | original_task: true 64 | name: which_emotion 65 | reference: '' 66 | a5992077-2e31-467b-a6ee-b75dee933d0e: !Template 67 | answer_choices: anger ||| joy ||| optimism ||| sadness 68 | id: a5992077-2e31-467b-a6ee-b75dee933d0e 69 | jinja: "{{text}}\n\nCategorize the tweet into one of the following options: \n\ 70 | (a) {{answer_choices[0]}}\n(b) {{answer_choices[1]}}\n(c) {{answer_choices[2]}}\n\ 71 | (d) {{answer_choices[3]}}\n|||\n{{answer_choices[label]}}" 72 | metadata: !TemplateMetadata 73 | choices_in_prompt: true 74 | languages: 75 | - en 76 | metrics: 77 | - Accuracy 78 | original_task: true 79 | name: emotion_with_option 80 | reference: '' 81 | b8f4912e-e3be-4dd5-82ec-6f110c056a86: !Template 82 | answer_choices: anger ||| joy ||| optimism ||| sadness 83 | id: b8f4912e-e3be-4dd5-82ec-6f110c056a86 84 | jinja: '{{text}} 85 | 86 | 87 | What is the emotion of the text? 88 | 89 | 90 | Hint: {{answer_choices | join(", ")}} 91 | 92 | ||| 93 | 94 | {{answer_choices[label]}}' 95 | metadata: !TemplateMetadata 96 | choices_in_prompt: true 97 | languages: 98 | - en 99 | metrics: 100 | - Accuracy 101 | original_task: true 102 | name: emotion_hint 103 | reference: '' 104 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/hate/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: hate 3 | templates: 4 | 3266f9d4-9c80-4e17-a8a6-1fe44ce8f3bf: !Template 5 | answer_choices: no ||| yes 6 | id: 3266f9d4-9c80-4e17-a8a6-1fe44ce8f3bf 7 | jinja: 'Does this tweet convey the author''s hatred towards something or someone? 8 | 9 | 10 | {{text}} ||| 11 | 12 | {{answer_choices[label]}}' 13 | metadata: !TemplateMetadata 14 | choices_in_prompt: false 15 | languages: 16 | - en 17 | metrics: 18 | - Accuracy 19 | original_task: true 20 | name: author_hate 21 | reference: '' 22 | 34a5128b-6fc8-453b-94d4-4ebaa87172c1: !Template 23 | answer_choices: no ||| yes 24 | id: 34a5128b-6fc8-453b-94d4-4ebaa87172c1 25 | jinja: 'Does this tweet convey hate: yes or no? 26 | 27 | 28 | {{text}} ||| 29 | 30 | {{answer_choices[label]}}' 31 | metadata: !TemplateMetadata 32 | choices_in_prompt: true 33 | languages: 34 | - en 35 | metrics: 36 | - Accuracy 37 | original_task: true 38 | name: hate_no_yes 39 | reference: '' 40 | 5b15dc68-05a2-42dd-b0bf-fa15d4f40320: !Template 41 | answer_choices: no|||yes 42 | id: 5b15dc68-05a2-42dd-b0bf-fa15d4f40320 43 | jinja: "Is this a hateful tweet? \n{{text}}\n|||\n{{answer_choices[label]}}" 44 | metadata: !TemplateMetadata 45 | choices_in_prompt: false 46 | languages: 47 | - en 48 | metrics: 49 | - Accuracy 50 | original_task: true 51 | name: hate_question 52 | reference: '' 53 | 971ad470-85bf-484e-aab7-b942f817bf2c: !Template 54 | answer_choices: non-hate ||| hate 55 | id: 971ad470-85bf-484e-aab7-b942f817bf2c 56 | jinja: 'Does this tweet convey {{"hate"}} or {{"non-hate"}}? 
57 | 58 | 59 | {{text}} ||| 60 | 61 | {{answer_choices[label]}}' 62 | metadata: !TemplateMetadata 63 | choices_in_prompt: true 64 | languages: 65 | - en 66 | metrics: 67 | - Accuracy 68 | original_task: true 69 | name: hate_options 70 | reference: '' 71 | b0cdecc2-78a2-47e7-a74e-38d509c01214: !Template 72 | answer_choices: 'no ||| yes ' 73 | id: b0cdecc2-78a2-47e7-a74e-38d509c01214 74 | jinja: "In this test, you need to answer with either yes or no. \n\nQ: Is this\ 75 | \ a hateful tweet? \n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}" 76 | metadata: !TemplateMetadata 77 | choices_in_prompt: true 78 | languages: 79 | - en 80 | metrics: 81 | - Accuracy 82 | original_task: true 83 | name: hate_exam 84 | reference: '' 85 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/irony/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: irony 3 | templates: 4 | c45095f1-9be1-4e83-8daa-68805b6ece39: !Template 5 | answer_choices: no ||| yes 6 | id: c45095f1-9be1-4e83-8daa-68805b6ece39 7 | jinja: "Is this tweet ironic? \n\n{{text}} |||\n{{answer_choices[label]}}" 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - Accuracy 14 | original_task: true 15 | name: irony_question 16 | reference: '' 17 | cd2ed852-c6fa-431a-b0f1-06f0240d74a0: !Template 18 | answer_choices: no ||| yes 19 | id: cd2ed852-c6fa-431a-b0f1-06f0240d74a0 20 | jinja: "Is this tweet ironic? Answer with either yes or no. \n\n{{text}} |||\n\ 21 | {{answer_choices[label]}}" 22 | metadata: !TemplateMetadata 23 | choices_in_prompt: true 24 | languages: 25 | - en 26 | metrics: 27 | - Accuracy 28 | original_task: true 29 | name: irony_yes_no 30 | reference: '' 31 | cef39e97-09f2-430c-ad1f-5fd9d05c876b: !Template 32 | answer_choices: no ||| yes 33 | id: cef39e97-09f2-430c-ad1f-5fd9d05c876b 34 | jinja: "In this test, you need to answer with either yes or no. \n\nQ: Is this\ 35 | \ an ironic tweet? \n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}" 36 | metadata: !TemplateMetadata 37 | choices_in_prompt: true 38 | languages: 39 | - en 40 | metrics: 41 | - Accuracy 42 | original_task: true 43 | name: irony_exam 44 | reference: exam style prompt 45 | e7cff075-9388-48de-af51-fe185b654217: !Template 46 | answer_choices: non-irony ||| irony 47 | id: e7cff075-9388-48de-af51-fe185b654217 48 | jinja: 'Does this tweet contain {{"irony"}} or {{"non-irony"}}? 49 | 50 | 51 | {{text}} ||| 52 | 53 | {{answer_choices[label]}}' 54 | metadata: !TemplateMetadata 55 | choices_in_prompt: true 56 | languages: 57 | - en 58 | metrics: 59 | - Accuracy 60 | original_task: true 61 | name: irony_option 62 | reference: '' 63 | f8361684-dad1-4c2c-9f22-1128f3096c20: !Template 64 | answer_choices: no|||yes 65 | id: f8361684-dad1-4c2c-9f22-1128f3096c20 66 | jinja: 'Does this tweet convey the author''s irony towards something or someone?
67 | 68 | 69 | {{text}} 70 | 71 | ||| 72 | 73 | {{answer_choices[label]}} 74 | 75 | ' 76 | metadata: !TemplateMetadata 77 | choices_in_prompt: false 78 | languages: 79 | - en 80 | metrics: 81 | - Accuracy 82 | original_task: true 83 | name: irony_author 84 | reference: '' 85 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/offensive/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: offensive 3 | templates: 4 | 10850707-80f3-4a75-b9f4-1e2d12be04c0: !Template 5 | answer_choices: no ||| yes 6 | id: 10850707-80f3-4a75-b9f4-1e2d12be04c0 7 | jinja: "Is this tweet {{\"offensive\"}}? \n\n{{text}} |||\n{{answer_choices[label]}}" 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - Accuracy 14 | original_task: true 15 | name: offensive_question 16 | reference: '' 17 | 22ecf073-2932-4b0a-b305-1f92b696d0b4: !Template 18 | answer_choices: no ||| yes 19 | id: 22ecf073-2932-4b0a-b305-1f92b696d0b4 20 | jinja: "Can the tweet be removed for being offensive? Answer with a yes or a no.\ 21 | \ \n\n{{text}}\n|||\n{{answer_choices[label]}}" 22 | metadata: !TemplateMetadata 23 | choices_in_prompt: true 24 | languages: 25 | - en 26 | metrics: 27 | - Accuracy 28 | original_task: true 29 | name: offensive_tweet_removed 30 | reference: '' 31 | 90a80d37-922d-40c0-b036-87eac64ce4c5: !Template 32 | answer_choices: no ||| yes 33 | id: 90a80d37-922d-40c0-b036-87eac64ce4c5 34 | jinja: 'Is the author''s tweet offensive? Answer with either yes or no. 35 | 36 | 37 | {{text}} 38 | 39 | ||| 40 | 41 | {{answer_choices[label]}}' 42 | metadata: !TemplateMetadata 43 | choices_in_prompt: true 44 | languages: 45 | - en 46 | metrics: 47 | - Accuracy 48 | original_task: true 49 | name: offensive_author 50 | reference: '' 51 | a12bd98b-facc-4b17-bb16-80c98a20aa64: !Template 52 | answer_choices: no ||| yes 53 | id: a12bd98b-facc-4b17-bb16-80c98a20aa64 54 | jinja: 'Task: Identify if the tweet or text is offensive. 
55 | 56 | 57 | Tweet: {{text}} 58 | 59 | 60 | Possible answers: yes, no 61 | 62 | ||| 63 | 64 | {{answer_choices[label]}}' 65 | metadata: !TemplateMetadata 66 | choices_in_prompt: true 67 | languages: 68 | - en 69 | metrics: 70 | - Accuracy 71 | original_task: true 72 | name: offensive_task 73 | reference: '' 74 | bf2cea43-0666-4eb5-814d-00956afd1900: !Template 75 | answer_choices: no ||| yes 76 | id: bf2cea43-0666-4eb5-814d-00956afd1900 77 | jinja: "In this test, you need to answer with either yes or no.\n\nQ: Is this\ 78 | \ an offensive tweet?\n\n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}" 79 | metadata: !TemplateMetadata 80 | choices_in_prompt: true 81 | languages: 82 | - en 83 | metrics: 84 | - Accuracy 85 | original_task: true 86 | name: offensive_exam 87 | reference: '' 88 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/sentiment/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: sentiment 3 | templates: 4 | 1fef2b36-3a19-4179-9b43-d67887cff299: !Template 5 | answer_choices: negative ||| neutral ||| positive 6 | id: 1fef2b36-3a19-4179-9b43-d67887cff299 7 | jinja: 'In this exam, you need to pick the correct sentiment for the tweet: 8 | 9 | 10 | {{text}} 11 | 12 | 13 | Possible choices: {{answer_choices | join(", ")}} 14 | 15 | ||| 16 | 17 | {{answer_choices[label]}}' 18 | metadata: !TemplateMetadata 19 | choices_in_prompt: true 20 | languages: 21 | - en 22 | metrics: 23 | - Accuracy 24 | original_task: true 25 | name: sentiment_exam 26 | reference: '' 27 | 6702e8cd-9764-4c88-86a9-046f84c98ef2: !Template 28 | answer_choices: negative ||| neutral ||| positive 29 | id: 6702e8cd-9764-4c88-86a9-046f84c98ef2 30 | jinja: "What is the sentiment of the tweet?\n\n{{text}} \n\nPossible choices:\ 31 | \ {{answer_choices | join(\", \")}}\n|||\n{{answer_choices[label]}}\n" 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: true 34 | languages: 35 | - en 36 | metrics: 37 | - Accuracy 38 | original_task: true 39 | name: sentiment_question 40 | reference: '' 41 | 6c6c797c-9912-4778-906b-16f465941d16: !Template 42 | answer_choices: negative ||| neutral ||| positive 43 | id: 6c6c797c-9912-4778-906b-16f465941d16 44 | jinja: "Task: Identify the sentiment of the tweet.\n\nTweet: {{text}}\n\nOptions:\ 45 | \ {{answer_choices | join(\", \")}} \n|||\n{{answer_choices[label]}}" 46 | metadata: !TemplateMetadata 47 | choices_in_prompt: true 48 | languages: 49 | - en 50 | metrics: 51 | - Accuracy 52 | original_task: true 53 | name: sentiment_task 54 | reference: '' 55 | b70647cf-22a0-49b2-b45e-23432c635cc2: !Template 56 | answer_choices: negative|||neutral|||positive 57 | id: b70647cf-22a0-49b2-b45e-23432c635cc2 58 | jinja: "Suppose you are the moderator of Twitter, what would be the sentiment\ 59 | \ of the following tweet: \n\n{{text}}\n\nOptions: {{answer_choices | join(\"\ 60 | , \")}}\n|||\n{{answer_choices[label]}}" 61 | metadata: !TemplateMetadata 62 | choices_in_prompt: true 63 | languages: 64 | - en 65 | metrics: 66 | - Accuracy 67 | original_task: true 68 | name: sentiment_moderator 69 | reference: '' 70 | ec68fd8e-92a3-4010-b0df-b14af95421a3: !Template 71 | answer_choices: negative ||| neutral ||| positive 72 | id: ec68fd8e-92a3-4010-b0df-b14af95421a3 73 | jinja: "{{text}}\n\nCategorize the tweet into one of the following options: \n\ 74 | (a) {{answer_choices[0]}} \n(b) {{answer_choices[1]}} \n(c) {{answer_choices[2]}}\n\ 75 | 
|||\n{{answer_choices[label]}}" 76 | metadata: !TemplateMetadata 77 | choices_in_prompt: true 78 | languages: 79 | - en 80 | metrics: 81 | - Accuracy 82 | original_task: true 83 | name: sentiment_options 84 | reference: '' 85 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/stance_abortion/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: stance_abortion 3 | templates: 4 | 0d1dc279-e50c-4706-bc3d-84ea01cb59a1: !Template 5 | answer_choices: Neutral ||| Against ||| In favor 6 | id: 0d1dc279-e50c-4706-bc3d-84ea01cb59a1 7 | jinja: 'Does the author express any stance about abortion in the following text? 8 | 9 | 10 | {{text}} ||| 11 | 12 | {{answer_choices[label]}}' 13 | metadata: !TemplateMetadata 14 | choices_in_prompt: false 15 | languages: 16 | - en 17 | metrics: 18 | - Accuracy 19 | original_task: true 20 | name: abortion_predict_stance 21 | reference: '' 22 | 22758062-db86-4009-81a4-1e2a2e1052f2: !Template 23 | answer_choices: Neutral ||| Against ||| In favor 24 | id: 22758062-db86-4009-81a4-1e2a2e1052f2 25 | jinja: '{{text}} Where does the author of the above sentence stand on abortion? 26 | ||| 27 | 28 | {{answer_choices[label]}}' 29 | metadata: !TemplateMetadata 30 | choices_in_prompt: false 31 | languages: 32 | - en 33 | metrics: 34 | - Accuracy 35 | original_task: true 36 | name: abortion_guess_passive_author 37 | reference: '' 38 | 615151f8-ac5b-4c0e-a234-9e9b6296a2f2: !Template 39 | answer_choices: Neutral ||| Against ||| In favor 40 | id: 615151f8-ac5b-4c0e-a234-9e9b6296a2f2 41 | jinja: 'What option among, {{"neutral"}}, {{"against"}}, {{"in favor"}}, best 42 | describes the stance of this tweet regarding abortion? 43 | 44 | 45 | {{text}} ||| 46 | 47 | {{answer_choices[label]}}' 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: true 50 | languages: 51 | - en 52 | metrics: 53 | - Accuracy 54 | original_task: true 55 | name: abortion_option 56 | reference: '' 57 | 687ffa1e-a772-48b1-9291-ba4e530a909e: !Template 58 | answer_choices: Neutral ||| Against ||| In favor 59 | id: 687ffa1e-a772-48b1-9291-ba4e530a909e 60 | jinja: 'Is this tweet neutral, in favor of, or against abortion? 61 | 62 | 63 | {{text}} ||| 64 | 65 | {{answer_choices[label]}}' 66 | metadata: !TemplateMetadata 67 | choices_in_prompt: true 68 | languages: 69 | - en 70 | metrics: 71 | - Accuracy 72 | original_task: true 73 | name: abortion 74 | reference: '' 75 | c5507588-1d20-42f9-935f-0c767294f5a9: !Template 76 | answer_choices: Neutral ||| Against ||| In favor 77 | id: c5507588-1d20-42f9-935f-0c767294f5a9 78 | jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", 79 | ")}} 80 | 81 | 82 | {{text}} ||| 83 | 84 | {{answer_choices[label]}}' 85 | metadata: !TemplateMetadata 86 | choices_in_prompt: true 87 | languages: 88 | - en 89 | metrics: 90 | - Accuracy 91 | original_task: true 92 | name: abortion_how_describe 93 | reference: '' 94 | ee12d37b-5667-4b0e-9831-f952d08152b5: !Template 95 | answer_choices: Neutral ||| Against ||| In favor 96 | id: ee12d37b-5667-4b0e-9831-f952d08152b5 97 | jinja: '{{text}} Where does the above sentence stand on abortion? 
||| 98 | 99 | {{answer_choices[label]}}' 100 | metadata: !TemplateMetadata 101 | choices_in_prompt: false 102 | languages: 103 | - en 104 | metrics: 105 | - Accuracy 106 | original_task: true 107 | name: abortion_guess_passive 108 | reference: '' 109 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/stance_atheism/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: stance_atheism 3 | templates: 4 | 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8f: !Template 5 | answer_choices: Neutral ||| Against ||| In favor 6 | id: 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8f 7 | jinja: '{{text}} Where does the above sentence stand on atheism? ||| 8 | 9 | {{answer_choices[label]}}' 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: false 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | original_task: true 17 | name: atheism_guess_passive 18 | reference: '' 19 | 4309e10d-c9a9-4a17-8561-15270b998905: !Template 20 | answer_choices: Neutral ||| Against ||| In favor 21 | id: 4309e10d-c9a9-4a17-8561-15270b998905 22 | jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", 23 | ")}} 24 | 25 | 26 | {{text}} ||| 27 | 28 | {{answer_choices[label]}}' 29 | metadata: !TemplateMetadata 30 | choices_in_prompt: true 31 | languages: 32 | - en 33 | metrics: 34 | - Accuracy 35 | original_task: true 36 | name: atheism_how_describe 37 | reference: '' 38 | 7e47c6b8-2923-4580-a275-a2b8867a3d96: !Template 39 | answer_choices: Neutral ||| Against ||| In favor 40 | id: 7e47c6b8-2923-4580-a275-a2b8867a3d96 41 | jinja: 'What option among, {{"neutral"}}, {{"against"}}, {{"in favor"}}, best 42 | describes the stance of this tweet regarding atheism? 43 | 44 | 45 | {{text}} ||| 46 | 47 | {{answer_choices[label]}}' 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: true 50 | languages: 51 | - en 52 | metrics: 53 | - Accuracy 54 | original_task: true 55 | name: atheism_option 56 | reference: '' 57 | 89aa258e-3c3b-4d1c-8ac4-fe2c838b76e4: !Template 58 | answer_choices: Neutral ||| Against ||| In favor 59 | id: 89aa258e-3c3b-4d1c-8ac4-fe2c838b76e4 60 | jinja: 'Does the author express any stance about atheism in the following text? 61 | 62 | 63 | {{text}} ||| 64 | 65 | {{answer_choices[label]}}' 66 | metadata: !TemplateMetadata 67 | choices_in_prompt: false 68 | languages: 69 | - en 70 | metrics: 71 | - Accuracy 72 | original_task: true 73 | name: atheism_predict_stance 74 | reference: '' 75 | 97ef9418-7c92-455d-a4c5-d7b91668278c: !Template 76 | answer_choices: Neutral ||| Against ||| In favor 77 | id: 97ef9418-7c92-455d-a4c5-d7b91668278c 78 | jinja: 'Is this tweet neutral, in favor of, or against atheism? 79 | 80 | 81 | {{text}} ||| 82 | 83 | {{answer_choices[label]}}' 84 | metadata: !TemplateMetadata 85 | choices_in_prompt: true 86 | languages: 87 | - en 88 | metrics: 89 | - Accuracy 90 | original_task: true 91 | name: atheism 92 | reference: '' 93 | f28307ab-563e-4189-99b5-e0d858e9ab4c: !Template 94 | answer_choices: Neutral ||| Against ||| In favor 95 | id: f28307ab-563e-4189-99b5-e0d858e9ab4c 96 | jinja: '{{text}} Where does the author of the above sentence stand on atheism? 
97 | ||| 98 | 99 | {{answer_choices[label]}}' 100 | metadata: !TemplateMetadata 101 | choices_in_prompt: false 102 | languages: 103 | - en 104 | metrics: 105 | - Accuracy 106 | original_task: true 107 | name: atheism_guess_passive_author 108 | reference: '' 109 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/stance_climate/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: stance_climate 3 | templates: 4 | 2ebf2eaa-ef9f-413d-b7bf-cb2037330d2a: !Template 5 | answer_choices: Neutral ||| Against ||| In favor 6 | id: 2ebf2eaa-ef9f-413d-b7bf-cb2037330d2a 7 | jinja: 'What option among, {{"neutral"}}, {{"against"}}, {{"in favor"}}, best 8 | describes the stance of this tweet regarding climate change? 9 | 10 | 11 | {{text}} ||| 12 | 13 | {{answer_choices[label]}}' 14 | metadata: !TemplateMetadata 15 | choices_in_prompt: true 16 | languages: 17 | - en 18 | metrics: 19 | - Accuracy 20 | original_task: true 21 | name: climate_option 22 | reference: '' 23 | 6f4205ad-6321-42a9-bf8e-a45508e67c1a: !Template 24 | answer_choices: Neutral ||| Against ||| In favor 25 | id: 6f4205ad-6321-42a9-bf8e-a45508e67c1a 26 | jinja: '{{text}} Where does the above sentence stand on climate change? ||| 27 | 28 | {{answer_choices[label]}}' 29 | metadata: !TemplateMetadata 30 | choices_in_prompt: false 31 | languages: 32 | - en 33 | metrics: 34 | - Accuracy 35 | original_task: true 36 | name: climate_guess_passive 37 | reference: '' 38 | 703f067e-5930-424e-9882-48063307ff8e: !Template 39 | answer_choices: Neutral ||| Against ||| In favor 40 | id: 703f067e-5930-424e-9882-48063307ff8e 41 | jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", 42 | ")}} 43 | 44 | 45 | {{text}} ||| 46 | 47 | {{answer_choices[label]}}' 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: true 50 | languages: 51 | - en 52 | metrics: 53 | - Accuracy 54 | original_task: true 55 | name: climate_how_describe 56 | reference: '' 57 | 8ecd5059-742a-4833-95a1-bf0e25e9abfc: !Template 58 | answer_choices: Neutral ||| Against ||| In favor 59 | id: 8ecd5059-742a-4833-95a1-bf0e25e9abfc 60 | jinja: '{{text}} Where does the author of the above sentence stand on climate 61 | change? ||| 62 | 63 | {{answer_choices[label]}}' 64 | metadata: !TemplateMetadata 65 | choices_in_prompt: false 66 | languages: 67 | - en 68 | metrics: 69 | - Accuracy 70 | original_task: true 71 | name: climate_guess_passive_author 72 | reference: '' 73 | cd82620b-6d1d-42f7-af89-56980cbb69a5: !Template 74 | answer_choices: Neutral ||| Against ||| In favor 75 | id: cd82620b-6d1d-42f7-af89-56980cbb69a5 76 | jinja: 'Does the author express any stance about climate change in the following 77 | text? 78 | 79 | 80 | {{text}} ||| 81 | 82 | {{answer_choices[label]}}' 83 | metadata: !TemplateMetadata 84 | choices_in_prompt: false 85 | languages: 86 | - en 87 | metrics: 88 | - Accuracy 89 | original_task: true 90 | name: climate_predict_stance 91 | reference: '' 92 | edcdde10-b2e4-4954-82e2-f84fd57fc122: !Template 93 | answer_choices: Neutral ||| Against ||| In favor 94 | id: edcdde10-b2e4-4954-82e2-f84fd57fc122 95 | jinja: 'Is this tweet neutral, in favor of, or against climate change? 
96 | 97 | 98 | {{text}} ||| 99 | 100 | {{answer_choices[label]}}' 101 | metadata: !TemplateMetadata 102 | choices_in_prompt: true 103 | languages: 104 | - en 105 | metrics: 106 | - Accuracy 107 | original_task: true 108 | name: climate_change 109 | reference: '' 110 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/stance_feminist/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: stance_feminist 3 | templates: 4 | 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8b: !Template 5 | answer_choices: Neutral ||| Against ||| In favor 6 | id: 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8b 7 | jinja: '{{text}} Where does the above sentence stand on feminism? ||| 8 | 9 | {{answer_choices[label]}}' 10 | metadata: !TemplateMetadata 11 | choices_in_prompt: false 12 | languages: 13 | - en 14 | metrics: 15 | - Accuracy 16 | original_task: true 17 | name: feminism_guess_passive 18 | reference: '' 19 | 4309e10d-c9a9-4a17-8561-15270b99890b: !Template 20 | answer_choices: Neutral ||| Against ||| In favor 21 | id: 4309e10d-c9a9-4a17-8561-15270b99890b 22 | jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", 23 | ")}} 24 | 25 | 26 | {{text}} ||| 27 | 28 | {{answer_choices[label]}}' 29 | metadata: !TemplateMetadata 30 | choices_in_prompt: true 31 | languages: 32 | - en 33 | metrics: 34 | - Accuracy 35 | original_task: true 36 | name: feminism_how_describe 37 | reference: '' 38 | 7e47c6b8-2923-4580-a275-a2b8867a3d9b: !Template 39 | answer_choices: Neutral ||| Against ||| In favor 40 | id: 7e47c6b8-2923-4580-a275-a2b8867a3d9b 41 | jinja: 'What option among, {{"neutral"}}, {{"against"}}, {{"in favor"}}, best 42 | describes the stance of this tweet regarding feminism? 43 | 44 | 45 | {{text}} ||| 46 | 47 | {{answer_choices[label]}}' 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: true 50 | languages: 51 | - en 52 | metrics: 53 | - Accuracy 54 | original_task: true 55 | name: feminism_option 56 | reference: '' 57 | 89aa258e-3c3b-4d1c-8ac4-fe2c838b76eb: !Template 58 | answer_choices: Neutral ||| Against ||| In favor 59 | id: 89aa258e-3c3b-4d1c-8ac4-fe2c838b76eb 60 | jinja: 'Does the author express any stance about feminism in the following text? 61 | 62 | 63 | {{text}} ||| 64 | 65 | {{answer_choices[label]}}' 66 | metadata: !TemplateMetadata 67 | choices_in_prompt: false 68 | languages: 69 | - en 70 | metrics: 71 | - Accuracy 72 | original_task: true 73 | name: feminism_predict_stance 74 | reference: '' 75 | 97ef9418-7c92-455d-a4c5-d7b91668278b: !Template 76 | answer_choices: Neutral ||| Against ||| In favor 77 | id: 97ef9418-7c92-455d-a4c5-d7b91668278b 78 | jinja: 'Is this tweet neutral, in favor of, or against feminism? 79 | 80 | 81 | {{text}} ||| 82 | 83 | {{answer_choices[label]}}' 84 | metadata: !TemplateMetadata 85 | choices_in_prompt: true 86 | languages: 87 | - en 88 | metrics: 89 | - Accuracy 90 | original_task: true 91 | name: feminism 92 | reference: '' 93 | f28307ab-563e-4189-99b5-e0d858e9ab4b: !Template 94 | answer_choices: Neutral ||| Against ||| In favor 95 | id: f28307ab-563e-4189-99b5-e0d858e9ab4b 96 | jinja: '{{text}} Where does the author of the above sentence stand on feminism? 
97 | ||| 98 | 99 | {{answer_choices[label]}}' 100 | metadata: !TemplateMetadata 101 | choices_in_prompt: false 102 | languages: 103 | - en 104 | metrics: 105 | - Accuracy 106 | original_task: true 107 | name: feminism_guess_passive_author 108 | reference: '' 109 | -------------------------------------------------------------------------------- /promptsource/templates/tweet_eval/stance_hillary/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: tweet_eval 2 | subset: stance_hillary 3 | templates: 4 | 21ba1c40-b491-43ed-96d6-7423b55c3bcf: !Template 5 | answer_choices: Neutral ||| Against ||| In favor 6 | id: 21ba1c40-b491-43ed-96d6-7423b55c3bcf 7 | jinja: 'Does the author express any stance about Hillary in the following text? 8 | 9 | 10 | {{text}} ||| 11 | 12 | {{answer_choices[label]}}' 13 | metadata: !TemplateMetadata 14 | choices_in_prompt: false 15 | languages: 16 | - en 17 | metrics: 18 | - Accuracy 19 | original_task: true 20 | name: Hillary_predict_stance 21 | reference: '' 22 | 41502ea8-73a4-48a4-a15e-ab2ac7700457: !Template 23 | answer_choices: Neutral ||| Against ||| In favor 24 | id: 41502ea8-73a4-48a4-a15e-ab2ac7700457 25 | jinja: 'What option among, {{"neutral"}}, {{"against"}}, {{"in favor"}}, best 26 | describes the stance of this tweet regarding Hillary? 27 | 28 | 29 | {{text}} ||| 30 | 31 | {{answer_choices[label]}}' 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: true 34 | languages: 35 | - en 36 | metrics: 37 | - Accuracy 38 | original_task: true 39 | name: Hillary_option 40 | reference: '' 41 | 498f1dec-12dc-4082-a44e-82fcae004bb8: !Template 42 | answer_choices: Neutral ||| Against ||| In favor 43 | id: 498f1dec-12dc-4082-a44e-82fcae004bb8 44 | jinja: 'Is this tweet neutral, in favor of, or against Hillary? 45 | 46 | 47 | {{text}} ||| 48 | 49 | {{answer_choices[label]}}' 50 | metadata: !TemplateMetadata 51 | choices_in_prompt: true 52 | languages: 53 | - en 54 | metrics: 55 | - Accuracy 56 | original_task: true 57 | name: Hillary 58 | reference: '' 59 | 5c451846-349a-44ad-83ef-d0f8e5d2bd6b: !Template 60 | answer_choices: Neutral ||| Against ||| In favor 61 | id: 5c451846-349a-44ad-83ef-d0f8e5d2bd6b 62 | jinja: '{{text}} Where does the above sentence stand on Hillary? ||| 63 | 64 | {{answer_choices[label]}}' 65 | metadata: !TemplateMetadata 66 | choices_in_prompt: false 67 | languages: 68 | - en 69 | metrics: 70 | - Accuracy 71 | original_task: true 72 | name: Hillary_guess_passive 73 | reference: '' 74 | 83f10728-2347-46e9-b365-724f47e65877: !Template 75 | answer_choices: Neutral ||| Against ||| In favor 76 | id: 83f10728-2347-46e9-b365-724f47e65877 77 | jinja: '{{text}} Where does the author of the above sentence stand on Hillary? 78 | ||| 79 | 80 | {{answer_choices[label]}}' 81 | metadata: !TemplateMetadata 82 | choices_in_prompt: false 83 | languages: 84 | - en 85 | metrics: 86 | - Accuracy 87 | original_task: true 88 | name: Hillary_guess_passive_author 89 | reference: '' 90 | b521857a-9d4f-4e21-848b-0baf7f4a636c: !Template 91 | answer_choices: Neutral ||| Against ||| In favor 92 | id: b521857a-9d4f-4e21-848b-0baf7f4a636c 93 | jinja: 'How would you describe the stance used in this tweet? 
{{answer_choices|join(", 94 | ")}} 95 | 96 | 97 | {{text}} ||| 98 | 99 | {{answer_choices[label]}}' 100 | metadata: !TemplateMetadata 101 | choices_in_prompt: true 102 | languages: 103 | - en 104 | metrics: 105 | - Accuracy 106 | original_task: true 107 | name: Hillary_how_describe 108 | reference: '' 109 | -------------------------------------------------------------------------------- /promptsource/templates/web_questions/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: web_questions 2 | templates: 3 | 427785bc-a8f3-4c86-bd43-e54447a58615: !Template 4 | answer_choices: null 5 | id: 427785bc-a8f3-4c86-bd43-e54447a58615 6 | jinja: 'Give me the correct facts to answer this: {{question}} ||| {{answers | 7 | choice}}' 8 | metadata: !TemplateMetadata 9 | choices_in_prompt: false 10 | languages: 11 | - en 12 | metrics: 13 | - Squad 14 | original_task: true 15 | name: get_the_answer 16 | reference: '' 17 | 9f4cd4a4-79e5-40b2-bb0d-f9a86396511a: !Template 18 | answer_choices: null 19 | id: 9f4cd4a4-79e5-40b2-bb0d-f9a86396511a 20 | jinja: Give me a possible correct answer to the question "{{ question }}" ||| 21 | {{ answers | choice }} 22 | metadata: !TemplateMetadata 23 | choices_in_prompt: false 24 | languages: 25 | - en 26 | metrics: 27 | - Squad 28 | original_task: true 29 | name: potential-correct-answer 30 | reference: '' 31 | bfed45a7-b36c-440b-8c94-f117cc6c9f34: !Template 32 | answer_choices: null 33 | id: bfed45a7-b36c-440b-8c94-f117cc6c9f34 34 | jinja: 'What''s the answer to that question: {{question}} ||| {{answers | choice}}' 35 | metadata: !TemplateMetadata 36 | choices_in_prompt: false 37 | languages: 38 | - en 39 | metrics: 40 | - Squad 41 | original_task: true 42 | name: whats_the_answer 43 | reference: '' 44 | df08956c-035b-4216-af1c-61250617faa4: !Template 45 | answer_choices: null 46 | id: df08956c-035b-4216-af1c-61250617faa4 47 | jinja: 'Short general knowledge question: {{question}} ||| {{answers | choice}}' 48 | metadata: !TemplateMetadata 49 | choices_in_prompt: false 50 | languages: 51 | - en 52 | metrics: 53 | - Squad 54 | original_task: true 55 | name: short_general_knowledge_q 56 | reference: '' 57 | e5c72a6b-8ab4-4219-9f41-debf7224884c: !Template 58 | answer_choices: null 59 | id: e5c72a6b-8ab4-4219-9f41-debf7224884c 60 | jinja: '{{ question|capitalize }} ||| {{ answers | choice }}' 61 | metadata: !TemplateMetadata 62 | choices_in_prompt: false 63 | languages: 64 | - en 65 | metrics: 66 | - Squad 67 | original_task: true 68 | name: question-answer 69 | reference: '' 70 | -------------------------------------------------------------------------------- /promptsource/templates/xquad/xquad.en/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: xquad 2 | subset: xquad.en 3 | templates: 4 | 10efb2e0-390c-4bab-9dc7-d90db707b6ae: !Template 5 | answer_choices: null 6 | id: 10efb2e0-390c-4bab-9dc7-d90db707b6ae 7 | jinja: '{{context}} 8 | 9 | 10 | Generate a question from the above passage : ||| {{question}}' 11 | metadata: !TemplateMetadata 12 | choices_in_prompt: false 13 | languages: 14 | - en 15 | metrics: 16 | - BLEU 17 | - ROUGE 18 | original_task: false 19 | name: given_context_generate_question 20 | reference: '' 21 | 120fffe0-b752-43f8-bf50-ecf009703ef0: !Template 22 | answer_choices: null 23 | id: 120fffe0-b752-43f8-bf50-ecf009703ef0 24 | jinja: '{{context}} 25 | 26 | 27 | Q: {{question}} 28 | 29 | 30 | Referring to the passage above, the correct answer 
to the given question is 31 | ||| {{answers["text"][0]}}' 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: false 34 | languages: 35 | - en 36 | metrics: 37 | - Squad 38 | original_task: true 39 | name: answer_given_context_and_question 40 | reference: '' 41 | 32a9896f-34d5-4bde-8843-6d01d4621016: !Template 42 | answer_choices: null 43 | id: 32a9896f-34d5-4bde-8843-6d01d4621016 44 | jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\ 45 | \n{{answers.text[0]}}" 46 | metadata: !TemplateMetadata 47 | choices_in_prompt: false 48 | languages: 49 | - en 50 | metrics: 51 | - Squad 52 | original_task: true 53 | name: answer_the_question 54 | reference: '' 55 | 4bae0661-a3e5-448a-bfa2-69b096b01283: !Template 56 | answer_choices: null 57 | id: 4bae0661-a3e5-448a-bfa2-69b096b01283 58 | jinja: '{{context}} 59 | 60 | 61 | From the above passage, a reasonable question with "{{answers["text"][0]}}" 62 | as the answer would be: ||| {{question}}' 63 | metadata: !TemplateMetadata 64 | choices_in_prompt: false 65 | languages: 66 | - en 67 | metrics: 68 | - BLEU 69 | - ROUGE 70 | original_task: false 71 | name: jeopardy 72 | reference: jeopardy style- wiki_qa 73 | 90b53380-5c3b-4884-8cd1-9b4316da7993: !Template 74 | answer_choices: null 75 | id: 90b53380-5c3b-4884-8cd1-9b4316da7993 76 | jinja: 'Refer to the passage below and answer the following question: 77 | 78 | 79 | Passage: {{context}} 80 | 81 | 82 | Question: {{question}} 83 | 84 | ||| 85 | 86 | {{answers["text"][0]}}' 87 | metadata: !TemplateMetadata 88 | choices_in_prompt: false 89 | languages: 90 | - en 91 | metrics: 92 | - Squad 93 | original_task: true 94 | name: answer_question_given_context 95 | reference: '' 96 | 9cff064e-97e0-4026-94bc-3f7987856ec7: !Template 97 | answer_choices: null 98 | id: 9cff064e-97e0-4026-94bc-3f7987856ec7 99 | jinja: '{{context}} 100 | 101 | 102 | Q: {{question}} 103 | 104 | 105 | A: ||| {{answers["text"][0]}}' 106 | metadata: !TemplateMetadata 107 | choices_in_prompt: false 108 | languages: 109 | - en 110 | metrics: 111 | - Squad 112 | original_task: true 113 | name: given_context_answer_question_variation 114 | reference: '' 115 | -------------------------------------------------------------------------------- /promptsource/templates/xquad_r/en/templates.yaml: -------------------------------------------------------------------------------- 1 | dataset: xquad_r 2 | subset: en 3 | templates: 4 | 2b5ecd1b-4a91-420b-b91a-ad5d8b19e42a: !Template 5 | answer_choices: null 6 | id: 2b5ecd1b-4a91-420b-b91a-ad5d8b19e42a 7 | jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\ 8 | \n{{answers.text[0]}}" 9 | metadata: !TemplateMetadata 10 | choices_in_prompt: false 11 | languages: 12 | - en 13 | metrics: 14 | - Squad 15 | original_task: true 16 | name: answer_the_question 17 | reference: '' 18 | ad458b3c-7eb1-4011-9a9b-4e289f172160: !Template 19 | answer_choices: null 20 | id: ad458b3c-7eb1-4011-9a9b-4e289f172160 21 | jinja: 'Refer to the passage below and answer the following question: 22 | 23 | 24 | Passage: {{context}} 25 | 26 | 27 | Question: {{question}} 28 | 29 | ||| 30 | 31 | {{answers["text"][0]}}' 32 | metadata: !TemplateMetadata 33 | choices_in_prompt: false 34 | languages: 35 | - en 36 | metrics: 37 | - Squad 38 | original_task: true 39 | name: answer_question_given_context 40 | reference: '' 41 | b52c4840-a5c2-4bb1-ab39-0d533d56b336: !Template 42 | answer_choices: null 43 | id: b52c4840-a5c2-4bb1-ab39-0d533d56b336 44 | jinja: '{{context}} 45 | 46 | 47 | 
Q: {{question}} 48 | 49 | 50 | Referring to the passage above, the correct answer to the given question is 51 | ||| {{answers["text"][0]}}' 52 | metadata: !TemplateMetadata 53 | choices_in_prompt: false 54 | languages: 55 | - en 56 | metrics: 57 | - Squad 58 | original_task: true 59 | name: answer_given_context_and_question 60 | reference: '' 61 | c559afa7-10dc-47ed-b407-e1799c835027: !Template 62 | answer_choices: null 63 | id: c559afa7-10dc-47ed-b407-e1799c835027 64 | jinja: '{{context}} 65 | 66 | 67 | Generate a question from the above passage : ||| {{question}}' 68 | metadata: !TemplateMetadata 69 | choices_in_prompt: false 70 | languages: 71 | - en 72 | metrics: 73 | - BLEU 74 | - ROUGE 75 | original_task: false 76 | name: given_context_generate_question 77 | reference: '' 78 | d853848e-df82-4575-9c40-98c44ddd9d3f: !Template 79 | answer_choices: null 80 | id: d853848e-df82-4575-9c40-98c44ddd9d3f 81 | jinja: '{{context}} 82 | 83 | 84 | From the above passage, a reasonable question with "{{answers["text"][0]}}" 85 | as the answer would be: ||| {{question}}' 86 | metadata: !TemplateMetadata 87 | choices_in_prompt: false 88 | languages: 89 | - en 90 | metrics: 91 | - BLEU 92 | - ROUGE 93 | original_task: false 94 | name: jeopardy 95 | reference: jeopardy style- wiki_qa 96 | fd8c7cf6-3654-4622-bdff-b4f9fae52871: !Template 97 | answer_choices: null 98 | id: fd8c7cf6-3654-4622-bdff-b4f9fae52871 99 | jinja: '{{context}} 100 | 101 | 102 | Q: {{question}} 103 | 104 | 105 | A: ||| {{answers["text"][0]}}' 106 | metadata: !TemplateMetadata 107 | choices_in_prompt: false 108 | languages: 109 | - en 110 | metrics: 111 | - Squad 112 | original_task: true 113 | name: given_context_answer_question_variation 114 | reference: '' 115 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | ensure_newline_before_comments = True 3 | force_grid_wrap = 0 4 | include_trailing_comma = True 5 | line_length = 119 6 | lines_after_imports = 2 7 | multi_line_output = 3 8 | use_parentheses = True 9 | 10 | 11 | [flake8] 12 | ignore = E203, E501, W503 13 | max-line-length = 119 14 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | with open("README.md", "r", encoding="utf-8") as readme_file: 4 | readme = readme_file.read() 5 | 6 | requirements = [ 7 | "black<=21.12b0", 8 | "datasets>=1.7.0", 9 | "flake8", 10 | "isort==5.8.0", 11 | "pytest", 12 | "pyyaml>=5", 13 | "streamlit==0.82", 14 | "jinja2", 15 | "plotly", 16 | "requests", 17 | "pandas", 18 | ############################################################## 19 | # Dependencies in this section are added for specific datasets 20 | ############################################################## 21 | "py7zr", 22 | ############################################################## 23 | # End of dataset-specific dependencies 24 | ############################################################## 25 | ] 26 | 27 | setup( 28 | name='promptsource', 29 | version='0.2.3', 30 | url='https://github.com/bigscience-workshop/promptsource.git', 31 | author='BigScience - Prompt Engineering Working Group', 32 | author_email='sbach@cs.brown.edu,victor@huggingface.co', 33 | python_requires='>=3.7,<4.0', 34 | install_requires=requirements, 35 | classifiers=[ 36 | 'Development Status :: 2 - 
Pre-Alpha', 37 | 'Intended Audience :: Developers', 38 | 'License :: OSI Approved :: Apache Software License', 39 | 'Natural Language :: English', 40 | 'Programming Language :: Python :: 3', 41 | 'Programming Language :: Python :: 3.7', 42 | 'Programming Language :: Python :: 3.8', 43 | 'Programming Language :: Python :: 3.9', 44 | 'Programming Language :: Python :: 3.10', 45 | ], 46 | description='An Integrated Development Environment and Repository for Natural Language Prompts.', 47 | packages=find_packages(), 48 | license="Apache Software License 2.0", 49 | long_description=readme, 50 | long_description_content_type="text/markdown", 51 | package_data={"": [ 52 | "templates/*/*.yaml", 53 | "templates/*/*/*.yaml", 54 | ]} 55 | ) 56 | -------------------------------------------------------------------------------- /test/show_templates.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import textwrap 3 | 4 | from promptsource.templates import TemplateCollection, INCLUDED_USERS 5 | from promptsource.utils import get_dataset 6 | 7 | 8 | parser = argparse.ArgumentParser(description="Process some integers.") 9 | parser.add_argument("dataset_path", type=str, help="path to dataset name") 10 | 11 | args = parser.parse_args() 12 | if "templates.yaml" not in args.dataset_path: 13 | exit() 14 | 15 | path = args.dataset_path.split("/") 16 | 17 | if path[2] in INCLUDED_USERS: 18 | print("Skipping showing templates for community dataset.") 19 | else: 20 | dataset_name = path[2] 21 | subset_name = path[3] if len(path) == 5 else "" 22 | 23 | template_collection = TemplateCollection() 24 | 25 | dataset = get_dataset(dataset_name, subset_name) 26 | splits = list(dataset.keys()) 27 | 28 | dataset_templates = template_collection.get_dataset(dataset_name, subset_name) 29 | template_list = dataset_templates.all_template_names 30 | 31 | width = 80 32 | print("DATASET ", args.dataset_path) 33 | 34 | # First show all the templates. 35 | for template_name in template_list: 36 | template = dataset_templates[template_name] 37 | print("TEMPLATE") 38 | print("NAME:", template_name) 39 | print("Is Original Task: ", template.metadata.original_task) 40 | print(template.jinja) 41 | print() 42 | 43 | # Show examples of the templates. 44 | for template_name in template_list: 45 | template = dataset_templates[template_name] 46 | print() 47 | print("TEMPLATE") 48 | print("NAME:", template_name) 49 | print("REFERENCE:", template.reference) 50 | print("--------") 51 | print() 52 | print(template.jinja) 53 | print() 54 | 55 | for split_name in splits: 56 | dataset_split = dataset[split_name] 57 | 58 | print_counter = 0 59 | for example in dataset_split: 60 | print("\t--------") 61 | print("\tSplit ", split_name) 62 | print("\tExample ", example) 63 | print("\t--------") 64 | output = template.apply(example) 65 | if output[0].strip() == "" or (len(output) > 1 and output[1].strip() == ""): 66 | print("\t Blank result") 67 | continue 68 | 69 | xp, yp = output 70 | print() 71 | print("\tPrompt | X") 72 | for line in textwrap.wrap(xp, width=width, replace_whitespace=False): 73 | print("\t", line.replace("\n", "\n\t")) 74 | print() 75 | print("\tY") 76 | for line in textwrap.wrap(yp, width=width, replace_whitespace=False): 77 | print("\t", line.replace("\n", "\n\t")) 78 | 79 | print_counter += 1 80 | if print_counter >= 10: 81 | break 82 | --------------------------------------------------------------------------------
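A minimal usage sketch, not part of the repository: each templates.yaml above is consumed through the same TemplateCollection API that test/show_templates.py drives. The snippet below loads the squad templates and renders the answer_question_given_context prompt shown earlier. The SQuAD-style record and its field values are made up for illustration, and the empty-string subset argument follows the convention test/show_templates.py uses for datasets without a subset.

# Sketch only: the example record below is hypothetical; real records would come from
# promptsource.utils.get_dataset, as in test/show_templates.py.
from promptsource.templates import TemplateCollection

collection = TemplateCollection()
# "" stands in for "no subset", matching how test/show_templates.py calls get_dataset.
squad_templates = collection.get_dataset("squad", "")

template = squad_templates["answer_question_given_context"]
example = {
    "context": "The Amazon rainforest covers much of the Amazon basin of South America.",
    "question": "What does the Amazon rainforest cover?",
    "answers": {"text": ["much of the Amazon basin of South America"], "answer_start": [29]},
}

# apply() renders the Jinja template and splits it on the ||| separator,
# yielding the input prompt and the target text.
prompt, target = template.apply(example)
print(prompt)
print(target)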