├── .env.template ├── .github └── workflows │ ├── check_filesize.yml │ ├── quality.yml │ ├── run_evaluation_jobs.yml │ └── sync_with_spaces.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── app.py ├── evaluation.py ├── images ├── autotrain_job.png └── autotrain_projects.png ├── notebooks └── flush-prediction-repos.ipynb ├── pyproject.toml ├── requirements.txt ├── run_evaluation_jobs.py └── utils.py /.env.template: -------------------------------------------------------------------------------- 1 | AUTOTRAIN_USERNAME=autoevaluator # The bot or user that authors evaluation jobs 2 | HF_TOKEN=hf_xxx # An API token of the `autoevaluator` user 3 | AUTOTRAIN_BACKEND_API=https://api-staging.autotrain.huggingface.co # The AutoTrain backend to send jobs to. Use https://api.autotrain.huggingface.co for prod or http://localhost:8000 for local development 4 | DATASETS_PREVIEW_API=https://datasets-server.huggingface.co # The API to grab dataset information from -------------------------------------------------------------------------------- /.github/workflows/check_filesize.yml: -------------------------------------------------------------------------------- 1 | name: Check file size 2 | on: # or directly `on: [push]` to run the action on every push on any branch 3 | pull_request: 4 | branches: [main] 5 | 6 | # to run this workflow manually from the Actions tab 7 | workflow_dispatch: 8 | 9 | jobs: 10 | sync-to-hub: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Check large files 14 | uses: ActionsDesk/lfs-warning@v2.0 15 | with: 16 | filesizelimit: 10485760 # this is 10MB so we can sync to HF Spaces -------------------------------------------------------------------------------- /.github/workflows/quality.yml: -------------------------------------------------------------------------------- 1 | name: Code quality 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | 13 | check_code_quality: 14 | name: Check code quality 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout code 18 | uses: actions/checkout@v2 19 | - name: Setup Python environment 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: 3.9 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | python -m pip install black isort flake8 27 | - name: Code quality 28 | run: | 29 | make quality -------------------------------------------------------------------------------- /.github/workflows/run_evaluation_jobs.yml: -------------------------------------------------------------------------------- 1 | name: Start evaluation jobs 2 | 3 | on: 4 | schedule: 5 | - cron: '*/15 * * * *' # Start evaluations every 15th minute 6 | 7 | jobs: 8 | 9 | build: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v2 15 | 16 | - name: Setup Python Environment 17 | uses: actions/setup-python@v2 18 | with: 19 | python-version: 3.8 20 | 21 | - name: Install requirements 22 | run: pip install -r requirements.txt 23 | 24 | - name: Execute scoring script 25 | env: 26 | HF_TOKEN: ${{ secrets.HF_TOKEN }} 27 | AUTOTRAIN_USERNAME: ${{ secrets.AUTOTRAIN_USERNAME }} 28 | AUTOTRAIN_BACKEND_API: ${{ secrets.AUTOTRAIN_BACKEND_API }} 29 | run: | 30 | HF_TOKEN=$HF_TOKEN AUTOTRAIN_USERNAME=$AUTOTRAIN_USERNAME AUTOTRAIN_BACKEND_API=$AUTOTRAIN_BACKEND_API python run_evaluation_jobs.py -------------------------------------------------------------------------------- /.github/workflows/sync_with_spaces.yml: 
-------------------------------------------------------------------------------- 1 | name: Sync to Hugging Face hub 2 | on: 3 | push: 4 | branches: [main] 5 | 6 | # to run this workflow manually from the Actions tab 7 | workflow_dispatch: 8 | 9 | jobs: 10 | sync-to-hub: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | with: 15 | fetch-depth: 0 16 | - name: Push to hub 17 | env: 18 | HF_TOKEN: ${{ secrets.HF_TOKEN }} 19 | run: | 20 | git push https://lewtun:$HF_TOKEN@huggingface.co/spaces/autoevaluate/model-evaluator main 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | scratch/ 132 | 133 | # Evaluation job logs 134 | evaluation-job-logs/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | style: 2 | python -m black --line-length 119 --target-version py39 . 3 | python -m isort . 4 | 5 | quality: 6 | python -m black --check --line-length 119 --target-version py39 . 7 | python -m isort --check-only . 
8 | python -m flake8 --max-line-length 119 --exclude app.py -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Model Evaluator 3 | emoji: 📊 4 | colorFrom: red 5 | colorTo: red 6 | sdk: streamlit 7 | sdk_version: 1.10.0 8 | app_file: app.py 9 | --- 10 | 11 | # Model Evaluator 12 | 13 | > Submit evaluation jobs to AutoTrain from the Hugging Face Hub 14 | 15 | **⚠️ This project has been archived. If you want to evaluate LLMs, check out [this collection](https://huggingface.co/collections/clefourrier/llm-leaderboards-and-benchmarks-✨-64f99d2e11e92ca5568a7cce) of leaderboards.** 16 | 17 | ## Supported tasks 18 | 19 | The table below shows which tasks are currently supported for evaluation in the AutoTrain backend: 20 | 21 | | Task                               | Supported | 22 | |:-----------------------------------|:---------:| 23 | | `binary_classification`            | ✅        | 24 | | `multi_class_classification`       | ✅        | 25 | | `multi_label_classification`       | ❌        | 26 | | `entity_extraction`                | ✅        | 27 | | `extractive_question_answering`    | ✅        | 28 | | `translation`                      | ✅        | 29 | | `summarization`                    | ✅        | 30 | | `image_binary_classification`      | ✅        | 31 | | `image_multi_class_classification` | ✅        | 32 | | `text_zero_shot_classification`    | ✅        | 33 | 34 | 35 | ## Installation 36 | 37 | To run the application locally, first clone this repository and install the dependencies as follows: 38 | 39 | ``` 40 | pip install -r requirements.txt 41 | ``` 42 | 43 | Next, copy the example file of environment variables: 44 | 45 | ``` 46 | cp .env.template .env 47 | ``` 48 | 49 | and set the `HF_TOKEN` variable to a valid API token from the [`autoevaluator`](https://huggingface.co/autoevaluator) bot user. Finally, spin up the application by running: 50 | 51 | ``` 52 | streamlit run app.py 53 | ``` 54 | 55 | ## Usage 56 | 57 | Evaluation on the Hub involves two main steps: 58 | 59 | 1. Submitting an evaluation job via the UI. This creates an AutoTrain project with `N` models for evaluation. At this stage, the dataset is also processed and prepared for evaluation. 60 | 2. Triggering the evaluation itself once the dataset is processed. 61 | 62 | From the user perspective, only step (1) is needed since step (2) is handled by a cron job on GitHub Actions that executes the `run_evaluation_jobs.py` script every 15 minutes. 63 | 64 | See below for details on manually triggering evaluation jobs. 65 | 66 | ### Triggering an evaluation 67 | 68 | To evaluate the models in an AutoTrain project, run: 69 | 70 | ``` 71 | python run_evaluation_jobs.py 72 | ``` 73 | 74 | This will download the [`autoevaluate/evaluation-job-logs`](https://huggingface.co/datasets/autoevaluate/evaluation-job-logs) dataset from the Hub and check which evaluation projects are ready for evaluation (i.e. those whose dataset has been processed). 75 | 76 | ## AutoTrain configuration details 77 | 78 | Models are evaluated by the [`autoevaluator`](https://huggingface.co/autoevaluator) bot user in AutoTrain, with the payload sent to the backend defined by the `AUTOTRAIN_BACKEND_API` environment variable. Evaluation projects are created and run in either the `prod` or `staging` environment.
You can view the status of projects in the AutoTrain UI by navigating to one of the links below (ask internally for access to the staging UI): 79 | 80 | | AutoTrain environment | AutoTrain UI URL | `AUTOTRAIN_BACKEND_API` | 81 | |:---------------------:|:--------------------------------------------------------------------------------------------------------------:|:--------------------------------------------:| 82 | | `prod` | [`https://ui.autotrain.huggingface.co/projects`](https://ui.autotrain.huggingface.co/projects) | https://api.autotrain.huggingface.co | 83 | | `staging` | [`https://ui-staging.autotrain.huggingface.co/projects`](https://ui-staging.autotrain.huggingface.co/projects) | https://api-staging.autotrain.huggingface.co | 84 | 85 | 86 | The current configuration for evaluation jobs running on [Spaces](https://huggingface.co/spaces/autoevaluate/model-evaluator) is: 87 | 88 | ``` 89 | AUTOTRAIN_BACKEND_API=https://api.autotrain.huggingface.co 90 | ``` 91 | 92 | To evaluate models with a _local_ instance of AutoTrain, change the environment to: 93 | 94 | ``` 95 | AUTOTRAIN_BACKEND_API=http://localhost:8000 96 | ``` 97 | 98 | ### Migrating from staging to production (and vice versa) 99 | 100 | In general, evaluation jobs should run in AutoTrain's `prod` environment, which is defined by the following environment variable: 101 | 102 | ``` 103 | AUTOTRAIN_BACKEND_API=https://api.autotrain.huggingface.co 104 | ``` 105 | 106 | However, there are times when it is necessary to run evaluation jobs in AutoTrain's `staging` environment (e.g. because a new evaluation pipeline is being deployed). In these cases the corresponding environment variable is: 107 | 108 | ``` 109 | AUTOTRAIN_BACKEND_API=https://api-staging.autotrain.huggingface.co 110 | ``` 111 | 112 | To migrate between these two environments, update the `AUTOTRAIN_BACKEND_API` secret in two places: 113 | 114 | * In the [repo secrets](https://huggingface.co/spaces/autoevaluate/model-evaluator/settings) associated with the `model-evaluator` Space. This will ensure evaluation projects are created in the desired environment. 115 | * In the [GitHub Actions secrets](https://github.com/huggingface/model-evaluator/settings/secrets/actions) associated with this repo. This will ensure that the correct evaluation jobs are approved and launched via the `run_evaluation_jobs.py` script.
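After updating both secrets, a quick sanity check is to run the approval script locally with the new backend value (the token below is a placeholder, as in `.env.template`) and confirm the environment it reports on startup:

```
HF_TOKEN=hf_xxx AUTOTRAIN_USERNAME=autoevaluator AUTOTRAIN_BACKEND_API=https://api-staging.autotrain.huggingface.co python run_evaluation_jobs.py
# 💡 Starting jobs on staging environment
```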
116 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | from pathlib import Path 4 | 5 | import pandas as pd 6 | import streamlit as st 7 | import yaml 8 | from datasets import get_dataset_config_names 9 | from dotenv import load_dotenv 10 | from huggingface_hub import list_datasets 11 | 12 | from evaluation import filter_evaluated_models 13 | from utils import ( 14 | AUTOTRAIN_TASK_TO_HUB_TASK, 15 | commit_evaluation_log, 16 | create_autotrain_project_name, 17 | format_col_mapping, 18 | get_compatible_models, 19 | get_config_metadata, 20 | get_dataset_card_url, 21 | get_key, 22 | get_metadata, 23 | http_get, 24 | http_post, 25 | ) 26 | 27 | if Path(".env").is_file(): 28 | load_dotenv(".env") 29 | 30 | HF_TOKEN = os.getenv("HF_TOKEN") 31 | AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME") 32 | AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API") 33 | DATASETS_PREVIEW_API = os.getenv("DATASETS_PREVIEW_API") 34 | 35 | # Put image tasks on top 36 | TASK_TO_ID = { 37 | "image_binary_classification": 17, 38 | "image_multi_class_classification": 18, 39 | "binary_classification": 1, 40 | "multi_class_classification": 2, 41 | "natural_language_inference": 22, 42 | "entity_extraction": 4, 43 | "extractive_question_answering": 5, 44 | "translation": 6, 45 | "summarization": 8, 46 | "text_zero_shot_classification": 23, 47 | } 48 | 49 | TASK_TO_DEFAULT_METRICS = { 50 | "binary_classification": ["f1", "precision", "recall", "auc", "accuracy"], 51 | "multi_class_classification": [ 52 | "f1", 53 | "precision", 54 | "recall", 55 | "accuracy", 56 | ], 57 | "natural_language_inference": ["f1", "precision", "recall", "auc", "accuracy"], 58 | "entity_extraction": ["precision", "recall", "f1", "accuracy"], 59 | "extractive_question_answering": ["f1", "exact_match"], 60 | "translation": ["sacrebleu"], 61 | "summarization": ["rouge1", "rouge2", "rougeL", "rougeLsum"], 62 | "image_binary_classification": ["f1", "precision", "recall", "auc", "accuracy"], 63 | "image_multi_class_classification": [ 64 | "f1", 65 | "precision", 66 | "recall", 67 | "accuracy", 68 | ], 69 | "text_zero_shot_classification": ["accuracy", "loss"], 70 | } 71 | 72 | AUTOTRAIN_TASK_TO_LANG = { 73 | "translation": "en2de", 74 | "image_binary_classification": "unk", 75 | "image_multi_class_classification": "unk", 76 | } 77 | 78 | AUTOTRAIN_MACHINE = {"text_zero_shot_classification": "r5.16x"} 79 | 80 | 81 | SUPPORTED_TASKS = list(TASK_TO_ID.keys()) 82 | 83 | # Extracted from utils.get_supported_metrics 84 | # Hardcoded for now due to speed / caching constraints 85 | SUPPORTED_METRICS = [ 86 | "accuracy", 87 | "bertscore", 88 | "bleu", 89 | "cer", 90 | "chrf", 91 | "code_eval", 92 | "comet", 93 | "competition_math", 94 | "coval", 95 | "cuad", 96 | "exact_match", 97 | "f1", 98 | "frugalscore", 99 | "google_bleu", 100 | "mae", 101 | "mahalanobis", 102 | "matthews_correlation", 103 | "mean_iou", 104 | "meteor", 105 | "mse", 106 | "pearsonr", 107 | "perplexity", 108 | "precision", 109 | "recall", 110 | "roc_auc", 111 | "rouge", 112 | "sacrebleu", 113 | "sari", 114 | "seqeval", 115 | "spearmanr", 116 | "squad", 117 | "squad_v2", 118 | "ter", 119 | "trec_eval", 120 | "wer", 121 | "wiki_split", 122 | "xnli", 123 | "angelina-wang/directional_bias_amplification", 124 | "jordyvl/ece", 125 | "lvwerra/ai4code", 126 | "lvwerra/amex", 127 | ] 128 | 129 | 130 | ####### 131 | # APP # 132 | ####### 
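# Illustrative consistency check (hypothetical addition, not in the original file):
# SUPPORTED_TASKS is derived from TASK_TO_ID above, and the UI later looks up
# TASK_TO_DEFAULT_METRICS[selected_task], so the two mappings must cover exactly
# the same task names or rendering the metric badges would raise a KeyError.
assert set(TASK_TO_ID) == set(TASK_TO_DEFAULT_METRICS), "task mappings are out of sync"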
133 | st.title("Evaluation on the Hub") 134 | st.warning( 135 | "**⚠️ This project has been archived. If you want to evaluate LLMs, check out [this collection](https://huggingface.co/collections/clefourrier/llm-leaderboards-and-benchmarks-✨-64f99d2e11e92ca5568a7cce) of leaderboards.**" 136 | ) 137 | st.markdown( 138 | """ 139 | Welcome to Hugging Face's automatic model evaluator 👋! 140 | 141 | This application allows you to evaluate 🤗 Transformers 142 | [models](https://huggingface.co/models?library=transformers&sort=downloads) 143 | across a wide variety of [datasets](https://huggingface.co/datasets) on the 144 | Hub. Please select the dataset and configuration below. The results of your 145 | evaluation will be displayed on the [public 146 | leaderboards](https://huggingface.co/spaces/autoevaluate/leaderboards). For 147 | more details, check out our [blog 148 | post](https://huggingface.co/blog/eval-on-the-hub). 149 | """ 150 | ) 151 | 152 | # all_datasets = [d.id for d in list_datasets()] 153 | # query_params = st.experimental_get_query_params() 154 | # if "first_query_params" not in st.session_state: 155 | # st.session_state.first_query_params = query_params 156 | # first_query_params = st.session_state.first_query_params 157 | # default_dataset = all_datasets[0] 158 | # if "dataset" in first_query_params: 159 | # if len(first_query_params["dataset"]) > 0 and first_query_params["dataset"][0] in all_datasets: 160 | # default_dataset = first_query_params["dataset"][0] 161 | 162 | # selected_dataset = st.selectbox( 163 | # "Select a dataset", 164 | # all_datasets, 165 | # index=all_datasets.index(default_dataset), 166 | # help="""Datasets with metadata can be evaluated with 1-click. Configure an evaluation job to add \ 167 | # new metadata to a dataset card.""", 168 | # ) 169 | # st.experimental_set_query_params(**{"dataset": [selected_dataset]}) 170 | 171 | # # Check if selected dataset can be streamed 172 | # is_valid_dataset = http_get( 173 | # path="/is-valid", 174 | # domain=DATASETS_PREVIEW_API, 175 | # params={"dataset": selected_dataset}, 176 | # ).json() 177 | # if is_valid_dataset["viewer"] is False and is_valid_dataset["preview"] is False: 178 | # st.error( 179 | # """The dataset you selected is not currently supported. Open a \ 180 | # [discussion](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions) for support.""" 181 | # ) 182 | 183 | # metadata = get_metadata(selected_dataset, token=HF_TOKEN) 184 | # print(f"INFO -- Dataset metadata: {metadata}") 185 | # if metadata is None: 186 | # st.warning("No evaluation metadata found. Please configure the evaluation job below.") 187 | 188 | # with st.expander("Advanced configuration"): 189 | # # Select task 190 | # selected_task = st.selectbox( 191 | # "Select a task", 192 | # SUPPORTED_TASKS, 193 | # index=SUPPORTED_TASKS.index(metadata[0]["task_id"]) if metadata is not None else 0, 194 | # help="""Don't see your favourite task here? Open a \ 195 | # [discussion](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions) to request it!""", 196 | # ) 197 | # # Select config 198 | # configs = get_dataset_config_names(selected_dataset) 199 | # selected_config = st.selectbox( 200 | # "Select a config", 201 | # configs, 202 | # help="""Some datasets contain several sub-datasets, known as _configurations_. \ 203 | # Select one to evaluate your models on. \ 204 | # See the [docs](https://huggingface.co/docs/datasets/master/en/load_hub#configurations) for more details.
205 | # """, 206 | # ) 207 | # # Some datasets have multiple metadata (one per config), so we grab the one associated with the selected config 208 | # config_metadata = get_config_metadata(selected_config, metadata) 209 | # print(f"INFO -- Config metadata: {config_metadata}") 210 | 211 | # # Select splits 212 | # splits_resp = http_get( 213 | # path="/splits", 214 | # domain=DATASETS_PREVIEW_API, 215 | # params={"dataset": selected_dataset}, 216 | # ) 217 | # if splits_resp.status_code == 200: 218 | # split_names = [] 219 | # all_splits = splits_resp.json() 220 | # for split in all_splits["splits"]: 221 | # if split["config"] == selected_config: 222 | # split_names.append(split["split"]) 223 | 224 | # if config_metadata is not None: 225 | # eval_split = config_metadata["splits"].get("eval_split", None) 226 | # else: 227 | # eval_split = None 228 | # selected_split = st.selectbox( 229 | # "Select a split", 230 | # split_names, 231 | # index=split_names.index(eval_split) if eval_split is not None else 0, 232 | # help="Be wary when evaluating models on the `train` split.", 233 | # ) 234 | 235 | # # Select columns 236 | # rows_resp = http_get( 237 | # path="/first-rows", 238 | # domain=DATASETS_PREVIEW_API, 239 | # params={ 240 | # "dataset": selected_dataset, 241 | # "config": selected_config, 242 | # "split": selected_split, 243 | # }, 244 | # ).json() 245 | # col_names = list(pd.json_normalize(rows_resp["rows"][0]["row"]).columns) 246 | 247 | # st.markdown("**Map your dataset columns**") 248 | # st.markdown( 249 | # """The model evaluator uses a standardised set of column names for the input examples and labels. \ 250 | # Please define the mapping between your dataset columns (right) and the standardised column names (left).""" 251 | # ) 252 | # col1, col2 = st.columns(2) 253 | 254 | # # TODO: find a better way to layout these items 255 | # # TODO: need graceful way of handling dataset <--> task mismatch for datasets with metadata 256 | # col_mapping = {} 257 | # if selected_task in ["binary_classification", "multi_class_classification"]: 258 | # with col1: 259 | # st.markdown("`text` column") 260 | # st.text("") 261 | # st.text("") 262 | # st.text("") 263 | # st.text("") 264 | # st.markdown("`target` column") 265 | # with col2: 266 | # text_col = st.selectbox( 267 | # "This column should contain the text to be classified", 268 | # col_names, 269 | # index=col_names.index(get_key(config_metadata["col_mapping"], "text")) 270 | # if config_metadata is not None 271 | # else 0, 272 | # ) 273 | # target_col = st.selectbox( 274 | # "This column should contain the labels associated with the text", 275 | # col_names, 276 | # index=col_names.index(get_key(config_metadata["col_mapping"], "target")) 277 | # if config_metadata is not None 278 | # else 0, 279 | # ) 280 | # col_mapping[text_col] = "text" 281 | # col_mapping[target_col] = "target" 282 | 283 | # elif selected_task == "text_zero_shot_classification": 284 | # with col1: 285 | # st.markdown("`text` column") 286 | # st.text("") 287 | # st.text("") 288 | # st.text("") 289 | # st.text("") 290 | # st.markdown("`classes` column") 291 | # st.text("") 292 | # st.text("") 293 | # st.text("") 294 | # st.text("") 295 | # st.markdown("`target` column") 296 | # with col2: 297 | # text_col = st.selectbox( 298 | # "This column should contain the text to be classified", 299 | # col_names, 300 | # index=col_names.index(get_key(config_metadata["col_mapping"], "text")) 301 | # if config_metadata is not None 302 | # else 0, 303 | # ) 304 | # classes_col = 
st.selectbox( 305 | # "This column should contain the classes associated with the text", 306 | # col_names, 307 | # index=col_names.index(get_key(config_metadata["col_mapping"], "classes")) 308 | # if config_metadata is not None 309 | # else 0, 310 | # ) 311 | # target_col = st.selectbox( 312 | # "This column should contain the index of the correct class", 313 | # col_names, 314 | # index=col_names.index(get_key(config_metadata["col_mapping"], "target")) 315 | # if config_metadata is not None 316 | # else 0, 317 | # ) 318 | # col_mapping[text_col] = "text" 319 | # col_mapping[classes_col] = "classes" 320 | # col_mapping[target_col] = "target" 321 | 322 | # if selected_task in ["natural_language_inference"]: 323 | # config_metadata = get_config_metadata(selected_config, metadata) 324 | # with col1: 325 | # st.markdown("`text1` column") 326 | # st.text("") 327 | # st.text("") 328 | # st.text("") 329 | # st.text("") 330 | # st.text("") 331 | # st.markdown("`text2` column") 332 | # st.text("") 333 | # st.text("") 334 | # st.text("") 335 | # st.text("") 336 | # st.text("") 337 | # st.markdown("`target` column") 338 | # with col2: 339 | # text1_col = st.selectbox( 340 | # "This column should contain the first text passage to be classified", 341 | # col_names, 342 | # index=col_names.index(get_key(config_metadata["col_mapping"], "text1")) 343 | # if config_metadata is not None 344 | # else 0, 345 | # ) 346 | # text2_col = st.selectbox( 347 | # "This column should contain the second text passage to be classified", 348 | # col_names, 349 | # index=col_names.index(get_key(config_metadata["col_mapping"], "text2")) 350 | # if config_metadata is not None 351 | # else 0, 352 | # ) 353 | # target_col = st.selectbox( 354 | # "This column should contain the labels associated with the text", 355 | # col_names, 356 | # index=col_names.index(get_key(config_metadata["col_mapping"], "target")) 357 | # if config_metadata is not None 358 | # else 0, 359 | # ) 360 | # col_mapping[text1_col] = "text1" 361 | # col_mapping[text2_col] = "text2" 362 | # col_mapping[target_col] = "target" 363 | 364 | # elif selected_task == "entity_extraction": 365 | # with col1: 366 | # st.markdown("`tokens` column") 367 | # st.text("") 368 | # st.text("") 369 | # st.text("") 370 | # st.text("") 371 | # st.markdown("`tags` column") 372 | # with col2: 373 | # tokens_col = st.selectbox( 374 | # "This column should contain the array of tokens to be classified", 375 | # col_names, 376 | # index=col_names.index(get_key(config_metadata["col_mapping"], "tokens")) 377 | # if config_metadata is not None 378 | # else 0, 379 | # ) 380 | # tags_col = st.selectbox( 381 | # "This column should contain the labels associated with each part of the text", 382 | # col_names, 383 | # index=col_names.index(get_key(config_metadata["col_mapping"], "tags")) 384 | # if config_metadata is not None 385 | # else 0, 386 | # ) 387 | # col_mapping[tokens_col] = "tokens" 388 | # col_mapping[tags_col] = "tags" 389 | 390 | # elif selected_task == "translation": 391 | # with col1: 392 | # st.markdown("`source` column") 393 | # st.text("") 394 | # st.text("") 395 | # st.text("") 396 | # st.text("") 397 | # st.markdown("`target` column") 398 | # with col2: 399 | # text_col = st.selectbox( 400 | # "This column should contain the text to be translated", 401 | # col_names, 402 | # index=col_names.index(get_key(config_metadata["col_mapping"], "source")) 403 | # if config_metadata is not None 404 | # else 0, 405 | # ) 406 | # target_col = st.selectbox( 407 | # "This column 
should contain the target translation", 408 | # col_names, 409 | # index=col_names.index(get_key(config_metadata["col_mapping"], "target")) 410 | # if config_metadata is not None 411 | # else 0, 412 | # ) 413 | # col_mapping[text_col] = "source" 414 | # col_mapping[target_col] = "target" 415 | 416 | # elif selected_task == "summarization": 417 | # with col1: 418 | # st.markdown("`text` column") 419 | # st.text("") 420 | # st.text("") 421 | # st.text("") 422 | # st.text("") 423 | # st.markdown("`target` column") 424 | # with col2: 425 | # text_col = st.selectbox( 426 | # "This column should contain the text to be summarized", 427 | # col_names, 428 | # index=col_names.index(get_key(config_metadata["col_mapping"], "text")) 429 | # if config_metadata is not None 430 | # else 0, 431 | # ) 432 | # target_col = st.selectbox( 433 | # "This column should contain the target summary", 434 | # col_names, 435 | # index=col_names.index(get_key(config_metadata["col_mapping"], "target")) 436 | # if config_metadata is not None 437 | # else 0, 438 | # ) 439 | # col_mapping[text_col] = "text" 440 | # col_mapping[target_col] = "target" 441 | 442 | # elif selected_task == "extractive_question_answering": 443 | # if config_metadata is not None: 444 | # col_mapping = config_metadata["col_mapping"] 445 | # # Hub YAML parser converts periods to hyphens, so we remap them here 446 | # col_mapping = format_col_mapping(col_mapping) 447 | # with col1: 448 | # st.markdown("`context` column") 449 | # st.text("") 450 | # st.text("") 451 | # st.text("") 452 | # st.text("") 453 | # st.markdown("`question` column") 454 | # st.text("") 455 | # st.text("") 456 | # st.text("") 457 | # st.text("") 458 | # st.markdown("`answers.text` column") 459 | # st.text("") 460 | # st.text("") 461 | # st.text("") 462 | # st.text("") 463 | # st.markdown("`answers.answer_start` column") 464 | # with col2: 465 | # context_col = st.selectbox( 466 | # "This column should contain the question's context", 467 | # col_names, 468 | # index=col_names.index(get_key(col_mapping, "context")) if config_metadata is not None else 0, 469 | # ) 470 | # question_col = st.selectbox( 471 | # "This column should contain the question to be answered, given the context", 472 | # col_names, 473 | # index=col_names.index(get_key(col_mapping, "question")) if config_metadata is not None else 0, 474 | # ) 475 | # answers_text_col = st.selectbox( 476 | # "This column should contain example answers to the question, extracted from the context", 477 | # col_names, 478 | # index=col_names.index(get_key(col_mapping, "answers.text")) if config_metadata is not None else 0, 479 | # ) 480 | # answers_start_col = st.selectbox( 481 | # "This column should contain the indices in the context of the first character of each `answers.text`", 482 | # col_names, 483 | # index=col_names.index(get_key(col_mapping, "answers.answer_start")) 484 | # if config_metadata is not None 485 | # else 0, 486 | # ) 487 | # col_mapping[context_col] = "context" 488 | # col_mapping[question_col] = "question" 489 | # col_mapping[answers_text_col] = "answers.text" 490 | # col_mapping[answers_start_col] = "answers.answer_start" 491 | # elif selected_task in ["image_binary_classification", "image_multi_class_classification"]: 492 | # with col1: 493 | # st.markdown("`image` column") 494 | # st.text("") 495 | # st.text("") 496 | # st.text("") 497 | # st.text("") 498 | # st.markdown("`target` column") 499 | # with col2: 500 | # image_col = st.selectbox( 501 | # "This column should contain the images to be 
classified", 502 | # col_names, 503 | # index=col_names.index(get_key(config_metadata["col_mapping"], "image")) 504 | # if config_metadata is not None 505 | # else 0, 506 | # ) 507 | # target_col = st.selectbox( 508 | # "This column should contain the labels associated with the images", 509 | # col_names, 510 | # index=col_names.index(get_key(config_metadata["col_mapping"], "target")) 511 | # if config_metadata is not None 512 | # else 0, 513 | # ) 514 | # col_mapping[image_col] = "image" 515 | # col_mapping[target_col] = "target" 516 | 517 | # # Select metrics 518 | # st.markdown("**Select metrics**") 519 | # st.markdown("The following metrics will be computed") 520 | # html_string = " ".join( 521 | # [ 522 | # '
<div style="padding:5px;float:left">' 523 | # + '<span style="background-color:#D3D3D3;border-radius:5px;padding:5px;color:white">' 525 | # + metric 526 | # + "</span></div>
" 527 | # for metric in TASK_TO_DEFAULT_METRICS[selected_task] 528 | # ] 529 | # ) 530 | # st.markdown(html_string, unsafe_allow_html=True) 531 | # selected_metrics = st.multiselect( 532 | # "(Optional) Select additional metrics", 533 | # sorted(list(set(SUPPORTED_METRICS) - set(TASK_TO_DEFAULT_METRICS[selected_task]))), 534 | # help="""User-selected metrics will be computed with their default arguments. \ 535 | # For example, `f1` will report results for binary labels. \ 536 | # Check out the [available metrics](https://huggingface.co/metrics) for more details.""", 537 | # ) 538 | 539 | # with st.form(key="form"): 540 | # compatible_models = get_compatible_models(selected_task, [selected_dataset]) 541 | # selected_models = st.multiselect( 542 | # "Select the models you wish to evaluate", 543 | # compatible_models, 544 | # help="""Don't see your favourite model in this list? Add the dataset and task it was trained on to the \ 545 | # [model card metadata.](https://huggingface.co/docs/hub/models-cards#model-card-metadata)""", 546 | # ) 547 | # print("INFO -- Selected models before filter:", selected_models) 548 | 549 | # hf_username = st.text_input("Enter your 🤗 Hub username to be notified when the evaluation is finished") 550 | 551 | # submit_button = st.form_submit_button("Evaluate models 🚀") 552 | 553 | # if submit_button: 554 | # if len(hf_username) == 0: 555 | # st.warning("No 🤗 Hub username provided! Please enter your username and try again.") 556 | # elif len(selected_models) == 0: 557 | # st.warning("⚠️ No models were selected for evaluation! Please select at least one model and try again.") 558 | # elif len(selected_models) > 10: 559 | # st.warning("Only 10 models can be evaluated at once. Please select fewer models and try again.") 560 | # else: 561 | # # Filter out previously evaluated models 562 | # selected_models = filter_evaluated_models( 563 | # selected_models, 564 | # selected_task, 565 | # selected_dataset, 566 | # selected_config, 567 | # selected_split, 568 | # selected_metrics, 569 | # ) 570 | # print("INFO -- Selected models after filter:", selected_models) 571 | # if len(selected_models) > 0: 572 | # project_payload = { 573 | # "username": AUTOTRAIN_USERNAME, 574 | # "proj_name": create_autotrain_project_name(selected_dataset, selected_config), 575 | # "task": TASK_TO_ID[selected_task], 576 | # "config": { 577 | # "language": AUTOTRAIN_TASK_TO_LANG[selected_task] 578 | # if selected_task in AUTOTRAIN_TASK_TO_LANG 579 | # else "en", 580 | # "max_models": 5, 581 | # "instance": { 582 | # "provider": "sagemaker" if selected_task in AUTOTRAIN_MACHINE.keys() else "ovh", 583 | # "instance_type": AUTOTRAIN_MACHINE[selected_task] 584 | # if selected_task in AUTOTRAIN_MACHINE.keys() 585 | # else "p3", 586 | # "max_runtime_seconds": 172800, 587 | # "num_instances": 1, 588 | # "disk_size_gb": 200, 589 | # }, 590 | # "evaluation": { 591 | # "metrics": selected_metrics, 592 | # "models": selected_models, 593 | # "hf_username": hf_username, 594 | # }, 595 | # }, 596 | # } 597 | # print(f"INFO -- Payload: {project_payload}") 598 | # project_json_resp = http_post( 599 | # path="/projects/create", 600 | # payload=project_payload, 601 | # token=HF_TOKEN, 602 | # domain=AUTOTRAIN_BACKEND_API, 603 | # ).json() 604 | # print(f"INFO -- Project creation response: {project_json_resp}") 605 | 606 | # if project_json_resp["created"]: 607 | # data_payload = { 608 | # "split": 4, # use "auto" split choice in AutoTrain 609 | # "col_mapping": col_mapping, 610 | # "load_config": {"max_size_bytes": 
0, "shuffle": False}, 611 | # "dataset_id": selected_dataset, 612 | # "dataset_config": selected_config, 613 | # "dataset_split": selected_split, 614 | # } 615 | # data_json_resp = http_post( 616 | # path=f"/projects/{project_json_resp['id']}/data/dataset", 617 | # payload=data_payload, 618 | # token=HF_TOKEN, 619 | # domain=AUTOTRAIN_BACKEND_API, 620 | # ).json() 621 | # print(f"INFO -- Dataset creation response: {data_json_resp}") 622 | # if data_json_resp["download_status"] == 1: 623 | # train_json_resp = http_post( 624 | # path=f"/projects/{project_json_resp['id']}/data/start_processing", 625 | # token=HF_TOKEN, 626 | # domain=AUTOTRAIN_BACKEND_API, 627 | # ).json() 628 | # # For local development we process and approve projects on-the-fly 629 | # if "localhost" in AUTOTRAIN_BACKEND_API: 630 | # with st.spinner("⏳ Waiting for data processing to complete ..."): 631 | # is_data_processing_success = False 632 | # while is_data_processing_success is not True: 633 | # project_status = http_get( 634 | # path=f"/projects/{project_json_resp['id']}", 635 | # token=HF_TOKEN, 636 | # domain=AUTOTRAIN_BACKEND_API, 637 | # ).json() 638 | # if project_status["status"] == 3: 639 | # is_data_processing_success = True 640 | # time.sleep(10) 641 | 642 | # # Approve training job 643 | # train_job_resp = http_post( 644 | # path=f"/projects/{project_json_resp['id']}/start_training", 645 | # token=HF_TOKEN, 646 | # domain=AUTOTRAIN_BACKEND_API, 647 | # ).json() 648 | # st.success("✅ Data processing and project approval complete - go forth and evaluate!") 649 | # else: 650 | # # Prod/staging submissions are evaluated in a cron job via run_evaluation_jobs.py 651 | # print(f"INFO -- AutoTrain job response: {train_json_resp}") 652 | # if train_json_resp["success"]: 653 | # train_eval_index = { 654 | # "train-eval-index": [ 655 | # { 656 | # "config": selected_config, 657 | # "task": AUTOTRAIN_TASK_TO_HUB_TASK[selected_task], 658 | # "task_id": selected_task, 659 | # "splits": {"eval_split": selected_split}, 660 | # "col_mapping": col_mapping, 661 | # } 662 | # ] 663 | # } 664 | # selected_metadata = yaml.dump(train_eval_index, sort_keys=False) 665 | # dataset_card_url = get_dataset_card_url(selected_dataset) 666 | # st.success("✅ Successfully submitted evaluation job!") 667 | # st.markdown( 668 | # f""" 669 | # Evaluation can take up to 1 hour to complete, so grab a ☕️ or 🍵 while you wait: 670 | 671 | # * 🔔 A [Hub pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) with the evaluation results will be opened for each model you selected. Check your email for notifications. 672 | # * 📊 Click [here](https://hf.co/spaces/autoevaluate/leaderboards?dataset={selected_dataset}) to view the results from your submission once the Hub pull request is merged. 673 | # * 🥱 Tired of configuring evaluations? 
Add the following metadata to the [dataset card]({dataset_card_url}) to enable 1-click evaluations: 674 | # """ # noqa 675 | # ) 676 | # st.markdown( 677 | # f""" 678 | # ```yaml 679 | # {selected_metadata} 680 | # """ 681 | # ) 682 | # print("INFO -- Pushing evaluation job logs to the Hub") 683 | # evaluation_log = {} 684 | # evaluation_log["project_id"] = project_json_resp["id"] 685 | # evaluation_log["autotrain_env"] = ( 686 | # "staging" if "staging" in AUTOTRAIN_BACKEND_API else "prod" 687 | # ) 688 | # evaluation_log["payload"] = project_payload 689 | # evaluation_log["project_creation_response"] = project_json_resp 690 | # evaluation_log["dataset_creation_response"] = data_json_resp 691 | # evaluation_log["autotrain_job_response"] = train_json_resp 692 | # commit_evaluation_log(evaluation_log, hf_access_token=HF_TOKEN) 693 | # else: 694 | # st.error("🙈 Oh no, there was an error submitting your evaluation job!") 695 | # else: 696 | # st.warning("⚠️ No models left to evaluate! Please select other models and try again.") 697 | -------------------------------------------------------------------------------- /evaluation.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from dataclasses import dataclass 3 | 4 | import streamlit as st 5 | from huggingface_hub import DatasetFilter, HfApi 6 | from huggingface_hub.hf_api import DatasetInfo 7 | 8 | 9 | @dataclass(frozen=True, eq=True) 10 | class EvaluationInfo: 11 | task: str 12 | model: str 13 | dataset_name: str 14 | dataset_config: str 15 | dataset_split: str 16 | metrics: set 17 | 18 | 19 | def create_evaluation_info(dataset_info: DatasetInfo) -> EvaluationInfo: 20 | if dataset_info.cardData is not None: 21 | metadata = dataset_info.cardData["eval_info"] 22 | metadata.pop("col_mapping", None) 23 | # TODO(lewtun): populate dataset cards with metric info 24 | if "metrics" not in metadata: 25 | metadata["metrics"] = frozenset() 26 | else: 27 | metadata["metrics"] = frozenset(metadata["metrics"]) 28 | return EvaluationInfo(**metadata) 29 | 30 | 31 | def get_evaluation_infos(): 32 | evaluation_datasets = [] 33 | filt = DatasetFilter(author="autoevaluate") 34 | autoevaluate_datasets = HfApi().list_datasets(filter=filt, full=True) 35 | for dset in autoevaluate_datasets: 36 | try: 37 | evaluation_datasets.append(create_evaluation_info(dset)) 38 | except Exception as e: 39 | print(f"Error processing dataset {dset}: {e}") 40 | return evaluation_datasets 41 | 42 | 43 | def filter_evaluated_models(models, task, dataset_name, dataset_config, dataset_split, metrics): 44 | evaluation_infos = get_evaluation_infos() 45 | models_to_filter = copy.copy(models) 46 | 47 | for model in models_to_filter: 48 | evaluation_info = EvaluationInfo( 49 | task=task, 50 | model=model, 51 | dataset_name=dataset_name, 52 | dataset_config=dataset_config, 53 | dataset_split=dataset_split, 54 | metrics=frozenset(metrics), 55 | ) 56 | if evaluation_info in evaluation_infos: 57 | st.info( 58 | f"Model [`{model}`](https://huggingface.co/{model}) has already been evaluated on this configuration. \ 59 | This model will be excluded from the evaluation job..."
60 | ) 61 | models.remove(model) 62 | 63 | return models 64 | -------------------------------------------------------------------------------- /images/autotrain_job.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/model-evaluator/003006a8fd779bf904dce8d7fe97b8b999d7f49e/images/autotrain_job.png -------------------------------------------------------------------------------- /images/autotrain_projects.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/model-evaluator/003006a8fd779bf904dce8d7fe97b8b999d7f49e/images/autotrain_projects.png -------------------------------------------------------------------------------- /notebooks/flush-prediction-repos.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "c8093b9e-ca6a-423d-96c3-5fe21f7109a1", 6 | "metadata": {}, 7 | "source": [ 8 | "## Imports" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 1, 14 | "id": "efe8cda7-a687-4867-b1f0-8efbcd428681", 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import os\n", 19 | "from pathlib import Path\n", 20 | "\n", 21 | "from dotenv import load_dotenv\n", 22 | "from huggingface_hub import DatasetFilter, delete_repo, list_datasets\n", 23 | "from tqdm.auto import tqdm\n", 24 | "\n", 25 | "if Path(\".env\").is_file():\n", 26 | " load_dotenv(\".env\")\n", 27 | "\n", 28 | "HF_TOKEN = os.getenv(\"HF_TOKEN\")" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "id": "8f6e01f0-b658-451f-999c-e08d9f4bbbd3", 34 | "metadata": {}, 35 | "source": [ 36 | "## Get all prediction repos from autoevaluate org" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 2, 42 | "id": "2e369478-66d3-498d-a8fd-95bc9180f362", 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "def get_prediction_repos():\n", 47 | " all_repos = list_datasets(author=\"autoevaluate\")\n", 48 | " prediction_repos = [\n", 49 | " repo for repo in all_repos if repo.id.split(\"/\")[1].startswith(\"autoeval-\")\n", 50 | " ]\n", 51 | " return prediction_repos" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 3, 57 | "id": "542db019-d01f-42f5-bef4-888dae8eeadb", 58 | "metadata": {}, 59 | "outputs": [ 60 | { 61 | "data": { 62 | "text/plain": [ 63 | "66" 64 | ] 65 | }, 66 | "execution_count": 3, 67 | "metadata": {}, 68 | "output_type": "execute_result" 69 | } 70 | ], 71 | "source": [ 72 | "prediction_repos = get_prediction_repos()\n", 73 | "len(prediction_repos)" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 4, 79 | "id": "331cfabf-4b73-490f-8d6a-86b5bc162666", 80 | "metadata": {}, 81 | "outputs": [ 82 | { 83 | "data": { 84 | "text/plain": [ 85 | "DatasetInfo: {\n", 86 | "\tid: autoevaluate/autoeval-staging-eval-project-9dcc51b5-6464670\n", 87 | "\tsha: d3bb02be592d167f7a217ac9341d187142d9a90a\n", 88 | "\tlastModified: 2022-06-13T14:54:34.000Z\n", 89 | "\ttags: ['type:predictions', 'tags:autotrain', 'tags:evaluation', 'datasets:glue']\n", 90 | "\tprivate: False\n", 91 | "\tauthor: autoevaluate\n", 92 | "\tdescription: None\n", 93 | "\tcitation: None\n", 94 | "\tcardData: None\n", 95 | "\tsiblings: None\n", 96 | "\tgated: False\n", 97 | "\tdownloads: 12\n", 98 | "}" 99 | ] 100 | }, 101 | "execution_count": 4, 102 | "metadata": {}, 103 | "output_type": "execute_result" 104 | } 105 | ], 
106 | "source": [ 107 | "prediction_repos[0]" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "id": "57a86b69-ffe8-4035-8f3d-5c917d8ce7bf", 113 | "metadata": {}, 114 | "source": [ 115 | "## Delete all prediction repos" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 5, 121 | "id": "6c8e23e7-2a6d-437b-9742-17f37684d9eb", 122 | "metadata": {}, 123 | "outputs": [ 124 | { 125 | "data": { 126 | "application/vnd.jupyter.widget-view+json": { 127 | "model_id": "06fa304dcc6d44e39205b20a5e488052", 128 | "version_major": 2, 129 | "version_minor": 0 130 | }, 131 | "text/plain": [ 132 | " 0%| | 0/66 [00:00<?, ?it/s]" -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | huggingface-hub>=0.11 2 | python-dotenv 3 | streamlit==1.10.0 4 | datasets 5 | evaluate 6 | jsonlines 7 | typer 8 | # Dataset specific deps 9 | py7zr<0.19 10 | openpyxl<3.1 11 | # Dirty bug from Google 12 | protobuf<=3.20.1 13 | # Bug from Streamlit 14 | altair<5 -------------------------------------------------------------------------------- /run_evaluation_jobs.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import typer 5 | from datasets import load_dataset 6 | from dotenv import load_dotenv 7 | 8 | from utils import http_get, http_post 9 | 10 | if Path(".env").is_file(): 11 | load_dotenv(".env") 12 | 13 | HF_TOKEN = os.getenv("HF_TOKEN") 14 | AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME") 15 | AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API") 16 | 17 | if "staging" in AUTOTRAIN_BACKEND_API: 18 | AUTOTRAIN_ENV = "staging" 19 | else: 20 | AUTOTRAIN_ENV = "prod" 21 | 22 | 23 | def main(): 24 | print(f"💡 Starting jobs on {AUTOTRAIN_ENV} environment") 25 | logs_df = load_dataset("autoevaluate/evaluation-job-logs", use_auth_token=HF_TOKEN, split="train").to_pandas() 26 | # Filter out legacy AutoTrain submissions prior to project approvals requirement 27 | projects_df = logs_df.copy()[(~logs_df["project_id"].isnull())] 28 | # Filter IDs for appropriate AutoTrain env (staging vs prod) 29 | projects_df = projects_df.copy().query(f"autotrain_env == '{AUTOTRAIN_ENV}'") 30 | projects_to_approve = projects_df["project_id"].astype(int).tolist() 31 | failed_approvals = [] 32 | print(f"🚀 Found {len(projects_to_approve)} evaluation projects to approve!") 33 | 34 | for project_id in projects_to_approve: 35 | print(f"Attempting to evaluate project ID {project_id} ...") 36 | try: 37 | project_info = http_get( 38 | path=f"/projects/{project_id}", 39 | token=HF_TOKEN, 40 | domain=AUTOTRAIN_BACKEND_API, 41 | ).json() 42 | print(project_info) 43 | # Only start evaluation for projects with completed data processing (status=3) 44 | if project_info["status"] == 3 and project_info["training_status"] == "not_started": 45 | train_job_resp = http_post( 46 | path=f"/projects/{project_id}/start_training", 47 | token=HF_TOKEN, 48 | domain=AUTOTRAIN_BACKEND_API, 49 | ).json() 50 | print(f"🤖 Project {project_id} approval response: {train_job_resp}") 51 | else: 52 | print(f"💪 Project {project_id} either not ready or has already been evaluated.
/utils.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | import uuid
3 | from typing import Dict, List, Union
4 | 
5 | import jsonlines
6 | import requests
7 | import streamlit as st
8 | from evaluate import load
9 | from huggingface_hub import HfApi, ModelFilter, Repository, dataset_info, list_metrics
10 | from tqdm import tqdm
11 | 
12 | AUTOTRAIN_TASK_TO_HUB_TASK = {
13 |     "binary_classification": "text-classification",
14 |     "multi_class_classification": "text-classification",
15 |     "natural_language_inference": "text-classification",
16 |     "entity_extraction": "token-classification",
17 |     "extractive_question_answering": "question-answering",
18 |     "translation": "translation",
19 |     "summarization": "summarization",
20 |     "image_binary_classification": "image-classification",
21 |     "image_multi_class_classification": "image-classification",
22 |     "text_zero_shot_classification": "text-generation",
23 | }
24 | 
25 | 
26 | HUB_TASK_TO_AUTOTRAIN_TASK = {v: k for k, v in AUTOTRAIN_TASK_TO_HUB_TASK.items()}
27 | LOGS_REPO = "evaluation-job-logs"
28 | 
29 | 
30 | def get_auth_headers(token: str, prefix: str = "Bearer"):
31 |     return {"Authorization": f"{prefix} {token}"}
32 | 
33 | 
34 | def http_post(path: str, token: str, payload=None, domain: str = None, params=None) -> requests.Response:
35 |     """HTTP POST request to the AutoTrain API; re-raises if the API cannot be reached and raises HTTPError on bad status codes."""
36 |     try:
37 |         response = requests.post(
38 |             url=domain + path,
39 |             json=payload,
40 |             headers=get_auth_headers(token=token),
41 |             allow_redirects=True,
42 |             params=params,
43 |         )
44 |     except requests.exceptions.ConnectionError:
45 |         print("❌ Failed to reach AutoTrain API, check your internet connection")
46 |         raise  # `response` would be unbound here, so propagate instead of falling through
47 |     response.raise_for_status()
48 |     return response
49 | 
50 | 
51 | def http_get(path: str, domain: str, token: str = None, params: dict = None) -> requests.Response:
52 |     """HTTP GET request to `path`; re-raises if the API cannot be reached and raises HTTPError on bad status codes."""
53 |     try:
54 |         response = requests.get(
55 |             url=domain + path,
56 |             headers=get_auth_headers(token=token),
57 |             allow_redirects=True,
58 |             params=params,
59 |         )
60 |     except requests.exceptions.ConnectionError:
61 |         print(f"❌ Failed to reach {path}, check your internet connection")
62 |         raise
63 |     response.raise_for_status()
64 |     return response
65 | 
66 | 
67 | def get_metadata(dataset_name: str, token: str) -> Union[Dict, None]:
68 |     data = dataset_info(dataset_name, token=token)
69 |     if data.cardData is not None and "train-eval-index" in data.cardData.keys():
70 |         return data.cardData["train-eval-index"]
71 |     else:
72 |         return None
73 | 
74 | 
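# Hypothetical illustration of the shape get_metadata() returns: the dataset
# card's `train-eval-index` block is a list with one entry per config, based
# on the Hub's dataset card schema, roughly like:
#
#   [{"config": "plain_text",
#     "task": "question-answering",
#     "task_id": "extractive_question_answering",
#     "splits": {"train_split": "train", "eval_split": "validation"},
#     "col_mapping": {"question": "question", "context": "context",
#                     "answers": {"text": "text", "answer_start": "answer_start"}}}]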
83 | """ 84 | compatible_models = [] 85 | # Allow any summarization model to be used for summarization tasks 86 | # and allow any text-generation model to be used for text_zero_shot_classification 87 | if task in ("summarization", "text_zero_shot_classification"): 88 | model_filter = ModelFilter( 89 | task=AUTOTRAIN_TASK_TO_HUB_TASK[task], 90 | library=["transformers", "pytorch"], 91 | ) 92 | compatible_models.extend(HfApi().list_models(filter=model_filter)) 93 | # Include models trained on SQuAD datasets, since these can be evaluated on 94 | # other SQuAD-like datasets 95 | if task == "extractive_question_answering": 96 | dataset_ids.extend(["squad", "squad_v2"]) 97 | 98 | # TODO: relax filter on PyTorch models if TensorFlow supported in AutoTrain 99 | for dataset_id in dataset_ids: 100 | model_filter = ModelFilter( 101 | task=AUTOTRAIN_TASK_TO_HUB_TASK[task], 102 | trained_dataset=dataset_id, 103 | library=["transformers", "pytorch"], 104 | ) 105 | compatible_models.extend(HfApi().list_models(filter=model_filter)) 106 | return sorted(set([model.modelId for model in compatible_models])) 107 | 108 | 109 | def get_key(col_mapping, val): 110 | for key, value in col_mapping.items(): 111 | if val == value: 112 | return key 113 | 114 | return "key doesn't exist" 115 | 116 | 117 | def format_col_mapping(col_mapping: dict) -> dict: 118 | for k, v in col_mapping["answers"].items(): 119 | col_mapping[f"answers.{k}"] = f"answers.{v}" 120 | del col_mapping["answers"] 121 | return col_mapping 122 | 123 | 124 | def commit_evaluation_log(evaluation_log, hf_access_token=None): 125 | logs_repo_url = f"https://huggingface.co/datasets/autoevaluate/{LOGS_REPO}" 126 | logs_repo = Repository( 127 | local_dir=LOGS_REPO, 128 | clone_from=logs_repo_url, 129 | repo_type="dataset", 130 | use_auth_token=hf_access_token, 131 | ) 132 | logs_repo.git_pull() 133 | with jsonlines.open(f"{LOGS_REPO}/logs.jsonl") as r: 134 | lines = [] 135 | for obj in r: 136 | lines.append(obj) 137 | 138 | lines.append(evaluation_log) 139 | with jsonlines.open(f"{LOGS_REPO}/logs.jsonl", mode="w") as writer: 140 | for job in lines: 141 | writer.write(job) 142 | logs_repo.push_to_hub( 143 | commit_message=f"Evaluation submitted with project name {evaluation_log['payload']['proj_name']}" 144 | ) 145 | print("INFO -- Pushed evaluation logs to the Hub") 146 | 147 | 148 | @st.experimental_memo 149 | def get_supported_metrics(): 150 | """Helper function to get all metrics compatible with evaluation service. 151 | 152 | Requires all metric dependencies installed in the same environment, so wait until 153 | https://github.com/huggingface/evaluate/issues/138 is resolved before using this. 154 | """ 155 | metrics = [metric.id for metric in list_metrics()] 156 | supported_metrics = [] 157 | for metric in tqdm(metrics): 158 | # TODO: this currently requires all metric dependencies to be installed 159 | # in the same environment. Refactor to avoid needing to actually load 160 | # the metric. 161 | try: 162 | print(f"INFO -- Attempting to load metric: {metric}") 163 | metric_func = load(metric) 164 | except Exception as e: 165 | print(e) 166 | print("WARNING -- Skipping the following metric, which cannot load:", metric) 167 | continue 168 | 169 | argspec = inspect.getfullargspec(metric_func.compute) 170 | if "references" in argspec.kwonlyargs and "predictions" in argspec.kwonlyargs: 171 | # We require that "references" and "predictions" are arguments 172 | # to the metric function. 
126 | def commit_evaluation_log(evaluation_log, hf_access_token=None):
127 |     logs_repo_url = f"https://huggingface.co/datasets/autoevaluate/{LOGS_REPO}"
128 |     logs_repo = Repository(
129 |         local_dir=LOGS_REPO,
130 |         clone_from=logs_repo_url,
131 |         repo_type="dataset",
132 |         use_auth_token=hf_access_token,
133 |     )
134 |     logs_repo.git_pull()
135 |     with jsonlines.open(f"{LOGS_REPO}/logs.jsonl") as r:
136 |         lines = []
137 |         for obj in r:
138 |             lines.append(obj)
139 | 
140 |     lines.append(evaluation_log)
141 |     with jsonlines.open(f"{LOGS_REPO}/logs.jsonl", mode="w") as writer:
142 |         for job in lines:
143 |             writer.write(job)
144 |     logs_repo.push_to_hub(
145 |         commit_message=f"Evaluation submitted with project name {evaluation_log['payload']['proj_name']}"
146 |     )
147 |     print("INFO -- Pushed evaluation logs to the Hub")
148 | 
149 | 
150 | @st.experimental_memo
151 | def get_supported_metrics():
152 |     """Helper function to get all metrics compatible with the evaluation service.
153 | 
154 |     Requires all metric dependencies to be installed in the same environment, so wait until
155 |     https://github.com/huggingface/evaluate/issues/138 is resolved before using this.
156 |     """
157 |     metrics = [metric.id for metric in list_metrics()]
158 |     supported_metrics = []
159 |     for metric in tqdm(metrics):
160 |         # TODO: this currently requires all metric dependencies to be installed
161 |         # in the same environment. Refactor to avoid needing to actually load
162 |         # the metric.
163 |         try:
164 |             print(f"INFO -- Attempting to load metric: {metric}")
165 |             metric_func = load(metric)
166 |         except Exception as e:
167 |             print(e)
168 |             print("WARNING -- Skipping the following metric, which cannot load:", metric)
169 |             continue
170 | 
171 |         argspec = inspect.getfullargspec(metric_func.compute)
172 |         if "references" in argspec.kwonlyargs and "predictions" in argspec.kwonlyargs:
173 |             # We require that "references" and "predictions" are arguments
174 |             # to the metric function. We also require that the other arguments
175 |             # besides "references" and "predictions" have defaults and so do not
176 |             # need to be specified explicitly.
177 |             defaults = True
178 |             for key, value in (argspec.kwonlydefaults or {}).items():
179 |                 if key not in ("references", "predictions"):
180 |                     if value is None:
181 |                         defaults = False
182 |                         break
183 | 
184 |             if defaults:
185 |                 supported_metrics.append(metric)
186 |     return supported_metrics
187 | 
188 | 
189 | def get_dataset_card_url(dataset_id: str) -> str:
190 |     """Gets the URL to edit the dataset card for the given dataset ID."""
191 |     if "/" in dataset_id:
192 |         return f"https://huggingface.co/datasets/{dataset_id}/edit/main/README.md"
193 |     else:
194 |         return f"https://github.com/huggingface/datasets/edit/master/datasets/{dataset_id}/README.md"
195 | 
196 | 
197 | def create_autotrain_project_name(dataset_id: str, dataset_config: str) -> str:
198 |     """Creates an AutoTrain project name for the given dataset ID and config."""
199 |     # Project names cannot have "/", so we need to format community datasets accordingly
200 |     dataset_id_formatted = dataset_id.replace("/", "__")
201 |     dataset_config_formatted = dataset_config.replace("--", "__")
202 |     # Project names need to be unique, so we append a random string to guarantee this while adhering to naming rules
203 |     basename = f"eval-{dataset_id_formatted}-{dataset_config_formatted}"
204 |     basename = basename[:60]  # Hub naming limitation; slicing is a no-op for shorter names
205 |     return f"{basename}-{str(uuid.uuid4())[:6]}"
206 | 
207 | 
208 | def get_config_metadata(config: str, metadata: List[Dict] = None) -> Union[Dict, None]:
209 |     """Gets the dataset card metadata for the given config."""
210 |     if metadata is None:
211 |         return None
212 |     config_metadata = [m for m in metadata if m["config"] == config]
213 |     if len(config_metadata) >= 1:
214 |         return config_metadata[0]
215 |     else:
216 |         return None
217 | 
--------------------------------------------------------------------------------
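As a closing usage sketch (the dataset ID and config here are hypothetical, and the random suffix differs on every call), the naming and metadata helpers compose like this:

    from utils import create_autotrain_project_name, get_config_metadata

    name = create_autotrain_project_name("lewtun/my-dataset", "my--config")
    print(name)  # e.g. "eval-lewtun__my-dataset-my__config-3f9a1b"

    metadata = [{"config": "my--config", "task": "question-answering"}]
    print(get_config_metadata("my--config", metadata))  # -> the matching entry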