├── tests
│   ├── __init__.py
│   ├── fixtures
│   │   ├── empty.txt
│   │   └── cards
│   │       ├── sample_template.md
│   │       ├── sample_datasetcard_template.md
│   │       ├── sample_no_metadata.md
│   │       ├── sample_invalid_card_data.md
│   │       ├── sample_simple.md
│   │       ├── sample_windows_line_breaks.md
│   │       ├── sample_datasetcard_simple.md
│   │       ├── sample_invalid_model_index.md
│   │       └── sample_simple_model_index.md
│   ├── test_testing_configuration.py
│   ├── test_utils_runtime.py
│   ├── README.md
│   ├── test_windows.py
│   ├── test_endpoint_helpers.py
│   ├── testing_constants.py
│   ├── test_utils_experimental.py
│   ├── test_offline_utils.py
│   ├── test_utils_chunks.py
│   ├── test_login_utils.py
│   ├── test_utils_sha.py
│   ├── test_utils_datetime.py
│   ├── test_utils_dotenv.py
│   ├── test_utils_fixes.py
│   ├── test_init_lazy_loading.py
│   ├── test_utils_parsing.py
│   ├── test_utils_validators.py
│   └── test_utils_terminal.py
├── src
│   └── huggingface_hub
│       ├── py.typed
│       ├── inference
│       │   ├── __init__.py
│       │   ├── _mcp
│       │   │   ├── __init__.py
│       │   │   ├── types.py
│       │   │   └── constants.py
│       │   ├── _generated
│       │   │   ├── __init__.py
│       │   │   └── types
│       │   │       ├── audio_to_audio.py
│       │   │       ├── depth_estimation.py
│       │   │       ├── sentence_similarity.py
│       │   │       ├── text_classification.py
│       │   │       ├── summarization.py
│       │   │       ├── zero_shot_image_classification.py
│       │   │       ├── feature_extraction.py
│       │   │       ├── audio_classification.py
│       │   │       ├── image_classification.py
│       │   │       ├── text2text_generation.py
│       │   │       ├── zero_shot_object_detection.py
│       │   │       ├── video_classification.py
│       │   │       ├── fill_mask.py
│       │   │       ├── visual_question_answering.py
│       │   │       ├── zero_shot_classification.py
│       │   │       ├── translation.py
│       │   │       ├── text_to_video.py
│       │   │       ├── text_to_image.py
│       │   │       ├── token_classification.py
│       │   │       ├── image_segmentation.py
│       │   │       ├── object_detection.py
│       │   │       ├── image_to_video.py
│       │   │       ├── image_to_image.py
│       │   │       └── table_question_answering.py
│       │   └── _providers
│       │       ├── cerebras.py
│       │       ├── publicai.py
│       │       ├── groq.py
│       │       ├── ovhcloud.py
│       │       ├── clarifai.py
│       │       ├── zai_org.py
│       │       ├── openai.py
│       │       ├── fireworks_ai.py
│       │       ├── scaleway.py
│       │       ├── cohere.py
│       │       ├── featherless_ai.py
│       │       ├── nscale.py
│       │       ├── hyperbolic.py
│       │       ├── sambanova.py
│       │       └── novita.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── system.py
│       │   └── hf.py
│       ├── serialization
│       │   └── __init__.py
│       └── utils
│           ├── insecure_hashlib.py
│           ├── _pagination.py
│           ├── _dotenv.py
│           ├── _chunk_utils.py
│           ├── sha.py
│           ├── endpoint_helpers.py
│           ├── _terminal.py
│           └── _experimental.py
├── .github
│   ├── conda
│   │   ├── build.sh
│   │   └── meta.yaml
│   ├── workflows
│   │   ├── trufflehog.yml
│   │   ├── style-bot.yml
│   │   ├── build_documentation.yaml
│   │   ├── build_pr_documentation.yaml
│   │   ├── upload_pr_documentation.yaml
│   │   ├── build_repocard_examples.yaml
│   │   ├── python-release.yml
│   │   ├── python-release-hf.yml
│   │   ├── release-conda.yml
│   │   ├── claude.yml
│   │   ├── python-quality.yml
│   │   ├── model_card_consistency_reminder.yml
│   │   └── check-installers.yml
│   └── ISSUE_TEMPLATE
│       ├── config.yml
│       ├── feature_request.md
│       └── bug-report.yml
├── docs
│   ├── source
│   │   ├── tm
│   │   │   └── _toctree.yml
│   │   ├── hi
│   │   │   └── _toctree.yml
│   │   ├── ko
│   │   │   ├── package_reference
│   │   │   │   ├── overview.md
│   │   │   │   ├── login.md
│   │   │   │   ├── hf_file_system.md
│   │   │   │   ├── collections.md
│   │   │   │   ├── mixins.md
│   │   │   │   ├── tensorboard.md
│   │   │   │   ├── community.md
│   │   │   │   ├── space_runtime.md
│   │   │   │   ├── file_download.md
│   │   │   │   ├── serialization.md
│   │   │   │   ├── inference_client.md
│   │   │   │   ├── cache.md
│   │   │   │   ├── inference_endpoints.md
│   │   │   │   ├── cards.md
│   │   │   │   └── webhooks_server.md
│   │   │   ├── guides
│   │   │   │   └── search.md
│   │   │   └── _toctree.yml
│   │   ├── en
│   │   │   ├── package_reference
│   │   │   │   ├── overview.md
│   │   │   │   ├── jobs.md
│   │   │   │   ├── hf_file_system.md
│   │   │   │   ├── mixins.md
│   │   │   │   ├── authentication.md
│   │   │   │   ├── collections.md
│   │   │   │   ├── space_runtime.md
│   │   │   │   ├── community.md
│   │   │   │   ├── mcp.md
│   │   │   │   ├── file_download.md
│   │   │   │   ├── tensorboard.md
│   │   │   │   ├── cache.md
│   │   │   │   ├── cards.md
│   │   │   │   ├── inference_client.md
│   │   │   │   ├── inference_endpoints.md
│   │   │   │   └── oauth.md
│   │   │   ├── _redirects.yml
│   │   │   └── guides
│   │   │       └── search.md
│   │   ├── fr
│   │   │   └── _toctree.yml
│   │   ├── cn
│   │   │   ├── _toctree.yml
│   │   │   ├── guides
│   │   │   │   └── search.md
│   │   │   └── index.md
│   │   └── de
│   │       ├── _toctree.yml
│   │       └── guides
│   │           └── search.md
│   └── dev
│       └── release.md
├── MANIFEST.in
├── utils
│   ├── hf
│   │   ├── hf
│   │   │   └── __init__.py
│   │   ├── setup.py
│   │   └── README.md
│   └── helpers.py
├── codecov.yml
├── .pre-commit-config.yaml
├── Makefile
├── pyproject.toml
└── .gitignore

--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/src/huggingface_hub/py.typed:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/fixtures/empty.txt:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_mcp/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_generated/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.github/conda/build.sh:
--------------------------------------------------------------------------------
$PYTHON setup.py install  # Install the package with the conda build environment's Python.

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_template.md:
--------------------------------------------------------------------------------
---
{{card_data}}
---

# {{ model_name | default("MyModelName", true)}}

{{ some_data }}

--------------------------------------------------------------------------------
/docs/source/tm/_toctree.yml:
--------------------------------------------------------------------------------
- title: "Get started"
  sections:
    - local: index
      title: குறியீட்டு
    - local: installation
      title: நிறுவல்

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_datasetcard_template.md:
--------------------------------------------------------------------------------
---
{{card_data}}
---

# {{ pretty_name | default("Dataset Name", true)}}

{{ some_data }}

--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
include src/huggingface_hub/templates/modelcard_template.md
include src/huggingface_hub/templates/datasetcard_template.md
include src/huggingface_hub/py.typed

--------------------------------------------------------------------------------
/utils/hf/hf/__init__.py:
--------------------------------------------------------------------------------
raise ImportError(
    "The 'hf' package only provides a CLI entry point. It cannot be imported. Please import 'huggingface_hub' instead."
)

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_no_metadata.md:
--------------------------------------------------------------------------------
# MyCoolModel

In this example, we don't have any metadata at the top of the file. In cases like these, `CardData` should be instantiated as empty.

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_invalid_card_data.md:
--------------------------------------------------------------------------------
---
[]
---

# invalid-card-data

This card should fail to load because the card data between the `---` is a list instead of a dict.

--------------------------------------------------------------------------------
/docs/source/hi/_toctree.yml:
--------------------------------------------------------------------------------
- title: "शुरू हो जाओ"
  sections:
    - local: index
      title: होम
    - local: quick-start
      title: जल्दी शुरू
    - local: installation
      title: इंस्टालेशन

--------------------------------------------------------------------------------
/tests/test_testing_configuration.py:
--------------------------------------------------------------------------------
from huggingface_hub import get_token


def test_no_token_in_staging_environment():
    """Make sure no token is set in test environment."""
    assert get_token() is None

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_providers/cerebras.py:
--------------------------------------------------------------------------------
from ._common import BaseConversationalTask


class CerebrasConversationalTask(BaseConversationalTask):
    def __init__(self):
        super().__init__(provider="cerebras", base_url="https://api.cerebras.ai")

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_providers/publicai.py:
--------------------------------------------------------------------------------
from ._common import BaseConversationalTask


class PublicAIConversationalTask(BaseConversationalTask):
    def __init__(self):
        super().__init__(provider="publicai", base_url="https://api.publicai.co")

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/overview.md:
--------------------------------------------------------------------------------
# Overview[[overview]]

이 섹션은 `huggingface_hub` 클래스와 메서드에 대한 상세하고 기술적인 설명을 포함하고 있습니다.

--------------------------------------------------------------------------------
/docs/source/en/package_reference/overview.md:
--------------------------------------------------------------------------------
# Overview

This section contains an exhaustive and technical description of `huggingface_hub` classes and methods.

--------------------------------------------------------------------------------
/docs/source/fr/_toctree.yml:
--------------------------------------------------------------------------------
- title: "Introduction"
  sections:
    - local: index
      title: Home
    - local: quick-start
      title: Démarrage rapide
    - local: installation
      title: Installation
- title: "Guides"
  sections:
    - local: guides/integrations
      title: Intégrer dans une librairie

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_simple.md:
--------------------------------------------------------------------------------
---
language:
- en
license: mit
library_name: pytorch-lightning
tags:
- pytorch
- image-classification
datasets:
- beans
metrics:
- acc
---

# my-cool-model

## Model description

You can embed local or remote images using `![](...)`

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_windows_line_breaks.md:
--------------------------------------------------------------------------------
---
license: mit
language: eo
thumbnail: https://huggingface.co/blog/assets/01_how-to-train/EsperBERTo-thumbnail-v2.png
widget:
- text: "Jen la komenco de bela ."
- text: "Uno du "
- text: "Jen finiĝas bela ."
---

# Hello old Windows line breaks

--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
comment:
  # https://docs.codecov.com/docs/pull-request-comments#requiring-changes
  require_changes: true
  # https://docs.codecov.com/docs/pull-request-comments#after_n_builds
  after_n_builds: 12

coverage:
  status:
    # not in PRs
    patch: false
    project: false

github_checks:
  annotations: false

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_providers/groq.py:
--------------------------------------------------------------------------------
from ._common import BaseConversationalTask


class GroqConversationalTask(BaseConversationalTask):
    def __init__(self):
        super().__init__(provider="groq", base_url="https://api.groq.com")

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        return "/openai/v1/chat/completions"

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_providers/ovhcloud.py:
--------------------------------------------------------------------------------
from huggingface_hub.inference._providers._common import BaseConversationalTask


_PROVIDER = "ovhcloud"
_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net"


class OVHcloudConversationalTask(BaseConversationalTask):
    def __init__(self):
        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

--------------------------------------------------------------------------------
/.github/workflows/trufflehog.yml:
--------------------------------------------------------------------------------
on:
  push:

name: Secret Leaks

jobs:
  trufflehog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Secret Scanning
        uses: trufflesecurity/trufflehog@main
        with:
          extra_args: --results=verified,unknown

--------------------------------------------------------------------------------
/.github/workflows/style-bot.yml:
--------------------------------------------------------------------------------
name: Style Bot

on:
  issue_comment:
    types: [created]

permissions:
  contents: write
  pull-requests: write

jobs:
  style:
    uses: ./.github/workflows/style-bot-action.yml
    with:
      python_quality_dependencies: "[quality]"
      style_command_type: "style_only"
    secrets:
      bot_token: ${{ secrets.HF_STYLE_BOT_ACTION }}

--------------------------------------------------------------------------------
/tests/test_utils_runtime.py:
--------------------------------------------------------------------------------
import unittest

from huggingface_hub.utils._runtime import is_google_colab, is_notebook


class TestRuntimeUtils(unittest.TestCase):
    def test_is_notebook(self) -> None:
        """Test `is_notebook`."""
        self.assertFalse(is_notebook())

    def test_is_google_colab(self) -> None:
        """Test `is_google_colab`."""
        self.assertFalse(is_google_colab())

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_providers/clarifai.py:
--------------------------------------------------------------------------------
from ._common import BaseConversationalTask


_PROVIDER = "clarifai"
_BASE_URL = "https://api.clarifai.com"


class ClarifaiConversationalTask(BaseConversationalTask):
    def __init__(self):
        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        return "/v2/ext/openai/v1/chat/completions"

--------------------------------------------------------------------------------
/.github/workflows/build_documentation.yaml:
--------------------------------------------------------------------------------
name: Build documentation

on:
  push:
    branches:
      - main
      - doc-builder*
      - v*-release

jobs:
  build:
    uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
    with:
      commit_sha: ${{ github.sha }}
      package: huggingface_hub
      languages: cn de fr en hi ko tm
    secrets:
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}

--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
# Running Tests

To run the test suite, please perform the following from the root directory of this repository:

1. `pip install -e .[testing]`

   This will install all the testing requirements.
2. `sudo apt-get update; sudo apt-get install git-lfs -y`

   We need git-lfs on our system to run some of the tests.

3. `pytest ./tests/`

   We need to set an environment variable to make sure the private API tests can run.

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_datasetcard_simple.md:
--------------------------------------------------------------------------------
---
language:
- en
license:
- bsd-3-clause
annotations_creators:
- crowdsourced
- expert-generated
language_creators:
- found
multilinguality:
- monolingual
size_categories:
- n<1K
task_categories:
- image-segmentation
task_ids:
- semantic-segmentation
pretty_name: Sample Segmentation
---

# Dataset Card for Sample Segmentation

This is a sample dataset card for a semantic segmentation dataset.

--------------------------------------------------------------------------------
/.github/workflows/build_pr_documentation.yaml:
--------------------------------------------------------------------------------
name: Build PR Documentation

on:
  pull_request

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
    with:
      commit_sha: ${{ github.event.pull_request.head.sha }}
      pr_number: ${{ github.event.number }}
      package: huggingface_hub
      languages: cn de fr en hi ko tm

--------------------------------------------------------------------------------
/.github/workflows/upload_pr_documentation.yaml:
--------------------------------------------------------------------------------
name: Upload PR Documentation

on:
  workflow_run:
    workflows: ["Build PR Documentation"]
    types:
      - completed

jobs:
  build:
    uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
    with:
      package_name: huggingface_hub
    secrets:
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
      comment_bot_app_id: ${{ secrets.COMMENT_BOT_APP_ID }}
      comment_bot_secret_pem: ${{ secrets.COMMENT_BOT_SECRET_PEM }}

--------------------------------------------------------------------------------
/tests/test_windows.py:
--------------------------------------------------------------------------------
"""Contains tests that are specific to Windows machines."""

import os
import unittest

from huggingface_hub.file_download import are_symlinks_supported


def require_windows(test_case):
    if os.name != "nt":
        return unittest.skip("test requires a Windows machine")(test_case)
    else:
        return test_case


@require_windows
class WindowsTests(unittest.TestCase):
    def test_are_symlink_supported(self) -> None:
        self.assertFalse(are_symlinks_supported())

--------------------------------------------------------------------------------
/docs/source/cn/_toctree.yml:
--------------------------------------------------------------------------------
- title: "开始"
  sections:
    - local: index
      title: 索引
    - local: quick-start
      title: 快速入门指南
    - local: installation
      title: 安装
- title: "指南"
  sections:
    - local: guides/repository
      title: 储存库
    - local: guides/search
      title: 搜索
    - local: guides/collections
      title: 集合
    - local: guides/community
      title: 社区
    - local: guides/overview
      title: 概览
    - local: guides/hf_file_system
      title: Hugging Face 文件系统
--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_invalid_model_index.md:
--------------------------------------------------------------------------------
---
language: en
license: mit
library_name: timm
tags:
- pytorch
- image-classification
datasets:
- beans
metrics:
- acc
model-index:
- name: my-cool-model
  results:
  - task:
      type: image-classification
    metrics:
    - type: acc
      value: 0.9
---

# Invalid Model Index

In this example, the model index does not define a dataset field. In this case, we'll still initialize CardData, but will leave model-index/eval_results out of it.

--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-yaml
        exclude: .github/conda/meta.yaml|tests/cassettes/
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: check-case-conflict
      - id: check-merge-conflict
  - repo: https://github.com/charliermarsh/ruff-pre-commit # https://github.com/charliermarsh/ruff#usage
    rev: v0.1.13
    hooks:
      - id: ruff
  - repo: https://github.com/pappasam/toml-sort
    rev: v0.23.1
    hooks:
      - id: toml-sort-fix

--------------------------------------------------------------------------------
/tests/test_endpoint_helpers.py:
--------------------------------------------------------------------------------
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/login.md:
--------------------------------------------------------------------------------
# 로그인 및 로그아웃[[login-and-logout]]

`huggingface_hub` 라이브러리를 사용하면 사용자의 기기를 Hub에 프로그래밍적으로 로그인/로그아웃할 수 있습니다.

인증에 대한 자세한 내용은 [이 섹션](../quick-start#authentication)을 확인하세요.

## 로그인[[login]]

[[autodoc]] login

## 인터프리터_로그인[[interpreter_login]]

[[autodoc]] interpreter_login

## 노트북_로그인[[notebook_login]]

[[autodoc]] notebook_login

## 로그아웃[[logout]]

[[autodoc]] logout

--------------------------------------------------------------------------------
/src/huggingface_hub/cli/__init__.py:
--------------------------------------------------------------------------------
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
blank_issues_enabled: true
contact_links:
  - name: api-inference-community
    url: https://github.com/huggingface/api-inference-community/issues
    about: For all issues related to the inference API
  - name: Website Related
    url: https://github.com/huggingface/hub-docs/issues
    about: Feature requests and bug reports related to the website
  - name: Forum
    url: https://discuss.huggingface.co/
    about: General usage questions and community discussions
  - name: Blank issue
    url: https://github.com/huggingface/huggingface_hub/issues/new
    about: Please note that the Forum is, in most cases, the right place for discussions

--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
---
name: "\U0001F680 Feature request"
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_providers/zai_org.py:
--------------------------------------------------------------------------------
from typing import Any, Dict

from huggingface_hub.inference._providers._common import BaseConversationalTask


class ZaiConversationalTask(BaseConversationalTask):
    def __init__(self):
        super().__init__(provider="zai-org", base_url="https://api.z.ai")

    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
        headers = super()._prepare_headers(headers, api_key)
        headers["Accept-Language"] = "en-US,en"
        headers["x-source-channel"] = "hugging_face"
        return headers

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        return "/api/paas/v4/chat/completions"

--------------------------------------------------------------------------------
/docs/source/en/package_reference/jobs.md:
--------------------------------------------------------------------------------
# Jobs

Check the [`HfApi`] documentation page for the reference of methods to manage your Jobs on the Hub. A minimal usage sketch follows the list below.

- Run a Job: [`run_job`]
- Fetch logs: [`fetch_job_logs`]
- Inspect Job: [`inspect_job`]
- List Jobs: [`list_jobs`]
- Cancel Job: [`cancel_job`]
- Run a UV Job: [`run_uv_job`]
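
Below is a minimal sketch of this flow, based on the methods listed above. The Docker image, command, and printed message are placeholders; adapt them to your own setup.

```python
from huggingface_hub import fetch_job_logs, run_job

# Run a simple Job on Hugging Face infrastructure (image and command are illustrative).
job = run_job(image="python:3.12", command=["python", "-c", "print('Hello from a Job!')"])

# Stream the logs back until the Job finishes.
for log in fetch_job_logs(job_id=job.id):
    print(log)
```
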
## Data structures

### JobInfo

[[autodoc]] JobInfo

### JobOwner

[[autodoc]] JobOwner


### JobStage

[[autodoc]] JobStage

### JobStatus

[[autodoc]] JobStatus

--------------------------------------------------------------------------------
/docs/source/en/package_reference/hf_file_system.md:
--------------------------------------------------------------------------------
# Filesystem API

The `HfFileSystem` class provides a pythonic file interface to the Hugging Face Hub based on [`fsspec`](https://filesystem-spec.readthedocs.io/en/latest/).

## HfFileSystem

`HfFileSystem` is based on [fsspec](https://filesystem-spec.readthedocs.io/en/latest/), so it is compatible with most of the APIs that it offers. For more details, check out [our guide](../guides/hf_file_system) and fsspec's [API Reference](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem).
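
For instance, a short sketch of typical usage through the standard fsspec calls (the repo path and filename are placeholders):

```python
from huggingface_hub import HfFileSystem

fs = HfFileSystem()

# List files in a repo on the Hub (the repo path below is illustrative).
files = fs.ls("datasets/my-username/my-dataset", detail=False)

# Read a remote file exactly as you would a local one.
with fs.open("datasets/my-username/my-dataset/data.csv", "r") as f:
    first_line = f.readline()
```
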
[[autodoc]] HfFileSystem

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/hf_file_system.md:
--------------------------------------------------------------------------------
# 파일 시스템 API[[filesystem-api]]

[`HfFileSystem`] 클래스는 [`fsspec`](https://filesystem-spec.readthedocs.io/en/latest/)을 기반으로 Hugging Face Hub에 Python 파일 인터페이스를 제공합니다.

## HfFileSystem[[hffilesystem]]

[`HfFileSystem`]은 [`fsspec`](https://filesystem-spec.readthedocs.io/en/latest/)을 기반으로 하므로 제공되는 대부분의 API와 호환됩니다. 자세한 내용은 [가이드](../guides/hf_file_system) 및 fsspec의 [API 레퍼런스](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem)를 확인하세요.

[[autodoc]] HfFileSystem
    - __init__
    - resolve_path
    - ls

--------------------------------------------------------------------------------
/.github/workflows/build_repocard_examples.yaml:
--------------------------------------------------------------------------------
name: Build and push Model Card and Dataset Card examples

on:
  push:
    branches:
      - main

env:
  HF_TOKEN: ${{ secrets.HUGGINGFACE_PRODUCTION_USER_TOKEN }}

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.13

      # Install dependencies
      - name: Configure and install dependencies
        run: |
          pip install --upgrade pip
          pip install .
          pip install Jinja2

      # Push cards
      - name: Push cards
        run: python utils/push_repocard_examples.py

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/collections.md:
--------------------------------------------------------------------------------
# 컬렉션 관리[[managing-collections]]

Hub에서 컬렉션을 관리하는 메소드에 대한 자세한 설명은 [`HfApi`] 페이지를 확인하세요.

- 컬렉션 내용 가져오기: [`get_collection`]
- 새로운 컬렉션 생성: [`create_collection`]
- 컬렉션 업데이트: [`update_collection_metadata`]
- 컬렉션 삭제: [`delete_collection`]
- 컬렉션에 항목 추가: [`add_collection_item`]
- 컬렉션의 항목 업데이트: [`update_collection_item`]
- 컬렉션에서 항목 제거: [`delete_collection_item`]


### Collection[[huggingface_hub.Collection]]

[[autodoc]] Collection

### CollectionItem[[huggingface_hub.CollectionItem]]

[[autodoc]] CollectionItem

--------------------------------------------------------------------------------
/.github/workflows/python-release.yml:
--------------------------------------------------------------------------------
name: Python release

on:
  push:
    tags:
      - v*

env:
  PYPI_TOKEN: ${{ secrets.PYPI_TOKEN_DIST_HUGGINGFACE_HUB }}

jobs:
  python_release:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install setuptools wheel

      - run: python setup.py sdist bdist_wheel

      - run: |
          pip install twine

      - name: Upload to PyPI
        run: |
          twine upload dist/* -u __token__ -p "$PYPI_TOKEN"

--------------------------------------------------------------------------------
/docs/source/en/_redirects.yml:
--------------------------------------------------------------------------------
# Move "how-to" pages to the guides/ folder
how-to-cache: guides/manage-cache
how-to-discussions-and-pull-requests: guides/community
how-to-downstream: guides/download
how-to-inference: guides/inference
how-to-manage: guides/repository
how-to-model-cards: guides/model-cards
how-to-upstream: guides/upload
search-the-hub: guides/search
guides/manage_spaces: guides/manage-spaces
package_reference/inference_api: package_reference/inference_client
package_reference/login: package_reference/authentication
# Alias for hf-transfer description
hf_transfer: package_reference/environment_variables#hfhubenablehftransfer

# Alias for auth
authentication: quick-start#authentication

# Rename webhooks_server to webhooks
guides/webhooks_server: guides/webhooks

--------------------------------------------------------------------------------
/docs/source/en/package_reference/mixins.md:
--------------------------------------------------------------------------------
# Mixins & serialization methods

## Mixins

The `huggingface_hub` library offers a range of mixins that can be used as a parent class for your objects, in order to
provide simple uploading and downloading functions. Check out our [integration guide](../guides/integrations) to learn
how to integrate any ML framework with the Hub.
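
As an illustration, here is a minimal sketch with the PyTorch mixin (the class, sizes, and repo names are placeholders):

```python
import torch.nn as nn

from huggingface_hub import PyTorchModelHubMixin


class MyModel(nn.Module, PyTorchModelHubMixin):
    def __init__(self, hidden_size: int = 16):
        super().__init__()
        self.layer = nn.Linear(hidden_size, 1)


model = MyModel()
model.save_pretrained("my-awesome-model")  # save weights + config locally
# model.push_to_hub("my-username/my-awesome-model")  # or push them to the Hub
reloaded = MyModel.from_pretrained("my-awesome-model")
```
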
### Generic

[[autodoc]] ModelHubMixin
    - all
    - _save_pretrained
    - _from_pretrained

### PyTorch

[[autodoc]] PyTorchModelHubMixin

### Fastai

[[autodoc]] from_pretrained_fastai

[[autodoc]] push_to_hub_fastai

--------------------------------------------------------------------------------
/.github/conda/meta.yaml:
--------------------------------------------------------------------------------
{% set name = "huggingface_hub" %}

package:
  name: "{{ name|lower }}"
  version: "{{ HUB_VERSION }}"

source:
  path: ../../

build:
  noarch: python

requirements:
  host:
    - python
    - pip
    - fsspec
    - filelock
    - httpx
    - tqdm
    - typing-extensions
    - packaging
    - pyyaml
    - hf-xet
  run:
    - python
    - pip
    - filelock
    - httpx
    - tqdm
    - typing-extensions
    - packaging
    - pyyaml
    - hf-xet
test:
  imports:
    - huggingface_hub

about:
  home: https://huggingface.co
  license: Apache License 2.0
  license_file: LICENSE
  summary: "Client library to download and publish models and other files on the huggingface.co hub"

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/mixins.md:
--------------------------------------------------------------------------------
# 믹스인 & 직렬화 메소드[[mixins--serialization-methods]]

## 믹스인[[mixins]]

`huggingface_hub` 라이브러리는 객체에 함수들의 업로드 및 다운로드 기능을 손쉽게 제공하기 위해서, 부모 클래스로 사용될 수 있는 다양한 믹스인을 제공합니다.
ML 프레임워크를 Hub와 통합하는 방법은 [통합 가이드](../guides/integrations)를 통해 배울 수 있습니다.

### 제네릭[[huggingface_hub.ModelHubMixin]]

[[autodoc]] ModelHubMixin
    - all
    - _save_pretrained
    - _from_pretrained

### PyTorch[[huggingface_hub.PyTorchModelHubMixin]]

[[autodoc]] PyTorchModelHubMixin

### Fastai[[huggingface_hub.from_pretrained_fastai]]

[[autodoc]] from_pretrained_fastai

[[autodoc]] push_to_hub_fastai

--------------------------------------------------------------------------------
/docs/source/en/package_reference/authentication.md:
--------------------------------------------------------------------------------
# Authentication

The `huggingface_hub` library allows users to programmatically manage authentication to the Hub. This includes logging in, logging out, switching between tokens, and listing available tokens.

For more details about authentication, check out [this section](../quick-start#authentication).
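
For example, a short sketch (the token value is a placeholder; never hard-code a real token):

```python
from huggingface_hub import login, logout

# Interactive login: prompts for a token in the terminal or notebook.
login()

# Non-interactive login, e.g. in CI (the token below is illustrative).
login(token="hf_xxx")

logout()
```
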
## login

[[autodoc]] login

## interpreter_login

[[autodoc]] interpreter_login

## notebook_login

[[autodoc]] notebook_login

## logout

[[autodoc]] logout

## auth_switch

[[autodoc]] auth_switch

## auth_list

[[autodoc]] auth_list

--------------------------------------------------------------------------------
/docs/source/en/package_reference/collections.md:
--------------------------------------------------------------------------------
# Managing collections

Check out the [`HfApi`] documentation page for the reference of methods to manage your collections on the Hub. A short sketch follows the list below.

- Get collection content: [`get_collection`]
- Create new collection: [`create_collection`]
- Update a collection: [`update_collection_metadata`]
- Delete a collection: [`delete_collection`]
- Add an item to a collection: [`add_collection_item`]
- Update an item in a collection: [`update_collection_item`]
- Remove an item from a collection: [`delete_collection_item`]
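
A minimal sketch of creating a collection and adding an item to it (title, description, and item id are placeholders):

```python
from huggingface_hub import add_collection_item, create_collection

# Create a collection; the returned object carries the slug used by the other methods.
collection = create_collection(title="My cool models", description="Models I like.")

# Add a model to it (item_id is an illustrative repo id).
add_collection_item(collection.slug, item_id="my-username/my-model", item_type="model")
```
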

### Collection

[[autodoc]] Collection

### CollectionItem

[[autodoc]] CollectionItem

--------------------------------------------------------------------------------
/tests/testing_constants.py:
--------------------------------------------------------------------------------
USER = "__DUMMY_TRANSFORMERS_USER__"
FULL_NAME = "Dummy User"
PASS = "__DUMMY_TRANSFORMERS_PASS__"

# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"

# Used to create repos that we don't own (example: for gated repo)
# Token is not critical. Also public in https://github.com/huggingface/datasets-server
OTHER_USER = "DVUser"
OTHER_TOKEN = "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"

# Used to test enterprise features, typically creating private repos by default
ENTERPRISE_USER = "EnterpriseAdmin"
ENTERPRISE_ORG = "EnterpriseOrgPrivate"
ENTERPRISE_TOKEN = "hf_enterprise_admin_token"

ENDPOINT_PRODUCTION = "https://huggingface.co"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"

ENDPOINT_PRODUCTION_URL_SCHEME = ENDPOINT_PRODUCTION + "/{repo_id}/resolve/{revision}/{filename}"

--------------------------------------------------------------------------------
/tests/fixtures/cards/sample_simple_model_index.md:
--------------------------------------------------------------------------------
---
language: en
license: mit
library_name: timm
tags:
- pytorch
- image-classification
datasets:
- beans
metrics:
- accuracy
model-index:
- name: my-cool-model
  results:
  - task:
      type: image-classification
    dataset:
      type: beans
      name: Beans
    metrics:
    - type: accuracy
      value: 0.9
  - task:
      type: image-classification
    dataset:
      type: beans
      name: Beans
      config: default
      split: test
      revision: 5503434ddd753f426f4b38109466949a1217c2bb
      args:
        date: 20220120
    metrics:
    - type: f1
      value: 0.66
---

# my-cool-model

## Model description

This is a test model card with multiple evaluations across different (dataset, metric) configurations.

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/tensorboard.md:
--------------------------------------------------------------------------------
# TensorBoard 로거[[tensorboard-logger]]

TensorBoard는 기계학습 실험을 위한 시각화 도구입니다. 주로 손실 및 정확도와 같은 지표를 추적 및 시각화하고, 모델 그래프와
히스토그램을 보여주고, 이미지를 표시하는 등 다양한 기능을 제공합니다. 또한 TensorBoard는 Hugging Face Hub와 잘 통합되어 있습니다.
`tfevents` 같은 TensorBoard 추적을 Hub에 푸시하면 Hub는 이를 자동으로 감지하여 시각화 인스턴스를 시작합니다.
TensorBoard와 Hub의 통합에 대한 자세한 정보는 [가이드](https://huggingface.co/docs/hub/tensorboard)를 확인하세요.

이 통합을 위해, `huggingface_hub`는 로그를 Hub로 푸시하기 위한 사용자 정의 로거를 제공합니다.
이 로거는 추가적인 코드 없이 [SummaryWriter](https://tensorboardx.readthedocs.io/en/latest/tensorboard.html)의 대체제로 사용될 수 있습니다.
추적은 계속해서 로컬에 저장되며 백그라운드 작업이 일정한 시간마다 Hub에 푸시하는 형태로 동작합니다.

## HFSummaryWriter[[huggingface_hub.HFSummaryWriter]]

[[autodoc]] HFSummaryWriter

--------------------------------------------------------------------------------
/.github/workflows/python-release-hf.yml:
--------------------------------------------------------------------------------
name: hf PyPI release

on:
  push:
    tags:
      - v*
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag to release (e.g., v0.30.0)"
        required: true

env:
  PYPI_TOKEN: ${{ secrets.PYPI_TOKEN_DIST_HF }}

jobs:
  python_release:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v5
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: '3.10'
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install setuptools wheel

      - run: cd utils/hf && python setup.py sdist bdist_wheel

      - run: |
          pip install twine

      - name: Upload to PyPI
        run: |
          cd utils/hf && twine upload dist/* -u __token__ -p "$PYPI_TOKEN"

--------------------------------------------------------------------------------
/docs/source/en/package_reference/space_runtime.md:
--------------------------------------------------------------------------------
# Managing your Space runtime

Check the [`HfApi`] documentation page for the reference of methods to manage your Space on the Hub.

- Duplicate a Space: [`duplicate_space`]
- Fetch current runtime: [`get_space_runtime`]
- Manage secrets: [`add_space_secret`] and [`delete_space_secret`]
- Manage hardware: [`request_space_hardware`]
- Manage state: [`pause_space`], [`restart_space`], [`set_space_sleep_time`]

## Data structures

### SpaceRuntime

[[autodoc]] SpaceRuntime

### SpaceHardware

[[autodoc]] SpaceHardware

### SpaceStage

[[autodoc]] SpaceStage

### SpaceStorage

[[autodoc]] SpaceStorage

### SpaceVariable

[[autodoc]] SpaceVariable

--------------------------------------------------------------------------------
/docs/source/en/package_reference/community.md:
--------------------------------------------------------------------------------
# Interacting with Discussions and Pull Requests

Check the [`HfApi`] documentation page for the reference of methods enabling
interaction with Pull Requests and Discussions on the Hub. A short sketch follows the list below.

- [`get_repo_discussions`]
- [`get_discussion_details`]
- [`create_discussion`]
- [`create_pull_request`]
- [`rename_discussion`]
- [`comment_discussion`]
- [`edit_discussion_comment`]
- [`change_discussion_status`]
- [`merge_pull_request`]
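
For example, a small sketch of iterating over a repo's Discussions and Pull Requests (the repo id is a placeholder):

```python
from huggingface_hub import get_repo_discussions

# Yields Discussion objects for every Discussion and Pull Request of the repo.
for discussion in get_repo_discussions(repo_id="my-username/my-model"):
    print(discussion.num, discussion.title, discussion.is_pull_request)
```
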
## Data structures

[[autodoc]] Discussion

[[autodoc]] DiscussionWithDetails

[[autodoc]] DiscussionEvent

[[autodoc]] DiscussionComment

[[autodoc]] DiscussionStatusChange

[[autodoc]] DiscussionCommit

[[autodoc]] DiscussionTitleChange

--------------------------------------------------------------------------------
/docs/source/en/package_reference/mcp.md:
--------------------------------------------------------------------------------
# MCP Client

The `huggingface_hub` library now includes an [`MCPClient`], designed to empower Large Language Models (LLMs) with the ability to interact with external Tools via the [Model Context Protocol](https://modelcontextprotocol.io) (MCP). This client extends an [`AsyncInferenceClient`] to seamlessly integrate Tool usage.

The [`MCPClient`] connects to MCP servers (local `stdio` scripts or remote `http`/`sse` services) that expose tools. It feeds these tools to an LLM (via [`AsyncInferenceClient`]). If the LLM decides to use a tool, [`MCPClient`] manages the execution request to the MCP server and relays the Tool's output back to the LLM, often streaming results in real-time.
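
As a rough sketch of that flow: the model, provider, and tool-server command below are placeholders, and the method names reflect our reading of the client API rather than a definitive reference.

```python
import asyncio

from huggingface_hub import MCPClient


async def main():
    # Model and provider are illustrative; pick any conversational model/provider pair.
    client = MCPClient(model="Qwen/Qwen2.5-72B-Instruct", provider="nebius")

    # Register a local stdio MCP server (command and args are illustrative).
    await client.add_mcp_server(type="stdio", command="python", args=["my_mcp_server.py"])

    # Run one turn; tool calls are executed against the MCP server and results streamed back.
    async for chunk in client.process_single_turn_with_tools(
        [{"role": "user", "content": "What tools do you have?"}]
    ):
        print(chunk)


asyncio.run(main())
```
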
We also provide a higher-level [`Agent`] class. This 'Tiny Agent' simplifies creating conversational Agents by managing the chat loop and state, acting as a wrapper around [`MCPClient`].



## MCP Client

[[autodoc]] MCPClient

## Agent

[[autodoc]] Agent

--------------------------------------------------------------------------------
/tests/test_utils_experimental.py:
--------------------------------------------------------------------------------
import unittest
import warnings
from unittest.mock import patch

from huggingface_hub.utils import experimental


@experimental
def dummy_function():
    return "success"


class TestExperimentalFlag(unittest.TestCase):
    def test_experimental_warning(self):
        with patch("huggingface_hub.constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING", False):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.assertEqual(dummy_function(), "success")
                self.assertEqual(len(w), 1)

    def test_experimental_no_warning(self):
        with patch("huggingface_hub.constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING", True):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.assertEqual(dummy_function(), "success")
                self.assertEqual(len(w), 0)

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/community.md:
--------------------------------------------------------------------------------
# Discussions 및 Pull Requests를 이용하여 상호작용하기[[interacting-with-discussions-and-pull-requests]]

Hub에서 Discussions 및 Pull Requests를 이용하여 상호 작용할 수 있는 방법에 대해 참조하고자 한다면 [`HfApi`] 문서 페이지를 확인하세요.

- [`get_repo_discussions`]
- [`get_discussion_details`]
- [`create_discussion`]
- [`create_pull_request`]
- [`rename_discussion`]
- [`comment_discussion`]
- [`edit_discussion_comment`]
- [`change_discussion_status`]
- [`merge_pull_request`]

## 데이터 구조[[huggingface_hub.Discussion]]

[[autodoc]] Discussion

[[autodoc]] DiscussionWithDetails

[[autodoc]] DiscussionEvent

[[autodoc]] DiscussionComment

[[autodoc]] DiscussionStatusChange

[[autodoc]] DiscussionCommit

[[autodoc]] DiscussionTitleChange

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_generated/types/audio_to_audio.py:
--------------------------------------------------------------------------------
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#  - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#  - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from typing import Any

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class AudioToAudioInput(BaseInferenceType):
    """Inputs for Audio to Audio inference"""

    inputs: Any
    """The input audio data"""


@dataclass_with_extra
class AudioToAudioOutputElement(BaseInferenceType):
    """Outputs of inference for the Audio To Audio task
    A generated audio file with its label.
    """

    blob: Any
    """The generated audio file."""
    content_type: str
    """The content type of audio file."""
    label: str
    """The label of the audio file."""

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_generated/types/depth_estimation.py:
--------------------------------------------------------------------------------
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#  - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#  - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from typing import Any, Optional

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class DepthEstimationInput(BaseInferenceType):
    """Inputs for Depth Estimation inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[dict[str, Any]] = None
    """Additional inference parameters for Depth Estimation"""


@dataclass_with_extra
class DepthEstimationOutput(BaseInferenceType):
    """Outputs of inference for the Depth Estimation task"""

    depth: Any
    """The predicted depth as an image"""
    predicted_depth: Any
    """The predicted depth as a tensor"""

--------------------------------------------------------------------------------
/docs/source/ko/package_reference/space_runtime.md:
--------------------------------------------------------------------------------
# Space 런타임 관리[[managing-your-space-runtime]]

Hub의 Space를 관리하는 메소드에 대한 자세한 설명은 [`HfApi`] 페이지를 확인하세요.

- Space 복제: [`duplicate_space`]
- 현재 런타임 가져오기: [`get_space_runtime`]
- 보안 관리: [`add_space_secret`] 및 [`delete_space_secret`]
- 하드웨어 관리: [`request_space_hardware`]
- 상태 관리: [`pause_space`], [`restart_space`], [`set_space_sleep_time`]

## 데이터 구조[[data-structures]]

### SpaceRuntime[[huggingface_hub.SpaceRuntime]]

[[autodoc]] SpaceRuntime

### SpaceHardware[[huggingface_hub.SpaceHardware]]

[[autodoc]] SpaceHardware

### SpaceStage[[huggingface_hub.SpaceStage]]

[[autodoc]] SpaceStage

### SpaceStorage[[huggingface_hub.SpaceStorage]]

[[autodoc]] SpaceStorage

### SpaceVariable[[huggingface_hub.SpaceVariable]]

[[autodoc]] SpaceVariable

--------------------------------------------------------------------------------
/src/huggingface_hub/serialization/__init__.py:
--------------------------------------------------------------------------------
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa: F401
"""Contains helpers to serialize tensors."""

from ._base import StateDictSplit, split_state_dict_into_shards_factory
from ._torch import (
    get_torch_storage_id,
    get_torch_storage_size,
    load_state_dict_from_file,
    load_torch_model,
    save_torch_model,
    save_torch_state_dict,
    split_torch_state_dict_into_shards,
)

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
.PHONY: quality style test


check_dirs := src tests utils setup.py


quality:
	ruff check $(check_dirs)  # linter
	ruff format --check $(check_dirs)  # formatter
	python utils/check_inference_input_params.py
	python utils/check_static_imports.py
	python utils/check_all_variable.py
	python utils/generate_async_inference_client.py
	python utils/generate_cli_reference.py

	ty check src

style:
	ruff format $(check_dirs)  # formatter
	ruff check --fix $(check_dirs)  # linter
	python utils/check_static_imports.py --update
	python utils/check_all_variable.py --update
	python utils/generate_async_inference_client.py --update
	python utils/generate_cli_reference.py --update

inference_check:
	python utils/generate_inference_types.py
	python utils/check_task_parameters.py

inference_update:
	python utils/generate_inference_types.py --update
	python utils/check_task_parameters.py --update


repocard:
	python utils/push_repocard_examples.py


test:
	pytest ./tests/

--------------------------------------------------------------------------------
/docs/source/en/package_reference/file_download.md:
--------------------------------------------------------------------------------
# Downloading files

## Download a single file

### hf_hub_download

[[autodoc]] huggingface_hub.hf_hub_download

### hf_hub_url

[[autodoc]] huggingface_hub.hf_hub_url

## Download a snapshot of the repo

[[autodoc]] huggingface_hub.snapshot_download

## Get metadata about a file

### get_hf_file_metadata

[[autodoc]] huggingface_hub.get_hf_file_metadata

### HfFileMetadata

[[autodoc]] huggingface_hub.HfFileMetadata

## Caching

The methods displayed above are designed to work with a caching system that prevents
re-downloading files. The caching system was updated in v0.8.0 to become the central
cache-system shared across libraries that depend on the Hub.

Read the [cache-system guide](../guides/manage-cache) for a detailed presentation of caching at HF.
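
To make the flow concrete, a short sketch using `google-bert/bert-base-uncased` (any public repo works):

```python
from huggingface_hub import hf_hub_download, snapshot_download

# Download a single file; returns the local path inside the shared cache.
config_path = hf_hub_download(repo_id="google-bert/bert-base-uncased", filename="config.json")

# Download a whole repo snapshot; re-runs hit the cache instead of the network.
local_dir = snapshot_download(repo_id="google-bert/bert-base-uncased")
```
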

--------------------------------------------------------------------------------
/docs/source/de/_toctree.yml:
--------------------------------------------------------------------------------
- title: "Starten"
  sections:
    - local: index
      title: Home
    - local: quick-start
      title: Kurzanleitung
    - local: installation
      title: Installation
- title: "Anleitungen"
  sections:
    - local: guides/overview
      title: Übersicht
    - local: guides/download
      title: Dateien herunterladen
    - local: guides/upload
      title: Dateien hochladen
    - local: guides/hf_file_system
      title: HfFileSystem
    - local: guides/repository
      title: Repository
    - local: guides/search
      title: Suche
    - local: guides/inference
      title: Inferenz
    - local: guides/community
      title: Community-Tab
    - local: guides/manage-cache
      title: Cache
    - local: guides/model-cards
      title: Model Cards
    - local: guides/manage-spaces
      title: Verwalten Ihres Spaces
    - local: guides/integrations
      title: Integrieren einer Bibliothek
    - local: guides/webhooks_server
      title: Webhooks server

--------------------------------------------------------------------------------
/src/huggingface_hub/cli/system.py:
--------------------------------------------------------------------------------
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains commands to print information about the environment and version.

Usage:
    hf env
    hf version
"""

from huggingface_hub import __version__

from ..utils import dump_environment_info


def env() -> None:
    """Print information about the environment."""
    dump_environment_info()


def version() -> None:
    """Print CLI version."""
    print(__version__)

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_mcp/types.py:
--------------------------------------------------------------------------------
from typing import Literal, TypedDict, Union

from typing_extensions import NotRequired


class InputConfig(TypedDict, total=False):
    id: str
    description: str
    type: str
    password: bool


class StdioServerConfig(TypedDict):
    type: Literal["stdio"]
    command: str
    args: list[str]
    env: dict[str, str]
    cwd: str
    allowed_tools: NotRequired[list[str]]


class HTTPServerConfig(TypedDict):
    type: Literal["http"]
    url: str
    headers: dict[str, str]
    allowed_tools: NotRequired[list[str]]


class SSEServerConfig(TypedDict):
    type: Literal["sse"]
    url: str
    headers: dict[str, str]
    allowed_tools: NotRequired[list[str]]


ServerConfig = Union[StdioServerConfig, HTTPServerConfig, SSEServerConfig]


# AgentConfig root object
class AgentConfig(TypedDict):
    model: str
    provider: str
    apiKey: NotRequired[str]
    inputs: list[InputConfig]
    servers: list[ServerConfig]

--------------------------------------------------------------------------------
/src/huggingface_hub/utils/insecure_hashlib.py:
--------------------------------------------------------------------------------
# Taken from https://github.com/mlflow/mlflow/pull/10119
#
# DO NOT use this function for security purposes (e.g., password hashing).
#
# In Python >= 3.9, insecure hashing algorithms such as MD5 fail in FIPS-compliant
# environments unless `usedforsecurity=False` is explicitly passed.
#
# References:
# - https://github.com/mlflow/mlflow/issues/9905
# - https://github.com/mlflow/mlflow/pull/10119
# - https://docs.python.org/3/library/hashlib.html
# - https://github.com/huggingface/transformers/pull/27038
#
# Usage:
# ```python
# # Use
# from huggingface_hub.utils.insecure_hashlib import sha256
# # instead of
# from hashlib import sha256
#
# # Use
# from huggingface_hub.utils import insecure_hashlib
# # instead of
# import hashlib
# ```
import functools
import hashlib


md5 = functools.partial(hashlib.md5, usedforsecurity=False)
sha1 = functools.partial(hashlib.sha1, usedforsecurity=False)
sha256 = functools.partial(hashlib.sha256, usedforsecurity=False)

--------------------------------------------------------------------------------
/src/huggingface_hub/inference/_generated/types/sentence_similarity.py:
--------------------------------------------------------------------------------
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#  - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#  - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6 | from typing import Any, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class SentenceSimilarityInputData(BaseInferenceType): 13 | sentences: list[str] 14 | """A list of strings which will be compared against the source_sentence.""" 15 | source_sentence: str 16 | """The string that you wish to compare the other strings with. This can be a phrase, 17 | sentence, or longer passage, depending on the model being used. 18 | """ 19 | 20 | 21 | @dataclass_with_extra 22 | class SentenceSimilarityInput(BaseInferenceType): 23 | """Inputs for Sentence similarity inference""" 24 | 25 | inputs: SentenceSimilarityInputData 26 | parameters: Optional[dict[str, Any]] = None 27 | """Additional inference parameters for Sentence Similarity""" 28 | -------------------------------------------------------------------------------- /docs/source/ko/package_reference/file_download.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # 파일 다운로드 하기[[downloading-files]] 6 | 7 | ## 단일 파일 다운로드하기[[download-a-single-file]] 8 | 9 | ### hf_hub_download[[huggingface_hub.hf_hub_download]] 10 | 11 | [[autodoc]] huggingface_hub.hf_hub_download 12 | 13 | ### hf_hub_url[[huggingface_hub.hf_hub_url]] 14 | 15 | [[autodoc]] huggingface_hub.hf_hub_url 16 | 17 | ## 리포지토리의 스냅샷 다운로드하기[[huggingface_hub.snapshot_download]] 18 | 19 | [[autodoc]] huggingface_hub.snapshot_download 20 | 21 | ## 파일에 대한 메타데이터 가져오기[[get-metadata-about-a-file]] 22 | 23 | ### get_hf_file_metadata[[huggingface_hub.get_hf_file_metadata]] 24 | 25 | [[autodoc]] huggingface_hub.get_hf_file_metadata 26 | 27 | ### HfFileMetadata[[huggingface_hub.HfFileMetadata]] 28 | 29 | [[autodoc]] huggingface_hub.HfFileMetadata 30 | 31 | ## 캐싱[[caching]] 32 | 33 | 위에 나열된 메소드들은 파일을 재다운로드하지 않도록 하는 캐싱 시스템과 함께 작동하도록 설계되었습니다. v0.8.0에서의 업데이트로, 캐싱 시스템은 Hub를 기반으로 하는 다양한 라이브러리 간의 공유 중앙 캐시 시스템으로 발전했습니다. 34 | 35 | Hugging Face에서의 캐싱에 대한 자세한 설명은 [캐시 시스템 가이드](../guides/manage-cache)를 참조하세요. 36 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/openai.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from huggingface_hub.hf_api import InferenceProviderMapping 4 | from huggingface_hub.inference._providers._common import BaseConversationalTask 5 | 6 | 7 | class OpenAIConversationalTask(BaseConversationalTask): 8 | def __init__(self): 9 | super().__init__(provider="openai", base_url="https://api.openai.com") 10 | 11 | def _prepare_api_key(self, api_key: Optional[str]) -> str: 12 | if api_key is None: 13 | raise ValueError("You must provide an api_key to work with OpenAI API.") 14 | if api_key.startswith("hf_"): 15 | raise ValueError( 16 | "OpenAI provider is not available through Hugging Face routing, please use your own OpenAI API key." 17 | ) 18 | return api_key 19 | 20 | def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping: 21 | if model is None: 22 | raise ValueError("Please provide an OpenAI model ID, e.g.
`gpt-4o` or `o1`.") 23 | return InferenceProviderMapping( 24 | provider="openai", providerId=model, task="conversational", status="live", hf_model_id=model 25 | ) 26 | -------------------------------------------------------------------------------- /.github/workflows/release-conda.yml: -------------------------------------------------------------------------------- 1 | name: Release Conda 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | branches: 8 | - conda_* 9 | 10 | env: 11 | ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_API_TOKEN }} 12 | 13 | jobs: 14 | build_and_package: 15 | runs-on: ubuntu-latest 16 | defaults: 17 | run: 18 | shell: bash -l {0} 19 | 20 | steps: 21 | - name: Checkout repository 22 | uses: actions/checkout@v1 23 | 24 | - name: Install miniconda 25 | uses: conda-incubator/setup-miniconda@v2 26 | with: 27 | auto-update-conda: true 28 | auto-activate-base: false 29 | python-version: 3.9 30 | activate-environment: "build-hub" 31 | 32 | - name: Setup conda env 33 | run: | 34 | conda install -c defaults anaconda-client conda-build 35 | 36 | - name: Extract version 37 | run: echo "HUB_VERSION=`python setup.py --version`" >> $GITHUB_ENV 38 | 39 | - name: Build conda packages 40 | run: | 41 | conda info 42 | conda-build .github/conda 43 | 44 | - name: Upload to Anaconda 45 | run: | 46 | anaconda upload `conda-build .github/conda --output` --force 47 | -------------------------------------------------------------------------------- /.github/workflows/claude.yml: -------------------------------------------------------------------------------- 1 | name: Claude PR Assistant 2 | 3 | on: 4 | issue_comment: 5 | types: [created] 6 | pull_request_review_comment: 7 | types: [created] 8 | issues: 9 | types: [opened, assigned] 10 | pull_request_review: 11 | types: [submitted] 12 | 13 | jobs: 14 | claude-code-action: 15 | if: | 16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || 17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || 18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || 19 | (github.event_name == 'issues' && contains(github.event.issue.body, '@claude')) 20 | runs-on: ubuntu-latest 21 | permissions: 22 | contents: read 23 | pull-requests: read 24 | issues: read 25 | id-token: write 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v4 29 | with: 30 | fetch-depth: 1 31 | 32 | - name: Run Claude PR Action 33 | uses: anthropics/claude-code-action@beta 34 | with: 35 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} 36 | timeout_minutes: "60" 37 | -------------------------------------------------------------------------------- /docs/source/en/package_reference/tensorboard.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # TensorBoard logger 6 | 7 | TensorBoard is a visualization toolkit for machine learning experimentation. TensorBoard allows tracking and visualizing 8 | metrics such as loss and accuracy, visualizing the model graph, viewing histograms, displaying images and much more. 9 | TensorBoard is well integrated with the Hugging Face Hub. The Hub automatically detects TensorBoard traces (such as 10 | `tfevents` files) when they are pushed and starts an instance to visualize them. To get more information about TensorBoard 11 | integration on the Hub, check out [this guide](https://huggingface.co/docs/hub/tensorboard).
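As a quick preview of the logger documented below, here is a minimal sketch of what logging to the Hub looks like. The `repo_id` value is a placeholder for your own repo, and `commit_every=15` (in minutes) is an arbitrary choice:

```py
from huggingface_hub import HFSummaryWriter

# Drop-in replacement for SummaryWriter: logs are saved locally and
# committed to the Hub repo by a background job every 15 minutes.
logger = HFSummaryWriter(repo_id="my-username/my-trainings", commit_every=15)
logger.add_scalar("train/loss", 0.15, global_step=1)
```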
12 | 13 | To benefit from this integration, `huggingface_hub` provides a custom logger to push logs to the Hub. It works as a 14 | drop-in replacement for [SummaryWriter](https://tensorboardx.readthedocs.io/en/latest/tensorboard.html) with no extra 15 | code needed. Traces are still saved locally and a background job pushes them to the Hub at regular intervals. 16 | 17 | ## HFSummaryWriter 18 | 19 | [[autodoc]] HFSummaryWriter 20 | -------------------------------------------------------------------------------- /utils/hf/setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from setuptools import setup 4 | 5 | 6 | def get_version() -> str: 7 | rel_path = os.path.join(os.path.dirname(__file__), "../../src/huggingface_hub/__init__.py") 8 | with open(rel_path, "r") as fp: 9 | for line in fp.read().splitlines(): 10 | if line.startswith("__version__"): 11 | delim = '"' if '"' in line else "'" 12 | return line.split(delim)[1] 13 | raise RuntimeError("Unable to find version string.") 14 | 15 | 16 | install_requires = [ 17 | f"huggingface_hub=={get_version()}", 18 | ] 19 | 20 | setup( 21 | name="hf", 22 | version=get_version(), 23 | author="Hugging Face, Inc.", 24 | author_email="julien@huggingface.co", 25 | description="CLI extracted from the huggingface_hub library to interact with the Hugging Face Hub", 26 | long_description=open("README.md", "r", encoding="utf-8").read(), 27 | long_description_content_type="text/markdown", 28 | license="Apache", 29 | url="https://github.com/huggingface/huggingface_hub", 30 | packages=["hf"], # dummy package to raise ImportError on import 31 | entry_points={"console_scripts": ["hf=huggingface_hub.cli.hf:main"]}, 32 | python_requires=">=3.9.0", 33 | install_requires=install_requires, 34 | classifiers=[], 35 | ) 36 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/fireworks_ai.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional 2 | 3 | from huggingface_hub.hf_api import InferenceProviderMapping 4 | 5 | from ._common import BaseConversationalTask 6 | 7 | 8 | class FireworksAIConversationalTask(BaseConversationalTask): 9 | def __init__(self): 10 | super().__init__(provider="fireworks-ai", base_url="https://api.fireworks.ai") 11 | 12 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 13 | return "/inference/v1/chat/completions" 14 | 15 | def _prepare_payload_as_dict( 16 | self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping 17 | ) -> Optional[dict]: 18 | payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) 19 | response_format = parameters.get("response_format") 20 | if isinstance(response_format, dict) and response_format.get("type") == "json_schema": 21 | json_schema_details = response_format.get("json_schema") 22 | if isinstance(json_schema_details, dict) and "schema" in json_schema_details: 23 | payload["response_format"] = { # type: ignore [index] 24 | "type": "json_object", 25 | "schema": json_schema_details["schema"], 26 | } 27 | return payload 28 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/scaleway.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Optional, Union 2 | 3 | from huggingface_hub.inference._common import
RequestParameters, _as_dict 4 | 5 | from ._common import BaseConversationalTask, InferenceProviderMapping, TaskProviderHelper, filter_none 6 | 7 | 8 | class ScalewayConversationalTask(BaseConversationalTask): 9 | def __init__(self): 10 | super().__init__(provider="scaleway", base_url="https://api.scaleway.ai") 11 | 12 | 13 | class ScalewayFeatureExtractionTask(TaskProviderHelper): 14 | def __init__(self): 15 | super().__init__(provider="scaleway", base_url="https://api.scaleway.ai", task="feature-extraction") 16 | 17 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 18 | return "/v1/embeddings" 19 | 20 | def _prepare_payload_as_dict( 21 | self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping 22 | ) -> Optional[Dict]: 23 | parameters = filter_none(parameters) 24 | return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters} 25 | 26 | def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: 27 | embeddings = _as_dict(response)["data"] 28 | return [embedding["embedding"] for embedding in embeddings] 29 | -------------------------------------------------------------------------------- /docs/source/ko/package_reference/serialization.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # 직렬화[[serialization]] 6 | 7 | `huggingface_hub`에는 ML 라이브러리가 모델 가중치를 표준화된 방식으로 직렬화 할 수 있도록 돕는 헬퍼를 포함하고 있습니다. 라이브러리의 이 부분은 아직 개발 중이며 향후 버전에서 개선될 예정입니다. 개선 목표는 Hub에서 가중치의 직렬화 방식을 통일하고, 라이브러리 간 코드 중복을 줄이며, Hub에서의 규약을 촉진하는 것입니다. 8 | 9 | ## 상태 사전을 샤드로 나누기[[split-state-dict-into-shards]] 10 | 11 | 현재 이 모듈은 상태 딕셔너리(예: 레이어 이름과 관련 텐서 간의 매핑)를 받아 여러 샤드로 나누고, 이 과정에서 적절한 인덱스를 생성하는 단일 헬퍼를 포함하고 있습니다. 이 헬퍼는 `torch` 텐서에 사용 가능하며, 다른 ML 프레임워크로 쉽게 확장될 수 있도록 설계되었습니다. 12 | 13 | ### split_torch_state_dict_into_shards[[huggingface_hub.split_torch_state_dict_into_shards]] 14 | 15 | [[autodoc]] huggingface_hub.split_torch_state_dict_into_shards 16 | 17 | ### split_state_dict_into_shards_factory[[huggingface_hub.split_state_dict_into_shards_factory]] 18 | 19 | 이것은 각 프레임워크별 헬퍼가 파생되는 기본 틀입니다. 실제로는 아직 지원되지 않는 프레임워크에 맞게 조정할 필요가 있는 경우가 아니면 이 틀을 직접 사용할 것으로 예상되지 않습니다. 그런 경우가 있다면, `huggingface_hub` 리포지토리에 [새로운 이슈를 개설](https://github.com/huggingface/huggingface_hub/issues/new) 하여 알려주세요. 
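아래는 위에서 소개한 `split_torch_state_dict_into_shards` 헬퍼의 사용 방법을 보여주는 최소한의 스케치입니다. `max_shard_size` 값과 저장 로직은 설명을 위한 가정이며, 실제 구현에서는 `state_dict_split.is_sharded`를 확인하여 인덱스 파일도 함께 저장해야 할 수 있습니다:

```py
import os

import torch

from huggingface_hub import split_torch_state_dict_into_shards


def save_state_dict(state_dict: dict, save_directory: str) -> None:
    # Split the state dict into shards of at most 5GB each and build the index
    state_dict_split = split_torch_state_dict_into_shards(state_dict, max_shard_size="5GB")
    for filename, tensor_names in state_dict_split.filename_to_tensors.items():
        shard = {name: state_dict[name] for name in tensor_names}
        torch.save(shard, os.path.join(save_directory, filename))
```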
20 | 21 | [[autodoc]] huggingface_hub.split_state_dict_into_shards_factory 22 | 23 | ## 도우미 24 | 25 | ### get_torch_storage_id[[huggingface_hub.get_torch_storage_id]] 26 | 27 | [[autodoc]] huggingface_hub.get_torch_storage_id -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/cohere.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional 2 | 3 | from huggingface_hub.hf_api import InferenceProviderMapping 4 | 5 | from ._common import BaseConversationalTask 6 | 7 | 8 | _PROVIDER = "cohere" 9 | _BASE_URL = "https://api.cohere.com" 10 | 11 | 12 | class CohereConversationalTask(BaseConversationalTask): 13 | def __init__(self): 14 | super().__init__(provider=_PROVIDER, base_url=_BASE_URL) 15 | 16 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 17 | return "/compatibility/v1/chat/completions" 18 | 19 | def _prepare_payload_as_dict( 20 | self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping 21 | ) -> Optional[dict]: 22 | payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) 23 | response_format = parameters.get("response_format") 24 | if isinstance(response_format, dict) and response_format.get("type") == "json_schema": 25 | json_schema_details = response_format.get("json_schema") 26 | if isinstance(json_schema_details, dict) and "schema" in json_schema_details: 27 | payload["response_format"] = { # type: ignore [index] 28 | "type": "json_object", 29 | "schema": json_schema_details["schema"], 30 | } 31 | 32 | return payload 33 | -------------------------------------------------------------------------------- /tests/test_offline_utils.py: -------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | 3 | import httpx 4 | import pytest 5 | 6 | from huggingface_hub.file_download import http_get 7 | 8 | from .testing_utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline 9 | 10 | 11 | def test_offline_with_timeout(): 12 | with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT): 13 | with pytest.raises(RequestWouldHangIndefinitelyError): 14 | httpx.request("GET", "https://huggingface.co") 15 | with pytest.raises(httpx.ConnectTimeout): 16 | httpx.request("GET", "https://huggingface.co", timeout=1.0) 17 | with pytest.raises(httpx.ConnectTimeout): 18 | http_get("https://huggingface.co", BytesIO()) 19 | 20 | 21 | def test_offline_with_connection_error(): 22 | with offline(OfflineSimulationMode.CONNECTION_FAILS): 23 | with pytest.raises(httpx.ConnectError): 24 | httpx.request("GET", "https://huggingface.co") 25 | with pytest.raises(httpx.ConnectError): 26 | http_get("https://huggingface.co", BytesIO()) 27 | 28 | 29 | def test_offline_with_datasets_offline_mode_enabled(): 30 | with offline(OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1): 31 | from huggingface_hub.errors import OfflineModeIsEnabled 32 | 33 | with pytest.raises(OfflineModeIsEnabled): 34 | http_get("https://huggingface.co", BytesIO()) 35 | -------------------------------------------------------------------------------- /tests/test_utils_chunks.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from huggingface_hub.utils._chunk_utils import chunk_iterable 4 | 5 | 6 | class TestUtilsCommon(unittest.TestCase): 7 | def test_chunk_iterable_non_truncated(self): 8 | # Can iterate over any iterable
(iterator, list, tuple,...) 9 | for iterable in (range(12), list(range(12)), tuple(range(12))): 10 | # 12 is a multiple of 4 -> last chunk is not truncated 11 | for chunk, expected_chunk in zip( 12 | chunk_iterable(iterable, chunk_size=4), 13 | [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], 14 | ): 15 | self.assertListEqual(list(chunk), expected_chunk) 16 | 17 | def test_chunk_iterable_last_chunk_truncated(self): 18 | # Can iterate over any iterable (iterator, list, tuple,...) 19 | for iterable in (range(12), list(range(12)), tuple(range(12))): 20 | # 12 is NOT a multiple of 5 -> last chunk is truncated 21 | for chunk, expected_chunk in zip( 22 | chunk_iterable(iterable, chunk_size=5), 23 | [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11]], 24 | ): 25 | self.assertListEqual(list(chunk), expected_chunk) 26 | 27 | def test_chunk_iterable_validation(self): 28 | with self.assertRaises(ValueError): 29 | next(chunk_iterable(range(128), 0)) 30 | 31 | with self.assertRaises(ValueError): 32 | next(chunk_iterable(range(128), -1)) 33 | -------------------------------------------------------------------------------- /tests/test_login_utils.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import unittest 3 | from typing import Optional 4 | 5 | from huggingface_hub._login import _set_store_as_git_credential_helper_globally 6 | from huggingface_hub.utils import run_subprocess 7 | 8 | 9 | class TestSetGlobalStore(unittest.TestCase): 10 | previous_config: Optional[str] 11 | 12 | def setUp(self) -> None: 13 | """Get current global config value.""" 14 | try: 15 | self.previous_config = run_subprocess("git config --global credential.helper").stdout 16 | except subprocess.CalledProcessError: 17 | self.previous_config = None # Means global credential.helper value not set 18 | 19 | run_subprocess("git config --global credential.helper store") 20 | 21 | def tearDown(self) -> None: 22 | """Reset global config value.""" 23 | if self.previous_config is None: 24 | run_subprocess("git config --global --unset credential.helper") 25 | else: 26 | run_subprocess(f"git config --global credential.helper {self.previous_config}") 27 | 28 | def test_set_store_as_git_credential_helper_globally(self) -> None: 29 | """Test `_set_store_as_git_credential_helper_globally` works as expected. 30 | 31 | Previous value from the machine is restored after the test. 32 | """ 33 | _set_store_as_git_credential_helper_globally() 34 | new_config = run_subprocess("git config --global credential.helper").stdout 35 | self.assertEqual(new_config, "store\n") 36 | -------------------------------------------------------------------------------- /docs/source/cn/guides/search.md: -------------------------------------------------------------------------------- 1 | 3 | 4 | # 搜索 Hub 5 | 6 | 在本教程中,您将学习如何使用 `huggingface_hub` 在 Hub 上搜索模型、数据集和Spaces。 7 | 8 | ## 如何列出仓库? 9 | 10 | `huggingface_hub`库包括一个 HTTP 客户端 [`HfApi`],用于与 Hub 交互。 除此之外,它还可以列出存储在 Hub 上的模型、数据集和Spaces: 11 | 12 | ```py 13 | >>> from huggingface_hub import HfApi 14 | >>> api = HfApi() 15 | >>> models = api.list_models() 16 | ``` 17 | 18 | [`list_models`] 返回一个迭代器,包含存储在 Hub 上的模型。 19 | 20 | 同样,您可以使用 [`list_datasets`] 列出数据集,使用 [`list_spaces`] 列出 Spaces。 21 | 22 | ## 如何过滤仓库? 23 | 24 | 列出仓库是一个好开始,但现在您可能希望对搜索结果进行过滤。 25 | 列出时,可以使用多个属性来过滤结果,例如: 26 | - `filter` 27 | - `author` 28 | - `search` 29 | - ...
30 | 31 | 让我们看一个示例,获取所有在 Hub 上进行图像分类的模型,这些模型已在 imagenet 数据集上训练,并使用 PyTorch 运行。 32 | 33 | ```py 34 | models = api.list_models( 35 | task="image-classification", 36 | library="pytorch", 37 | trained_dataset="imagenet", 38 | ) 39 | ``` 40 | 41 | 在过滤时,您还可以对模型进行排序,并仅获取前几个结果。例如,以下示例获取了 Hub 上下载量最多的前 5 个数据集: 42 | 43 | ```py 44 | >>> list(list_datasets(sort="downloads", limit=5)) 45 | [DatasetInfo( 46 | id='argilla/databricks-dolly-15k-curated-en', 47 | author='argilla', 48 | sha='4dcd1dedbe148307a833c931b21ca456a1fc4281', 49 | last_modified=datetime.datetime(2023, 10, 2, 12, 32, 53, tzinfo=datetime.timezone.utc), 50 | private=False, 51 | downloads=8889377, 52 | (...) 53 | ``` 54 | 55 | 56 | 57 | 如果您想要在Hub上探索可用的过滤器, 请在浏览器中访问 [models](https://huggingface.co/models) 和 [datasets](https://huggingface.co/datasets) 页面 58 | ,尝试不同的参数并查看URL中的值。 59 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/featherless_ai.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, Union 2 | 3 | from huggingface_hub.hf_api import InferenceProviderMapping 4 | from huggingface_hub.inference._common import RequestParameters, _as_dict 5 | 6 | from ._common import BaseConversationalTask, BaseTextGenerationTask, filter_none 7 | 8 | 9 | _PROVIDER = "featherless-ai" 10 | _BASE_URL = "https://api.featherless.ai" 11 | 12 | 13 | class FeatherlessTextGenerationTask(BaseTextGenerationTask): 14 | def __init__(self): 15 | super().__init__(provider=_PROVIDER, base_url=_BASE_URL) 16 | 17 | def _prepare_payload_as_dict( 18 | self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping 19 | ) -> Optional[dict]: 20 | params = filter_none(parameters.copy()) 21 | params["max_tokens"] = params.pop("max_new_tokens", None) 22 | 23 | return {"prompt": inputs, **params, "model": provider_mapping_info.provider_id} 24 | 25 | def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: 26 | output = _as_dict(response)["choices"][0] 27 | return { 28 | "generated_text": output["text"], 29 | "details": { 30 | "finish_reason": output.get("finish_reason"), 31 | "seed": output.get("seed"), 32 | }, 33 | } 34 | 35 | 36 | class FeatherlessConversationalTask(BaseConversationalTask): 37 | def __init__(self): 38 | super().__init__(provider=_PROVIDER, base_url=_BASE_URL) 39 | -------------------------------------------------------------------------------- /docs/source/ko/package_reference/inference_client.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # 추론[[inference]] 6 | 7 | 추론은 학습된 모델을 사용하여 새로운 데이터를 예측하는 과정입니다. 이 과정은 계산량이 많을 수 있기 때문에, 전용 서버에서 실행하는 것이 흥미로운 옵션이 될 수 있습니다. `huggingface_hub` 라이브러리는 호스팅된 모델에 대한 추론을 실행하는 간단한 방법을 제공합니다. 연결할 수 있는 서비스는 여러가지가 있습니다: 8 | 9 | - [추론 API](https://huggingface.co/docs/api-inference/index): Hugging Face의 인프라에서 가속화된 추론을 무료로 실행할 수 있는 서비스입니다. 이 서비스는 시작하기 위한 빠른 방법이며, 다양한 모델을 테스트하고 AI 제품을 프로토타입화하는 데에도 유용합니다. 10 | - [추론 엔드포인트](https://huggingface.co/inference-endpoints): 모델을 쉽게 운영 환경으로 배포할 수 있는 제품입니다. 추론은 여러분이 선택한 클라우드 제공업체의 전용 및 완전히 관리되는 인프라에서 Hugging Face에 의해 실행됩니다. 11 | 12 | 이러한 서비스는 [`InferenceClient`] 객체를 사용하여 호출할 수 있습니다. 자세한 사용 방법에 대해서는 [이 가이드](../guides/inference)를 참조해주세요.
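간단한 예시로, [`InferenceClient`]를 사용하는 최소한의 스케치입니다(모델을 명시하지 않으면 해당 작업의 권장 모델이 사용된다고 가정합니다):

```py
from huggingface_hub import InferenceClient

client = InferenceClient()
# Generate an image from a text prompt (returns a PIL.Image object)
image = client.text_to_image("An astronaut riding a horse on the moon.")
image.save("astronaut.png")
```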
13 | 14 | ## 추론 클라이언트[[huggingface_hub.InferenceClient]] 15 | 16 | [[autodoc]] InferenceClient 17 | 18 | ## 비동기 추론 클라이언트[[huggingface_hub.AsyncInferenceClient]] 19 | 20 | 비동기 버전의 클라이언트도 제공되며, 이는 `asyncio`와 `aiohttp`를 기반으로 작동합니다. 21 | 이를 사용하려면 `aiohttp`를 직접 설치하거나 `[inference]` 추가 기능을 사용할 수 있습니다: 22 | 23 | ```sh 24 | pip install --upgrade huggingface_hub[inference] 25 | # 또는 26 | # pip install aiohttp 27 | ``` 28 | 29 | [[autodoc]] AsyncInferenceClient 30 | 31 | ## 추론 시간 초과 오류[[huggingface_hub.InferenceTimeoutError]] 32 | 33 | [[autodoc]] InferenceTimeoutError 34 | 35 | ## 반환 유형[[return-types]] 36 | 37 | 대부분의 작업에 대해, 반환 값은 내장된 유형(string, list, image...)을 갖습니다. 보다 복잡한 유형을 위한 목록은 다음과 같습니다. 38 | -------------------------------------------------------------------------------- /docs/source/en/package_reference/cache.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Cache-system reference 6 | 7 | The caching system was updated in v0.8.0 to become the central cache-system shared 8 | across libraries that depend on the Hub. Read the [cache-system guide](../guides/manage-cache) 9 | for a detailed presentation of caching at HF. 10 | 11 | ## Helpers 12 | 13 | ### try_to_load_from_cache 14 | 15 | [[autodoc]] huggingface_hub.try_to_load_from_cache 16 | 17 | ### cached_assets_path 18 | 19 | [[autodoc]] huggingface_hub.cached_assets_path 20 | 21 | ### scan_cache_dir 22 | 23 | [[autodoc]] huggingface_hub.scan_cache_dir 24 | 25 | ## Data structures 26 | 27 | All structures are built and returned by [`scan_cache_dir`] and are immutable. 28 | 29 | ### HFCacheInfo 30 | 31 | [[autodoc]] huggingface_hub.HFCacheInfo 32 | 33 | ### CachedRepoInfo 34 | 35 | [[autodoc]] huggingface_hub.CachedRepoInfo 36 | - size_on_disk_str 37 | - refs 38 | 39 | ### CachedRevisionInfo 40 | 41 | [[autodoc]] huggingface_hub.CachedRevisionInfo 42 | - size_on_disk_str 43 | - nb_files 44 | 45 | ### CachedFileInfo 46 | 47 | [[autodoc]] huggingface_hub.CachedFileInfo 48 | - size_on_disk_str 49 | 50 | ### DeleteCacheStrategy 51 | 52 | [[autodoc]] huggingface_hub.DeleteCacheStrategy 53 | - expected_freed_size_str 54 | 55 | ## Exceptions 56 | 57 | ### CorruptedCacheException 58 | 59 | [[autodoc]] huggingface_hub.CorruptedCacheException 60 | -------------------------------------------------------------------------------- /tests/test_utils_sha.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from hashlib import sha256 4 | from io import BytesIO 5 | 6 | from huggingface_hub.utils import SoftTemporaryDirectory 7 | from huggingface_hub.utils.sha import git_hash, sha_fileobj 8 | 9 | 10 | def test_sha_fileobj(): 11 | with SoftTemporaryDirectory() as tmpdir: 12 | content = b"Random content" * 1000 13 | sha = sha256(content).digest() 14 | 15 | # Test with file object 16 | filepath = os.path.join(tmpdir, "file.bin") 17 | with open(filepath, "wb+") as file: 18 | file.write(content) 19 | 20 | with open(filepath, "rb") as fileobj: 21 | assert sha_fileobj(fileobj, None) == sha 22 | with open(filepath, "rb") as fileobj: 23 | assert sha_fileobj(fileobj, 50) == sha 24 | with open(filepath, "rb") as fileobj: 25 | assert sha_fileobj(fileobj, 50_000) == sha 26 | 27 | # Test with in-memory file object 28 | assert sha_fileobj(BytesIO(content), None) == sha 29 | assert sha_fileobj(BytesIO(content), 50) == sha 30 | assert sha_fileobj(BytesIO(content), 50_000) == sha 31 | 32 | 33 | def test_git_hash(tmpdir): 34 | """Test the `git_hash` output is 
the same as `git hash-object` command.""" 35 | path = os.path.join(tmpdir, "file.txt") 36 | with open(path, "wb") as file: 37 | file.write(b"Hello, World!") 38 | 39 | output = subprocess.run(f"git hash-object -t blob {path}", shell=True, capture_output=True, text=True) 40 | assert output.stdout.strip() == git_hash(b"Hello, World!") 41 | -------------------------------------------------------------------------------- /tests/test_utils_datetime.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from datetime import datetime, timezone 3 | 4 | import pytest 5 | 6 | from huggingface_hub.utils import parse_datetime 7 | 8 | 9 | class TestDatetimeUtils(unittest.TestCase): 10 | def test_parse_datetime(self): 11 | """Test `parse_datetime` works correctly on datetimes returned by server.""" 12 | self.assertEqual( 13 | parse_datetime("2022-08-19T07:19:38.123Z"), 14 | datetime(2022, 8, 19, 7, 19, 38, 123000, tzinfo=timezone.utc), 15 | ) 16 | 17 | # Test nanoseconds precision (should be truncated to microseconds) 18 | self.assertEqual( 19 | parse_datetime("2022-08-19T07:19:38.123456789Z"), 20 | datetime(2022, 8, 19, 7, 19, 38, 123456, tzinfo=timezone.utc), 21 | ) 22 | 23 | # Test without milliseconds (should add .000) 24 | self.assertEqual( 25 | parse_datetime("2024-11-16T00:27:02Z"), 26 | datetime(2024, 11, 16, 0, 27, 2, 0, tzinfo=timezone.utc), 27 | ) 28 | 29 | with pytest.raises(ValueError, match=r".*Cannot parse '2022-08-19T07:19:38' as a datetime.*"): 30 | parse_datetime("2022-08-19T07:19:38") 31 | 32 | with pytest.raises( 33 | ValueError, 34 | match=r".*Cannot parse '2022-08-19T07:19:38.123' as a datetime.*", 35 | ): 36 | parse_datetime("2022-08-19T07:19:38.123") 37 | 38 | with pytest.raises( 39 | ValueError, 40 | match=r".*Cannot parse '2022-08-19 07:19:38.123Z\+6:00' as a datetime.*", 41 | ): 42 | parse_datetime("2022-08-19 07:19:38.123Z+6:00") 43 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/text_classification.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
6 | from typing import Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | TextClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] 12 | 13 | 14 | @dataclass_with_extra 15 | class TextClassificationParameters(BaseInferenceType): 16 | """Additional inference parameters for Text Classification""" 17 | 18 | function_to_apply: Optional["TextClassificationOutputTransform"] = None 19 | """The function to apply to the model outputs in order to retrieve the scores.""" 20 | top_k: Optional[int] = None 21 | """When specified, limits the output to the top K most probable classes.""" 22 | 23 | 24 | @dataclass_with_extra 25 | class TextClassificationInput(BaseInferenceType): 26 | """Inputs for Text Classification inference""" 27 | 28 | inputs: str 29 | """The text to classify""" 30 | parameters: Optional[TextClassificationParameters] = None 31 | """Additional inference parameters for Text Classification""" 32 | 33 | 34 | @dataclass_with_extra 35 | class TextClassificationOutputElement(BaseInferenceType): 36 | """Outputs of inference for the Text Classification task""" 37 | 38 | label: str 39 | """The predicted class label.""" 40 | score: float 41 | """The corresponding probability.""" 42 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/summarization.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | SummarizationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"] 12 | 13 | 14 | @dataclass_with_extra 15 | class SummarizationParameters(BaseInferenceType): 16 | """Additional inference parameters for summarization.""" 17 | 18 | clean_up_tokenization_spaces: Optional[bool] = None 19 | """Whether to clean up the potential extra spaces in the text output.""" 20 | generate_parameters: Optional[dict[str, Any]] = None 21 | """Additional parametrization of the text generation algorithm.""" 22 | truncation: Optional["SummarizationTruncationStrategy"] = None 23 | """The truncation strategy to use.""" 24 | 25 | 26 | @dataclass_with_extra 27 | class SummarizationInput(BaseInferenceType): 28 | """Inputs for Summarization inference""" 29 | 30 | inputs: str 31 | """The input text to summarize.""" 32 | parameters: Optional[SummarizationParameters] = None 33 | """Additional inference parameters for summarization.""" 34 | 35 | 36 | @dataclass_with_extra 37 | class SummarizationOutput(BaseInferenceType): 38 | """Outputs of inference for the Summarization task""" 39 | 40 | summary_text: str 41 | """The summarized text.""" 42 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 
2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class ZeroShotImageClassificationParameters(BaseInferenceType): 13 | """Additional inference parameters for Zero Shot Image Classification""" 14 | 15 | candidate_labels: list[str] 16 | """The candidate labels for this image""" 17 | hypothesis_template: Optional[str] = None 18 | """The sentence used in conjunction with `candidate_labels` to attempt the image 19 | classification by replacing the placeholder with the candidate labels. 20 | """ 21 | 22 | 23 | @dataclass_with_extra 24 | class ZeroShotImageClassificationInput(BaseInferenceType): 25 | """Inputs for Zero Shot Image Classification inference""" 26 | 27 | inputs: str 28 | """The input image data to classify as a base64-encoded string.""" 29 | parameters: ZeroShotImageClassificationParameters 30 | """Additional inference parameters for Zero Shot Image Classification""" 31 | 32 | 33 | @dataclass_with_extra 34 | class ZeroShotImageClassificationOutputElement(BaseInferenceType): 35 | """Outputs of inference for the Zero Shot Image Classification task""" 36 | 37 | label: str 38 | """The predicted class label.""" 39 | score: float 40 | """The corresponding probability.""" 41 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/feature_extraction.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Literal, Optional, Union 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | FeatureExtractionInputTruncationDirection = Literal["left", "right"] 12 | 13 | 14 | @dataclass_with_extra 15 | class FeatureExtractionInput(BaseInferenceType): 16 | """Feature Extraction Input. 17 | Auto-generated from TEI specs. 18 | For more details, check out 19 | https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts. 20 | """ 21 | 22 | inputs: Union[list[str], str] 23 | """The text or list of texts to embed.""" 24 | normalize: Optional[bool] = None 25 | prompt_name: Optional[str] = None 26 | """The name of the prompt that should be used for encoding. If not set, no prompt 27 | will be applied. 28 | Must be a key in the `sentence-transformers` configuration `prompts` dictionary. 29 | For example if ``prompt_name`` is "query" and ``prompts`` is {"query": "query: ", 30 | ...}, 31 | then the sentence "What is the capital of France?" will be encoded as 32 | "query: What is the capital of France?" because the prompt text will be prepended before 33 | any text to encode.
34 | """ 35 | truncate: Optional[bool] = None 36 | truncation_direction: Optional["FeatureExtractionInputTruncationDirection"] = None 37 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/audio_classification.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | AudioClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] 12 | 13 | 14 | @dataclass_with_extra 15 | class AudioClassificationParameters(BaseInferenceType): 16 | """Additional inference parameters for Audio Classification""" 17 | 18 | function_to_apply: Optional["AudioClassificationOutputTransform"] = None 19 | """The function to apply to the model outputs in order to retrieve the scores.""" 20 | top_k: Optional[int] = None 21 | """When specified, limits the output to the top K most probable classes.""" 22 | 23 | 24 | @dataclass_with_extra 25 | class AudioClassificationInput(BaseInferenceType): 26 | """Inputs for Audio Classification inference""" 27 | 28 | inputs: str 29 | """The input audio data as a base64-encoded string. If no `parameters` are provided, you can 30 | also provide the audio data as a raw bytes payload. 31 | """ 32 | parameters: Optional[AudioClassificationParameters] = None 33 | """Additional inference parameters for Audio Classification""" 34 | 35 | 36 | @dataclass_with_extra 37 | class AudioClassificationOutputElement(BaseInferenceType): 38 | """Outputs for Audio Classification inference""" 39 | 40 | label: str 41 | """The predicted class label.""" 42 | score: float 43 | """The corresponding probability.""" 44 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/image_classification.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | ImageClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] 12 | 13 | 14 | @dataclass_with_extra 15 | class ImageClassificationParameters(BaseInferenceType): 16 | """Additional inference parameters for Image Classification""" 17 | 18 | function_to_apply: Optional["ImageClassificationOutputTransform"] = None 19 | """The function to apply to the model outputs in order to retrieve the scores.""" 20 | top_k: Optional[int] = None 21 | """When specified, limits the output to the top K most probable classes.""" 22 | 23 | 24 | @dataclass_with_extra 25 | class ImageClassificationInput(BaseInferenceType): 26 | """Inputs for Image Classification inference""" 27 | 28 | inputs: str 29 | """The input image data as a base64-encoded string. 
If no `parameters` are provided, you can 30 | also provide the image data as a raw bytes payload. 31 | """ 32 | parameters: Optional[ImageClassificationParameters] = None 33 | """Additional inference parameters for Image Classification""" 34 | 35 | 36 | @dataclass_with_extra 37 | class ImageClassificationOutputElement(BaseInferenceType): 38 | """Outputs of inference for the Image Classification task""" 39 | 40 | label: str 41 | """The predicted class label.""" 42 | score: float 43 | """The corresponding probability.""" 44 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/text2text_generation.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | Text2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"] 12 | 13 | 14 | @dataclass_with_extra 15 | class Text2TextGenerationParameters(BaseInferenceType): 16 | """Additional inference parameters for Text2text Generation""" 17 | 18 | clean_up_tokenization_spaces: Optional[bool] = None 19 | """Whether to clean up the potential extra spaces in the text output.""" 20 | generate_parameters: Optional[dict[str, Any]] = None 21 | """Additional parametrization of the text generation algorithm""" 22 | truncation: Optional["Text2TextGenerationTruncationStrategy"] = None 23 | """The truncation strategy to use""" 24 | 25 | 26 | @dataclass_with_extra 27 | class Text2TextGenerationInput(BaseInferenceType): 28 | """Inputs for Text2text Generation inference""" 29 | 30 | inputs: str 31 | """The input text data""" 32 | parameters: Optional[Text2TextGenerationParameters] = None 33 | """Additional inference parameters for Text2text Generation""" 34 | 35 | 36 | @dataclass_with_extra 37 | class Text2TextGenerationOutput(BaseInferenceType): 38 | """Outputs of inference for the Text2text Generation task""" 39 | 40 | generated_text: Any 41 | text2_text_generation_output_generated_text: Optional[str] = None 42 | """The generated text.""" 43 | -------------------------------------------------------------------------------- /docs/source/ko/package_reference/cache.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # 캐시 시스템 참조[[cache-system-reference]] 6 | 7 | 버전 0.8.0에서의 업데이트로, 캐시 시스템은 Hub에 의존하는 라이브러리 전체에서 공유되는 중앙 캐시 시스템으로 발전하였습니다. Hugging Face 캐싱에 대한 자세한 설명은 [캐시 시스템 가이드](../guides/manage-cache)를 참조하세요. 8 | 9 | ## 도우미 함수[[helpers]] 10 | 11 | ### try_to_load_from_cache[[huggingface_hub.try_to_load_from_cache]] 12 | 13 | [[autodoc]] huggingface_hub.try_to_load_from_cache 14 | 15 | ### cached_assets_path[[huggingface_hub.cached_assets_path]] 16 | 17 | [[autodoc]] huggingface_hub.cached_assets_path 18 | 19 | ### scan_cache_dir[[huggingface_hub.scan_cache_dir]] 20 | 21 | [[autodoc]] huggingface_hub.scan_cache_dir 22 | 23 | ## 데이터 구조[[data-structures]] 24 | 25 | 모든 구조체는 [`scan_cache_dir`]에 의해 생성되고 반환되며, 불변(immutable)입니다. 
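예를 들어, 반환된 구조체는 다음과 같이 순회할 수 있습니다(출력되는 값은 로컬 캐시 상태에 따라 달라집니다):

```py
from huggingface_hub import scan_cache_dir

cache_info = scan_cache_dir()
for repo in cache_info.repos:
    # e.g. "model my-org/my-model 1.2G" (size_on_disk_str is human-readable)
    print(repo.repo_type, repo.repo_id, repo.size_on_disk_str)
```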
26 | 27 | ### HFCacheInfo[[huggingface_hub.HFCacheInfo]] 28 | 29 | [[autodoc]] huggingface_hub.HFCacheInfo 30 | 31 | ### CachedRepoInfo[[huggingface_hub.CachedRepoInfo]] 32 | 33 | [[autodoc]] huggingface_hub.CachedRepoInfo 34 | - size_on_disk_str 35 | - refs 36 | 37 | ### CachedRevisionInfo[[huggingface_hub.CachedRevisionInfo]] 38 | 39 | [[autodoc]] huggingface_hub.CachedRevisionInfo 40 | - size_on_disk_str 41 | - nb_files 42 | 43 | ### CachedFileInfo[[huggingface_hub.CachedFileInfo]] 44 | 45 | [[autodoc]] huggingface_hub.CachedFileInfo 46 | - size_on_disk_str 47 | 48 | ### DeleteCacheStrategy[[huggingface_hub.DeleteCacheStrategy]] 49 | 50 | [[autodoc]] huggingface_hub.DeleteCacheStrategy 51 | - expected_freed_size_str 52 | 53 | ## 예외[[exceptions]] 54 | 55 | ### CorruptedCacheException[[huggingface_hub.CorruptedCacheException]] 56 | 57 | [[autodoc]] huggingface_hub.CorruptedCacheException 58 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from .base import BaseInferenceType, dataclass_with_extra 7 | 8 | 9 | @dataclass_with_extra 10 | class ZeroShotObjectDetectionParameters(BaseInferenceType): 11 | """Additional inference parameters for Zero Shot Object Detection""" 12 | 13 | candidate_labels: list[str] 14 | """The candidate labels for this image""" 15 | 16 | 17 | @dataclass_with_extra 18 | class ZeroShotObjectDetectionInput(BaseInferenceType): 19 | """Inputs for Zero Shot Object Detection inference""" 20 | 21 | inputs: str 22 | """The input image data as a base64-encoded string.""" 23 | parameters: ZeroShotObjectDetectionParameters 24 | """Additional inference parameters for Zero Shot Object Detection""" 25 | 26 | 27 | @dataclass_with_extra 28 | class ZeroShotObjectDetectionBoundingBox(BaseInferenceType): 29 | """The predicted bounding box. Coordinates are relative to the top left corner of the input 30 | image. 31 | """ 32 | 33 | xmax: int 34 | xmin: int 35 | ymax: int 36 | ymin: int 37 | 38 | 39 | @dataclass_with_extra 40 | class ZeroShotObjectDetectionOutputElement(BaseInferenceType): 41 | """Outputs of inference for the Zero Shot Object Detection task""" 42 | 43 | box: ZeroShotObjectDetectionBoundingBox 44 | """The predicted bounding box. Coordinates are relative to the top left corner of the input 45 | image. 46 | """ 47 | label: str 48 | """A candidate label""" 49 | score: float 50 | """The associated score / probability""" 51 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.yml: -------------------------------------------------------------------------------- 1 | name: "\U0001F41B Bug Report" 2 | description: Report a bug on huggingface_hub 3 | labels: ["bug"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this bug report! 9 | - type: textarea 10 | id: bug-description 11 | attributes: 12 | label: Describe the bug 13 | description: A clear and concise description of what the bug is. 
If you intend to submit a pull request for this issue, tell us in the description. Thanks! 14 | placeholder: Bug description 15 | validations: 16 | required: true 17 | - type: textarea 18 | id: reproduction 19 | attributes: 20 | label: Reproduction 21 | description: Please provide a minimal reproducible code which we can copy/paste and reproduce the issue. 22 | placeholder: Reproduction 23 | - type: textarea 24 | id: logs 25 | attributes: 26 | label: Logs 27 | description: "Please include the Python logs if you can. If possible, run the code with `HF_DEBUG=1` as environment variable." 28 | render: shell 29 | - type: textarea 30 | id: system-info 31 | attributes: 32 | label: System info 33 | description: | 34 | Please dump your environment info by running the following command and copy-paste the result here: 35 | ```txt 36 | hf env 37 | ``` 38 | 39 | If you are working in a notebook, please run it in a code cell: 40 | ```py 41 | from huggingface_hub import dump_environment_info 42 | 43 | dump_environment_info() 44 | ``` 45 | render: shell 46 | placeholder: | 47 | - huggingface_hub version: 0.11.0.dev0 48 | - Platform: Linux-5.15.0-52-generic-x86_64-with-glibc2.35 49 | - Python version: 3.10.6 50 | ... 51 | validations: 52 | required: true 53 | -------------------------------------------------------------------------------- /docs/dev/release.md: -------------------------------------------------------------------------------- 1 | This document covers all steps that need to be done in order to do a release of the `huggingface_hub` library. 2 | 3 | 1. On a clone of the main repo, not your fork, checkout the main branch and pull the latest changes: 4 | ``` 5 | git checkout main 6 | git pull 7 | ``` 8 | 9 | 2. Checkout a new branch with the version that you'd like to release: v<VERSION>-release, 10 | for example `v0.5-release`. All patches will be done to that same branch. 11 | 12 | 3. Update the `__version__` variable in the `src/huggingface_hub/__init__.py` file to point 13 | to the version you're releasing: 14 | ``` 15 | __version__ = "<VERSION>" 16 | ``` 17 | 18 | 4. Make sure that the conda build works correctly by building it locally: 19 | ``` 20 | conda install -c defaults anaconda-client conda-build 21 | HUB_VERSION=<VERSION> conda-build .github/conda 22 | ``` 23 | 24 | 5. Make sure that the pip wheel works correctly by building it locally and installing it: 25 | ``` 26 | pip install setuptools wheel 27 | python setup.py sdist bdist_wheel 28 | pip install dist/huggingface_hub-<VERSION>-py3-none-any.whl 29 | ``` 30 | 31 | 6. Commit, tag, and push the branch: 32 | ``` 33 | git commit -am "Release: v<VERSION>" 34 | git tag v<VERSION> -m "Adds tag v<VERSION> for pypi and conda" 35 | git push -u --tags origin v<VERSION>-release 36 | ``` 37 | 38 | 7. Verify that the docs have been built correctly. You can check that on the following link: 39 | https://huggingface.co/docs/huggingface_hub/v<VERSION> 40 | 41 | 8. Checkout main once again to update the version in the `__init__.py` file: 42 | ``` 43 | git checkout main 44 | ``` 45 | 46 | 9. Update the version to contain the `.dev0` suffix: 47 | ``` 48 | __version__ = "<next-version>.dev0" # For example, after releasing v0.5.0 or v0.5.1: "0.6.0.dev0". 49 | ``` 50 | 51 | 10. Push the changes!
52 | ``` 53 | git push origin main 54 | ``` 55 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/video_classification.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | VideoClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] 12 | 13 | 14 | @dataclass_with_extra 15 | class VideoClassificationParameters(BaseInferenceType): 16 | """Additional inference parameters for Video Classification""" 17 | 18 | frame_sampling_rate: Optional[int] = None 19 | """The sampling rate used to select frames from the video.""" 20 | function_to_apply: Optional["VideoClassificationOutputTransform"] = None 21 | """The function to apply to the model outputs in order to retrieve the scores.""" 22 | num_frames: Optional[int] = None 23 | """The number of sampled frames to consider for classification.""" 24 | top_k: Optional[int] = None 25 | """When specified, limits the output to the top K most probable classes.""" 26 | 27 | 28 | @dataclass_with_extra 29 | class VideoClassificationInput(BaseInferenceType): 30 | """Inputs for Video Classification inference""" 31 | 32 | inputs: Any 33 | """The input video data""" 34 | parameters: Optional[VideoClassificationParameters] = None 35 | """Additional inference parameters for Video Classification""" 36 | 37 | 38 | @dataclass_with_extra 39 | class VideoClassificationOutputElement(BaseInferenceType): 40 | """Outputs of inference for the Video Classification task""" 41 | 42 | label: str 43 | """The predicted class label.""" 44 | score: float 45 | """The corresponding probability.""" 46 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/fill_mask.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class FillMaskParameters(BaseInferenceType): 13 | """Additional inference parameters for Fill Mask""" 14 | 15 | targets: Optional[list[str]] = None 16 | """When passed, the model will limit the scores to the passed targets instead of looking up 17 | in the whole vocabulary. If the provided targets are not in the model vocab, they will be 18 | tokenized and the first resulting token will be used (with a warning, and that might be 19 | slower). 
20 | """ 21 | top_k: Optional[int] = None 22 | """When passed, overrides the number of predictions to return.""" 23 | 24 | 25 | @dataclass_with_extra 26 | class FillMaskInput(BaseInferenceType): 27 | """Inputs for Fill Mask inference""" 28 | 29 | inputs: str 30 | """The text with masked tokens""" 31 | parameters: Optional[FillMaskParameters] = None 32 | """Additional inference parameters for Fill Mask""" 33 | 34 | 35 | @dataclass_with_extra 36 | class FillMaskOutputElement(BaseInferenceType): 37 | """Outputs of inference for the Fill Mask task""" 38 | 39 | score: float 40 | """The corresponding probability""" 41 | sequence: str 42 | """The corresponding input with the mask token prediction.""" 43 | token: int 44 | """The predicted token id (to replace the masked one).""" 45 | token_str: Any 46 | fill_mask_output_token_str: Optional[str] = None 47 | """The predicted token (to replace the masked one).""" 48 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/visual_question_answering.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class VisualQuestionAnsweringInputData(BaseInferenceType): 13 | """One (image, question) pair to answer""" 14 | 15 | image: Any 16 | """The image.""" 17 | question: str 18 | """The question to answer based on the image.""" 19 | 20 | 21 | @dataclass_with_extra 22 | class VisualQuestionAnsweringParameters(BaseInferenceType): 23 | """Additional inference parameters for Visual Question Answering""" 24 | 25 | top_k: Optional[int] = None 26 | """The number of answers to return (will be chosen by order of likelihood). Note that we 27 | return less than topk answers if there are not enough options available within the 28 | context. 29 | """ 30 | 31 | 32 | @dataclass_with_extra 33 | class VisualQuestionAnsweringInput(BaseInferenceType): 34 | """Inputs for Visual Question Answering inference""" 35 | 36 | inputs: VisualQuestionAnsweringInputData 37 | """One (image, question) pair to answer""" 38 | parameters: Optional[VisualQuestionAnsweringParameters] = None 39 | """Additional inference parameters for Visual Question Answering""" 40 | 41 | 42 | @dataclass_with_extra 43 | class VisualQuestionAnsweringOutputElement(BaseInferenceType): 44 | """Outputs of inference for the Visual Question Answering task""" 45 | 46 | score: float 47 | """The associated score / probability""" 48 | answer: Optional[str] = None 49 | """The answer to the question""" 50 | -------------------------------------------------------------------------------- /docs/source/ko/package_reference/inference_endpoints.md: -------------------------------------------------------------------------------- 1 | # 추론 엔드포인트 [[inference-endpoints]] 2 | 3 | Hugging Face가 관리하는 추론 엔드포인트는 우리가 모델을 쉽고 안전하게 배포할 수 있게 해주는 도구입니다. 이러한 추론 엔드포인트는 [Hub](https://huggingface.co/models)에 있는 모델을 기반으로 설계되었습니다. 이 문서는 `huggingface_hub`와 추론 엔드포인트 통합에 관한 참조 페이지이며, 더욱 자세한 정보는 [공식 문서](https://huggingface.co/docs/inference-endpoints/index)를 통해 확인할 수 있습니다. 
4 | 5 | > [!TIP] 6 | > 'huggingface_hub'를 사용하여 추론 엔드포인트를 프로그래밍 방식으로 관리하는 방법을 알고 싶다면, [관련 가이드](../guides/inference_endpoints)를 확인해 보세요. 7 | 8 | 추론 엔드포인트는 API로 쉽게 접근할 수 있습니다. 이 엔드포인트들은 [Swagger](https://api.endpoints.huggingface.cloud/)를 통해 문서화되어 있고, [`InferenceEndpoint`] 클래스는 이 API를 사용해 만든 간단한 래퍼입니다. 9 | 10 | ## 매소드 [[methods]] 11 | 12 | 다음과 같은 추론 엔드포인트의 기능이 [`HfApi`]안에 구현되어 있습니다: 13 | 14 | - [`get_inference_endpoint`]와 [`list_inference_endpoints`]를 사용해 엔드포인트 정보를 조회할 수 있습니다. 15 | - [`create_inference_endpoint`], [`update_inference_endpoint`], [`delete_inference_endpoint`]로 엔드포인트를 배포하고 관리할 수 있습니다. 16 | - [`pause_inference_endpoint`]와 [`resume_inference_endpoint`]로 엔드포인트를 잠시 멈추거나 다시 시작할 수 있습니다. 17 | - [`scale_to_zero_inference_endpoint`]로 엔드포인트의 복제본을 0개로 설정할 수 있습니다. 18 | 19 | ## InferenceEndpoint [[huggingface_hub.InferenceEndpoint]] 20 | 21 | 기본 데이터 클래스는 [`InferenceEndpoint`]입니다. 여기에는 구성 및 현재 상태를 가지고 있는 배포된 `InferenceEndpoint`에 대한 정보가 포함되어 있습니다. 배포 후에는 [`InferenceEndpoint.client`]와 [`InferenceEndpoint.async_client`]를 사용해 엔드포인트에서 추론 작업을 할 수 있고, 이때 [`InferenceClient`]와 [`AsyncInferenceClient`] 객체를 반환합니다. 22 | 23 | [[autodoc]] InferenceEndpoint 24 | - from_raw 25 | - client 26 | - async_client 27 | - all 28 | 29 | ## InferenceEndpointStatus [[huggingface_hub.InferenceEndpointStatus]] 30 | 31 | [[autodoc]] InferenceEndpointStatus 32 | 33 | ## InferenceEndpointType [[huggingface_hub.InferenceEndpointType]] 34 | 35 | [[autodoc]] InferenceEndpointType 36 | 37 | ## InferenceEndpointError [[huggingface_hub.InferenceEndpointError]] 38 | 39 | [[autodoc]] InferenceEndpointError 40 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/zero_shot_classification.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class ZeroShotClassificationParameters(BaseInferenceType): 13 | """Additional inference parameters for Zero Shot Classification""" 14 | 15 | candidate_labels: list[str] 16 | """The set of possible class labels to classify the text into.""" 17 | hypothesis_template: Optional[str] = None 18 | """The sentence used in conjunction with `candidate_labels` to attempt the text 19 | classification by replacing the placeholder with the candidate labels. 20 | """ 21 | multi_label: Optional[bool] = None 22 | """Whether multiple candidate labels can be true. If false, the scores are normalized such 23 | that the sum of the label likelihoods for each sequence is 1. If true, the labels are 24 | considered independent and probabilities are normalized for each candidate. 
25 | """ 26 | 27 | 28 | @dataclass_with_extra 29 | class ZeroShotClassificationInput(BaseInferenceType): 30 | """Inputs for Zero Shot Classification inference""" 31 | 32 | inputs: str 33 | """The text to classify""" 34 | parameters: ZeroShotClassificationParameters 35 | """Additional inference parameters for Zero Shot Classification""" 36 | 37 | 38 | @dataclass_with_extra 39 | class ZeroShotClassificationOutputElement(BaseInferenceType): 40 | """Outputs of inference for the Zero Shot Classification task""" 41 | 42 | label: str 43 | """The predicted class label.""" 44 | score: float 45 | """The corresponding probability.""" 46 | -------------------------------------------------------------------------------- /.github/workflows/python-quality.yml: -------------------------------------------------------------------------------- 1 | name: Python quality 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths-ignore: 8 | - "js/**" 9 | - "api-inference-community/**" 10 | pull_request: 11 | types: [assigned, opened, synchronize, reopened] 12 | paths-ignore: 13 | - "js/**" 14 | - "api-inference-community/**" 15 | 16 | jobs: 17 | check_code_quality: 18 | runs-on: ubuntu-latest 19 | env: 20 | UV_HTTP_TIMEOUT: 600 # max 10min to install deps 21 | 22 | steps: 23 | - uses: actions/checkout@v2 24 | - name: Set up Python 25 | uses: actions/setup-python@v2 26 | with: 27 | python-version: 3.9 28 | 29 | # Setup venv 30 | # TODO: revisit when https://github.com/astral-sh/uv/issues/1526 is addressed. 31 | - name: Setup venv + uv 32 | run: | 33 | pip install --upgrade uv 34 | uv venv 35 | 36 | - name: Install dependencies 37 | run: uv pip install "huggingface_hub[dev] @ ." 38 | - run: .venv/bin/ruff check tests src # linter 39 | - run: .venv/bin/ruff format --check tests src # formatter 40 | - run: .venv/bin/python utils/check_inference_input_params.py 41 | - run: .venv/bin/python utils/check_static_imports.py 42 | - run: .venv/bin/python utils/check_all_variable.py 43 | - run: .venv/bin/python utils/generate_async_inference_client.py 44 | - run: .venv/bin/python utils/generate_cli_reference.py --verbose 45 | - run: .venv/bin/python utils/generate_inference_types.py 46 | - run: .venv/bin/python utils/check_task_parameters.py 47 | - run: uvx ty check src 48 | # Run type checking at least on huggingface_hub root file to check all modules 49 | # that can be lazy-loaded actually exist. 50 | - run: .venv/bin/mypy src/huggingface_hub/__init__.py --follow-imports=silent --show-traceback 51 | 52 | # Run mypy on full package 53 | - run: .venv/bin/mypy src 54 | -------------------------------------------------------------------------------- /.github/workflows/model_card_consistency_reminder.yml: -------------------------------------------------------------------------------- 1 | name: Model and Dataset Card consistency reminder 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - src/huggingface_hub/repocard.py 7 | - src/huggingface_hub/templates/modelcard_template.md 8 | - src/huggingface_hub/templates/datasetcard_template.md 9 | 10 | jobs: 11 | comment: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: maintain-comment 15 | uses: actions-cool/maintain-one-comment@v3 16 | with: 17 | body: | 18 | It looks like you've updated code related to model or dataset cards in this PR. 19 | 20 | Some content is duplicated among the following files. Please make sure that everything stays consistent. 
21 | - [src/.../repocard.py](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/repocard.py) 22 | - [src/.../datasetcard_template.md](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md) 23 | - [src/.../modelcard_template.md](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md) 24 | - [modelcard.md](https://github.com/huggingface/hub-docs/blob/main/modelcard.md) (`hub-docs` repo) 25 | - [docs/hub/model-cards.md](https://github.com/huggingface/hub-docs/blob/main/docs/hub/model-cards.md) (`hub-docs` repo) 26 | - [docs/hub/model-card-annotated.md](https://github.com/huggingface/hub-docs/blob/main/docs/hub/model-card-annotated.md) (`hub-docs` repo) 27 | - [datasetcard.md](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md) (`hub-docs` repo) 28 | - [docs/hub/datasets-cards.md](https://github.com/huggingface/hub-docs/blob/main/docs/hub/datasets-cards.md) (`hub-docs` repo) 29 | token: ${{ secrets.comment_bot_token }} 30 | body-include: '' 31 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/translation.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | TranslationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"] 12 | 13 | 14 | @dataclass_with_extra 15 | class TranslationParameters(BaseInferenceType): 16 | """Additional inference parameters for Translation""" 17 | 18 | clean_up_tokenization_spaces: Optional[bool] = None 19 | """Whether to clean up the potential extra spaces in the text output.""" 20 | generate_parameters: Optional[dict[str, Any]] = None 21 | """Additional parametrization of the text generation algorithm.""" 22 | src_lang: Optional[str] = None 23 | """The source language of the text. Required for models that can translate from multiple 24 | languages. 25 | """ 26 | tgt_lang: Optional[str] = None 27 | """Target language to translate to. Required for models that can translate to multiple 28 | languages. 
29 | """ 30 | truncation: Optional["TranslationTruncationStrategy"] = None 31 | """The truncation strategy to use.""" 32 | 33 | 34 | @dataclass_with_extra 35 | class TranslationInput(BaseInferenceType): 36 | """Inputs for Translation inference""" 37 | 38 | inputs: str 39 | """The text to translate.""" 40 | parameters: Optional[TranslationParameters] = None 41 | """Additional inference parameters for Translation""" 42 | 43 | 44 | @dataclass_with_extra 45 | class TranslationOutput(BaseInferenceType): 46 | """Outputs of inference for the Translation task""" 47 | 48 | translation_text: str 49 | """The translated text.""" 50 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/nscale.py: -------------------------------------------------------------------------------- 1 | import base64 2 | from typing import Any, Optional, Union 3 | 4 | from huggingface_hub.hf_api import InferenceProviderMapping 5 | from huggingface_hub.inference._common import RequestParameters, _as_dict 6 | 7 | from ._common import BaseConversationalTask, TaskProviderHelper, filter_none 8 | 9 | 10 | class NscaleConversationalTask(BaseConversationalTask): 11 | def __init__(self): 12 | super().__init__(provider="nscale", base_url="https://inference.api.nscale.com") 13 | 14 | 15 | class NscaleTextToImageTask(TaskProviderHelper): 16 | def __init__(self): 17 | super().__init__(provider="nscale", base_url="https://inference.api.nscale.com", task="text-to-image") 18 | 19 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 20 | return "/v1/images/generations" 21 | 22 | def _prepare_payload_as_dict( 23 | self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping 24 | ) -> Optional[dict]: 25 | mapped_model = provider_mapping_info.provider_id 26 | # Combine all parameters except inputs and parameters 27 | parameters = filter_none(parameters) 28 | if "width" in parameters and "height" in parameters: 29 | parameters["size"] = f"{parameters.pop('width')}x{parameters.pop('height')}" 30 | if "num_inference_steps" in parameters: 31 | parameters.pop("num_inference_steps") 32 | if "cfg_scale" in parameters: 33 | parameters.pop("cfg_scale") 34 | payload = { 35 | "response_format": "b64_json", 36 | "prompt": inputs, 37 | "model": mapped_model, 38 | **parameters, 39 | } 40 | return payload 41 | 42 | def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: 43 | response_dict = _as_dict(response) 44 | return base64.b64decode(response_dict["data"][0]["b64_json"]) 45 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/text_to_video.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
6 | from typing import Any, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class TextToVideoParameters(BaseInferenceType): 13 | """Additional inference parameters for Text To Video""" 14 | 15 | guidance_scale: Optional[float] = None 16 | """A higher guidance scale value encourages the model to generate videos closely linked to 17 | the text prompt, but values too high may cause saturation and other artifacts. 18 | """ 19 | negative_prompt: Optional[list[str]] = None 20 | """One or several prompts to guide what NOT to include in video generation.""" 21 | num_frames: Optional[float] = None 22 | """The num_frames parameter determines how many video frames are generated.""" 23 | num_inference_steps: Optional[int] = None 24 | """The number of denoising steps. More denoising steps usually lead to a higher quality 25 | video at the expense of slower inference. 26 | """ 27 | seed: Optional[int] = None 28 | """Seed for the random number generator.""" 29 | 30 | 31 | @dataclass_with_extra 32 | class TextToVideoInput(BaseInferenceType): 33 | """Inputs for Text To Video inference""" 34 | 35 | inputs: str 36 | """The input text data (sometimes called "prompt")""" 37 | parameters: Optional[TextToVideoParameters] = None 38 | """Additional inference parameters for Text To Video""" 39 | 40 | 41 | @dataclass_with_extra 42 | class TextToVideoOutput(BaseInferenceType): 43 | """Outputs of inference for the Text To Video task""" 44 | 45 | video: Any 46 | """The generated video returned as raw bytes in the payload.""" 47 | -------------------------------------------------------------------------------- /tests/test_utils_dotenv.py: -------------------------------------------------------------------------------- 1 | # AI-generated module (ChatGPT) 2 | from huggingface_hub.utils._dotenv import load_dotenv 3 | 4 | 5 | def test_basic_key_value(): 6 | data = "KEY=value" 7 | assert load_dotenv(data) == {"KEY": "value"} 8 | 9 | 10 | def test_whitespace_and_comments(): 11 | data = """ 12 | # This is a comment 13 | KEY = value # inline comment 14 | EMPTY= 15 | """ 16 | assert load_dotenv(data) == {"KEY": "value", "EMPTY": ""} 17 | 18 | 19 | def test_quoted_values(): 20 | data = """ 21 | SINGLE='single quoted' 22 | DOUBLE="double quoted" 23 | ESCAPED="line\\nbreak" 24 | """ 25 | assert load_dotenv(data) == {"SINGLE": "single quoted", "DOUBLE": "double quoted", "ESCAPED": "line\nbreak"} 26 | 27 | 28 | def test_export_and_inline_comment(): 29 | data = "export KEY=value # this is a comment" 30 | assert load_dotenv(data) == {"KEY": "value"} 31 | 32 | 33 | def test_ignore_invalid_lines(): 34 | data = """ 35 | this is not valid 36 | KEY=value 37 | """ 38 | assert load_dotenv(data) == {"KEY": "value"} 39 | 40 | 41 | def test_complex_quotes(): 42 | data = r""" 43 | QUOTED="some value with # not comment" 44 | ESCAPE="escaped \$dollar and \\backslash" 45 | """ 46 | assert load_dotenv(data) == { 47 | "QUOTED": "some value with # not comment", 48 | "ESCAPE": "escaped $dollar and \\backslash", 49 | } 50 | 51 | 52 | def test_no_value(): 53 | data = "NOVALUE=" 54 | assert load_dotenv(data) == {"NOVALUE": ""} 55 | 56 | 57 | def test_multiple_lines(): 58 | data = """ 59 | A=1 60 | B="two" 61 | C='three' 62 | D=4 63 | """ 64 | assert load_dotenv(data) == {"A": "1", "B": "two", "C": "three", "D": "4"} 65 | 66 | 67 | def test_environ(): 68 | data = """ 69 | A=1 70 | B 71 | C=3 72 | MISSING 73 | EMPTY 74 | """ 75 | environ = {"A": "one", "B": "two", "D": "four", "EMPTY": ""} 76 
| assert load_dotenv(data, environ=environ) == {"A": "1", "B": "two", "C": "3", "EMPTY": ""} 77 | -------------------------------------------------------------------------------- /tests/test_utils_fixes.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import unittest 3 | from pathlib import Path 4 | 5 | import filelock 6 | import pytest 7 | 8 | from huggingface_hub.utils import SoftTemporaryDirectory, WeakFileLock, yaml_dump 9 | 10 | 11 | class TestYamlDump(unittest.TestCase): 12 | def test_yaml_dump_emoji(self) -> None: 13 | self.assertEqual(yaml_dump({"emoji": "👀"}), "emoji: 👀\n") 14 | 15 | def test_yaml_dump_japanese_characters(self) -> None: 16 | self.assertEqual(yaml_dump({"some unicode": "日本か"}), "some unicode: 日本か\n") 17 | 18 | def test_yaml_dump_explicit_no_unicode(self) -> None: 19 | self.assertEqual(yaml_dump({"emoji": "👀"}, allow_unicode=False), 'emoji: "\\U0001F440"\n') 20 | 21 | 22 | class TestTemporaryDirectory(unittest.TestCase): 23 | def test_temporary_directory(self) -> None: 24 | with SoftTemporaryDirectory(prefix="prefix", suffix="suffix") as path: 25 | self.assertIsInstance(path, Path) 26 | self.assertTrue(path.name.startswith("prefix")) 27 | self.assertTrue(path.name.endswith("suffix")) 28 | self.assertTrue(path.is_dir()) 29 | # Tmpdir is deleted 30 | self.assertFalse(path.is_dir()) 31 | 32 | 33 | class TestWeakFileLock: 34 | def test_lock_log_every( 35 | self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch, caplog: pytest.LogCaptureFixture 36 | ) -> None: 37 | monkeypatch.setattr("huggingface_hub.constants.FILELOCK_LOG_EVERY_SECONDS", 0.1) 38 | lock_file = tmp_path / ".lock" 39 | 40 | with caplog.at_level(logging.INFO, logger="huggingface_hub.utils._fixes"): 41 | with WeakFileLock(lock_file): 42 | with pytest.raises(filelock.Timeout) as exc_info: 43 | with WeakFileLock(lock_file, timeout=0.3): 44 | pass 45 | assert exc_info.value.lock_file == str(lock_file) 46 | 47 | assert len(caplog.records) >= 3 48 | assert caplog.records[0].message.startswith(f"Still waiting to acquire lock on {lock_file}") 49 | -------------------------------------------------------------------------------- /src/huggingface_hub/utils/_pagination.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2022-present, the HuggingFace Inc. team. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """Contains utilities to handle pagination on Huggingface Hub.""" 16 | 17 | from typing import Iterable, Optional 18 | 19 | import httpx 20 | 21 | from . import get_session, hf_raise_for_status, http_backoff, logging 22 | 23 | 24 | logger = logging.get_logger(__name__) 25 | 26 | 27 | def paginate(path: str, params: dict, headers: dict) -> Iterable: 28 | """Fetch a list of models/datasets/spaces and paginate through results. 29 | 30 | This is using the same "Link" header format as GitHub. 
31 | See: 32 | - https://requests.readthedocs.io/en/latest/api/#requests.Response.links 33 | - https://docs.github.com/en/rest/guides/traversing-with-pagination#link-header 34 | """ 35 | session = get_session() 36 | r = session.get(path, params=params, headers=headers) 37 | hf_raise_for_status(r) 38 | yield from r.json() 39 | 40 | # Follow pages 41 | # Next link already contains query params 42 | next_page = _get_next_page(r) 43 | while next_page is not None: 44 | logger.debug(f"Pagination detected. Requesting next page: {next_page}") 45 | r = http_backoff("GET", next_page, headers=headers) 46 | hf_raise_for_status(r) 47 | yield from r.json() 48 | next_page = _get_next_page(r) 49 | 50 | 51 | def _get_next_page(response: httpx.Response) -> Optional[str]: 52 | return response.links.get("next", {}).get("url") 53 | -------------------------------------------------------------------------------- /tests/test_init_lazy_loading.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import jedi 4 | 5 | 6 | class TestHuggingfaceHubInit(unittest.TestCase): 7 | @unittest.skip( 8 | reason="`jedi.Completion.get_signatures()` output differs between Python 3.12 and earlier versions, affecting test consistency" 9 | ) 10 | def test_autocomplete_on_root_imports(self) -> None: 11 | """Test autocomplete with `huggingface_hub` works with Jedi. 12 | 13 | Not all autocomplete systems are based on Jedi but if this one works we can 14 | assume others do as well. 15 | """ 16 | source = """from huggingface_hub import c""" 17 | script = jedi.Script(source, path="example.py") 18 | completions = script.complete(1, len(source)) 19 | 20 | for completion in completions: 21 | if completion.name == "create_commit": 22 | # Assert `create_commit` is suggested from the `huggingface_hub` lib 23 | self.assertEqual(completion.module_name, "huggingface_hub") 24 | 25 | # Assert autocomplete knows where `create_commit` lives 26 | # It would not be the case with a dynamic import. 27 | goto_list = completion.goto() 28 | self.assertEqual(len(goto_list), 1) 29 | 30 | # Assert the docstring is found. This means autocomplete can also provide 31 | # the help section. 32 | signature_list = goto_list[0].get_signatures() 33 | self.assertEqual(len(signature_list), 2) # create_commit has 2 signatures (normal and `run_as_future`) 34 | self.assertTrue(signature_list[0].docstring().startswith("create_commit(repo_id: str,")) 35 | break 36 | else: 37 | self.fail( 38 | "Jedi autocomplete did not suggest `create_commit` to complete the" 39 | f" line `{source}`. It is most probable that static imports are not" 40 | " correct in `./src/huggingface_hub/__init__.py`. Please run `make" 41 | " style` to fix this." 42 | ) 43 | -------------------------------------------------------------------------------- /docs/source/en/package_reference/cards.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Repository Cards 6 | 7 | The huggingface_hub library provides a Python interface to create, share, and update Model/Dataset Cards. 8 | Visit the [dedicated documentation page](https://huggingface.co/docs/hub/models-cards) for a deeper view of what 9 | Model Cards on the Hub are, and how they work under the hood. You can also check out our [Model Cards guide](../how-to-model-cards) to 10 | get a feel for how you would use these utilities in your own projects.
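As a quick, minimal sketch of that workflow (the repo ids below are illustrative, and `push_to_hub` requires write access):

```py
>>> from huggingface_hub import ModelCard, ModelCardData

>>> # Load the card of an existing repo from the Hub
>>> card = ModelCard.load("bert-base-uncased")  # illustrative repo id
>>> card.data.to_dict()  # the card's YAML metadata block as a Python dict

>>> # Or create a card from the default template
>>> card = ModelCard.from_template(
...     ModelCardData(license="mit", library_name="transformers"),
...     model_description="A short description of what the model does.",
... )
>>> # card.push_to_hub("username/my-model")  # illustrative target repo
```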
11 | 12 | ## Repo Card 13 | 14 | The `RepoCard` object is the parent class of [`ModelCard`], [`DatasetCard`] and `SpaceCard`. 15 | 16 | [[autodoc]] huggingface_hub.repocard.RepoCard 17 | - __init__ 18 | - all 19 | 20 | ## Card Data 21 | 22 | The [`CardData`] object is the parent class of [`ModelCardData`] and [`DatasetCardData`]. 23 | 24 | [[autodoc]] huggingface_hub.repocard_data.CardData 25 | 26 | ## Model Cards 27 | 28 | ### ModelCard 29 | 30 | [[autodoc]] ModelCard 31 | 32 | ### ModelCardData 33 | 34 | [[autodoc]] ModelCardData 35 | 36 | ## Dataset Cards 37 | 38 | Dataset cards are also known as Data Cards in the ML Community. 39 | 40 | ### DatasetCard 41 | 42 | [[autodoc]] DatasetCard 43 | 44 | ### DatasetCardData 45 | 46 | [[autodoc]] DatasetCardData 47 | 48 | ## Space Cards 49 | 50 | ### SpaceCard 51 | 52 | [[autodoc]] SpaceCard 53 | 54 | ### SpaceCardData 55 | 56 | [[autodoc]] SpaceCardData 57 | 58 | ## Utilities 59 | 60 | ### EvalResult 61 | 62 | [[autodoc]] EvalResult 63 | 64 | ### model_index_to_eval_results 65 | 66 | [[autodoc]] huggingface_hub.repocard_data.model_index_to_eval_results 67 | 68 | ### eval_results_to_model_index 69 | 70 | [[autodoc]] huggingface_hub.repocard_data.eval_results_to_model_index 71 | 72 | ### metadata_eval_result 73 | 74 | [[autodoc]] huggingface_hub.repocard.metadata_eval_result 75 | 76 | ### metadata_update 77 | 78 | [[autodoc]] huggingface_hub.repocard.metadata_update 79 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/text_to_image.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class TextToImageParameters(BaseInferenceType): 13 | """Additional inference parameters for Text To Image""" 14 | 15 | guidance_scale: Optional[float] = None 16 | """A higher guidance scale value encourages the model to generate images closely linked to 17 | the text prompt, but values too high may cause saturation and other artifacts. 18 | """ 19 | height: Optional[int] = None 20 | """The height in pixels of the output image""" 21 | negative_prompt: Optional[str] = None 22 | """One prompt to guide what NOT to include in image generation.""" 23 | num_inference_steps: Optional[int] = None 24 | """The number of denoising steps. More denoising steps usually lead to a higher quality 25 | image at the expense of slower inference. 
26 | """ 27 | scheduler: Optional[str] = None 28 | """Override the scheduler with a compatible one.""" 29 | seed: Optional[int] = None 30 | """Seed for the random number generator.""" 31 | width: Optional[int] = None 32 | """The width in pixels of the output image""" 33 | 34 | 35 | @dataclass_with_extra 36 | class TextToImageInput(BaseInferenceType): 37 | """Inputs for Text To Image inference""" 38 | 39 | inputs: str 40 | """The input text data (sometimes called "prompt")""" 41 | parameters: Optional[TextToImageParameters] = None 42 | """Additional inference parameters for Text To Image""" 43 | 44 | 45 | @dataclass_with_extra 46 | class TextToImageOutput(BaseInferenceType): 47 | """Outputs of inference for the Text To Image task""" 48 | 49 | image: Any 50 | """The generated image returned as raw bytes in the payload.""" 51 | -------------------------------------------------------------------------------- /tests/test_utils_parsing.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pytest 4 | 5 | from huggingface_hub.utils._parsing import format_timesince, parse_duration, parse_size 6 | 7 | 8 | @pytest.mark.parametrize( 9 | ("value", "expected"), 10 | [ 11 | ("10", 10), 12 | ("10k", 10_000), 13 | ("5M", 5_000_000), 14 | ("2G", 2_000_000_000), 15 | ("1T", 1_000_000_000_000), 16 | ("0", 0), 17 | ], 18 | ) 19 | def test_parse_size_valid(value, expected): 20 | assert parse_size(value) == expected 21 | 22 | 23 | @pytest.mark.parametrize( 24 | "value", 25 | [ 26 | "1.5G", 27 | "3KB", 28 | "-5M", 29 | "10X", 30 | "abc", 31 | "", 32 | "123abc456", 33 | " 10 K", 34 | ], 35 | ) 36 | def test_parse_size_invalid(value): 37 | with pytest.raises(ValueError): 38 | parse_size(value) 39 | 40 | 41 | @pytest.mark.parametrize( 42 | ("value", "expected"), 43 | [ 44 | ("10s", 10), 45 | ("5m", 300), 46 | ("2h", 7_200), 47 | ("1d", 86_400), 48 | ("1w", 604_800), 49 | ("1mo", 2_592_000), 50 | ("1y", 31_536_000), 51 | ("0", 0), 52 | ], 53 | ) 54 | def test_parse_duration_valid(value, expected): 55 | assert parse_duration(value) == expected 56 | 57 | 58 | @pytest.mark.parametrize( 59 | "value", 60 | [ 61 | "1.5h", 62 | "3month", 63 | "-5m", 64 | "10X", 65 | "abc", 66 | "", 67 | "123abc456", 68 | " 10 m", 69 | ], 70 | ) 71 | def test_parse_duration_invalid(value): 72 | with pytest.raises(ValueError): 73 | parse_duration(value) 74 | 75 | 76 | @pytest.mark.parametrize( 77 | ("value", "expected"), 78 | [ 79 | (1, "a few seconds ago"), 80 | (15, "a few seconds ago"), 81 | (25, "25 seconds ago"), 82 | (80, "1 minute ago"), 83 | (1000, "17 minutes ago"), 84 | (4000, "1 hour ago"), 85 | (8000, "2 hours ago"), 86 | ], 87 | ) 88 | def test_format_timesince(value, expected): 89 | assert format_timesince(time.time() - value) == expected 90 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/token_classification.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
6 | from typing import Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | TokenClassificationAggregationStrategy = Literal["none", "simple", "first", "average", "max"] 12 | 13 | 14 | @dataclass_with_extra 15 | class TokenClassificationParameters(BaseInferenceType): 16 | """Additional inference parameters for Token Classification""" 17 | 18 | aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None 19 | """The strategy used to fuse tokens based on model predictions""" 20 | ignore_labels: Optional[list[str]] = None 21 | """A list of labels to ignore""" 22 | stride: Optional[int] = None 23 | """The number of overlapping tokens between chunks when splitting the input text.""" 24 | 25 | 26 | @dataclass_with_extra 27 | class TokenClassificationInput(BaseInferenceType): 28 | """Inputs for Token Classification inference""" 29 | 30 | inputs: str 31 | """The input text data""" 32 | parameters: Optional[TokenClassificationParameters] = None 33 | """Additional inference parameters for Token Classification""" 34 | 35 | 36 | @dataclass_with_extra 37 | class TokenClassificationOutputElement(BaseInferenceType): 38 | """Outputs of inference for the Token Classification task""" 39 | 40 | end: int 41 | """The character position in the input where this group ends.""" 42 | score: float 43 | """The associated score / probability""" 44 | start: int 45 | """The character position in the input where this group begins.""" 46 | word: str 47 | """The corresponding text""" 48 | entity: Optional[str] = None 49 | """The predicted label for a single token""" 50 | entity_group: Optional[str] = None 51 | """The predicted label for a group of one or more tokens""" 52 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.mypy] 2 | ignore_missing_imports = true 3 | 4 | [tool.ty] 5 | [tool.ty.terminal] 6 | # Do not fail CI on warnings; keep output readable 7 | error-on-warning = false 8 | output-format = "concise" 9 | 10 | [tool.ty.rules] 11 | # Minimize noise from optional/extra dependencies not installed in CI or local 12 | unresolved-import = "ignore" 13 | unresolved-attribute = "ignore" 14 | 15 | # Be tolerant with framework/typing edge-cases and runtime-validated code paths 16 | unsupported-base = "ignore" 17 | unsupported-operator = "ignore" 18 | possibly-missing-attribute = "ignore" 19 | possibly-missing-implicit-call = "ignore" 20 | non-subscriptable = "ignore" 21 | call-non-callable = "ignore" 22 | 23 | # Loosen strictness a bit on type matching 24 | missing-argument = "ignore" 25 | deprecated = "ignore" 26 | 27 | [tool.pytest.ini_options] 28 | # Add the specified `OPTS` to the set of command line arguments as if they had 29 | # been specified by the user. 
30 | addopts = "-Werror::FutureWarning --log-cli-level=INFO -sv --durations=0" 31 | # The defined variables will be added to the environment before any tests are 32 | # run, part of pytest-env plugin 33 | env = [ 34 | "DISABLE_SYMLINKS_IN_WINDOWS_TESTS=1", 35 | "HF_TOKEN=", 36 | "HUGGINGFACE_CO_STAGING=1", 37 | "HUGGING_FACE_HUB_TOKEN=", 38 | ] 39 | 40 | [tool.ruff] 41 | exclude = [ 42 | ".eggs", 43 | ".git", 44 | ".git-rewrite", 45 | ".hg", 46 | ".mypy_cache", 47 | ".nox", 48 | ".pants.d", 49 | ".pytype", 50 | ".ruff_cache", 51 | ".svn", 52 | ".tox", 53 | ".venv", 54 | ".venv*", 55 | "__pypackages__", 56 | "_build", 57 | "build", 58 | "dist", 59 | "venv", 60 | ] 61 | line-length = 119 62 | # Ignored rules: 63 | # "E501" -> line length violation 64 | lint.ignore = ["E501"] 65 | lint.select = ["E", "F", "I", "W"] 66 | 67 | [tool.ruff.lint.isort] 68 | known-first-party = ["huggingface_hub"] 69 | lines-after-imports = 2 70 | 71 | [tool.tomlsort] 72 | all = true 73 | in_place = true 74 | spaces_before_inline_comment = 2 # Match Python PEP 8 75 | spaces_indent_inline_array = 4 # Match Python PEP 8 76 | trailing_comma_inline_array = true 77 | -------------------------------------------------------------------------------- /docs/source/en/guides/search.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Search the Hub 6 | 7 | In this tutorial, you will learn how to search models, datasets and spaces on the Hub using `huggingface_hub`. 8 | 9 | ## How to list repositories? 10 | 11 | The `huggingface_hub` library includes an HTTP client, [`HfApi`], to interact with the Hub. 12 | Among other things, it can list models, datasets and spaces stored on the Hub: 13 | 14 | ```py 15 | >>> from huggingface_hub import HfApi 16 | >>> api = HfApi() 17 | >>> models = api.list_models() 18 | ``` 19 | 20 | The output of [`list_models`] is an iterator over the models stored on the Hub. 21 | 22 | Similarly, you can use [`list_datasets`] to list datasets and [`list_spaces`] to list Spaces. 23 | 24 | ## How to filter repositories? 25 | 26 | Listing repositories is great but now you might want to filter your search. 27 | The list helpers have several attributes like: 28 | - `filter` 29 | - `author` 30 | - `search` 31 | - ... 32 | 33 | Let's look at an example that gets all models on the Hub that do image classification, have been trained on the imagenet dataset, and run with PyTorch. 34 | 35 | ```py 36 | models = api.list_models(filter=["image-classification", "pytorch", "imagenet"]) 37 | ``` 38 | 39 | While filtering, you can also sort the models and take only the top results. For example, 40 | the following fetches the top 5 most downloaded datasets on the Hub: 41 | 42 | ```py 43 | >>> list(list_datasets(sort="downloads", limit=5)) 44 | [DatasetInfo( 45 | id='argilla/databricks-dolly-15k-curated-en', 46 | author='argilla', 47 | sha='4dcd1dedbe148307a833c931b21ca456a1fc4281', 48 | last_modified=datetime.datetime(2023, 10, 2, 12, 32, 53, tzinfo=datetime.timezone.utc), 49 | private=False, 50 | downloads=8889377, 51 | (...) 52 | ``` 53 | 54 | 55 | 56 | To explore available filters on the Hub, visit [models](https://huggingface.co/models) and [datasets](https://huggingface.co/datasets) pages 57 | in your browser, search for some parameters and look at the values in the URL.
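As one last minimal sketch (the author and filter values below are illustrative), the list helpers compose, so you can filter, sort and limit in a single call:

```py
>>> from huggingface_hub import HfApi
>>> api = HfApi()
>>> # Three most-downloaded text-classification models from one author
>>> for model in api.list_models(author="google", filter="text-classification", sort="downloads", limit=3):
...     print(model.id)
```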
58 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/hyperbolic.py: -------------------------------------------------------------------------------- 1 | import base64 2 | from typing import Any, Optional, Union 3 | 4 | from huggingface_hub.hf_api import InferenceProviderMapping 5 | from huggingface_hub.inference._common import RequestParameters, _as_dict 6 | from huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none 7 | 8 | 9 | class HyperbolicTextToImageTask(TaskProviderHelper): 10 | def __init__(self): 11 | super().__init__(provider="hyperbolic", base_url="https://api.hyperbolic.xyz", task="text-to-image") 12 | 13 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 14 | return "/v1/images/generations" 15 | 16 | def _prepare_payload_as_dict( 17 | self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping 18 | ) -> Optional[dict]: 19 | mapped_model = provider_mapping_info.provider_id 20 | parameters = filter_none(parameters) 21 | if "num_inference_steps" in parameters: 22 | parameters["steps"] = parameters.pop("num_inference_steps") 23 | if "guidance_scale" in parameters: 24 | parameters["cfg_scale"] = parameters.pop("guidance_scale") 25 | # For Hyperbolic, the width and height are required parameters 26 | if "width" not in parameters: 27 | parameters["width"] = 512 28 | if "height" not in parameters: 29 | parameters["height"] = 512 30 | return {"prompt": inputs, "model_name": mapped_model, **parameters} 31 | 32 | def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: 33 | response_dict = _as_dict(response) 34 | return base64.b64decode(response_dict["images"][0]["image"]) 35 | 36 | 37 | class HyperbolicTextGenerationTask(BaseConversationalTask): 38 | """ 39 | Special case for Hyperbolic, where text-generation task is handled as a conversational task. 40 | """ 41 | 42 | def __init__(self, task: str): 43 | super().__init__( 44 | provider="hyperbolic", 45 | base_url="https://api.hyperbolic.xyz", 46 | ) 47 | self.task = task 48 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/image_segmentation.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
6 | from typing import Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"] 12 | 13 | 14 | @dataclass_with_extra 15 | class ImageSegmentationParameters(BaseInferenceType): 16 | """Additional inference parameters for Image Segmentation""" 17 | 18 | mask_threshold: Optional[float] = None 19 | """Threshold to use when turning the predicted masks into binary values.""" 20 | overlap_mask_area_threshold: Optional[float] = None 21 | """Mask overlap threshold to eliminate small, disconnected segments.""" 22 | subtask: Optional["ImageSegmentationSubtask"] = None 23 | """Segmentation task to be performed, depending on model capabilities.""" 24 | threshold: Optional[float] = None 25 | """Probability threshold to filter out predicted masks.""" 26 | 27 | 28 | @dataclass_with_extra 29 | class ImageSegmentationInput(BaseInferenceType): 30 | """Inputs for Image Segmentation inference""" 31 | 32 | inputs: str 33 | """The input image data as a base64-encoded string. If no `parameters` are provided, you can 34 | also provide the image data as a raw bytes payload. 35 | """ 36 | parameters: Optional[ImageSegmentationParameters] = None 37 | """Additional inference parameters for Image Segmentation""" 38 | 39 | 40 | @dataclass_with_extra 41 | class ImageSegmentationOutputElement(BaseInferenceType): 42 | """Outputs of inference for the Image Segmentation task 43 | A predicted mask / segment 44 | """ 45 | 46 | label: str 47 | """The label of the predicted segment.""" 48 | mask: str 49 | """The corresponding mask as a black-and-white image (base64-encoded).""" 50 | score: Optional[float] = None 51 | """The score or confidence degree the model has.""" 52 | -------------------------------------------------------------------------------- /docs/source/ko/guides/search.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Search the Hub[[search-the-hub]] 6 | 7 | In this tutorial, you will learn how to search for models, datasets, and Spaces on the Hub using `huggingface_hub`. 8 | 9 | ## How do I list repositories?[[how-to-list-repositories-]] 10 | 11 | The `huggingface_hub` library includes an HTTP client, [`HfApi`], to interact with the Hub. 12 | Among other things, it can list models, datasets, and Spaces stored on the Hub. 13 | 14 | ```py 15 | >>> from huggingface_hub import HfApi 16 | >>> api = HfApi() 17 | >>> models = api.list_models() 18 | ``` 19 | 20 | The output of [`list_models`] is a listing of the models stored on the Hub. 21 | 22 | Similarly, you can use [`list_datasets`] to list datasets and [`list_spaces`] to list Spaces. 23 | 24 | ## How do I filter repositories?[[how-to-filter-repositories-]] 25 | 26 | Listing repositories is useful, but you may also want to filter your search. 27 | The list helpers have several attributes such as: 28 | - `filter` 29 | - `author` 30 | - `search` 31 | - ... 32 | 33 | Two of these parameters are intuitive (`author` and `search`), but what about `filter`? 34 | `filter` takes a [`ModelFilter`] object (or a [`DatasetFilter`]) as input. You can instantiate it by specifying which models you want to filter for. 35 | 36 | As an example, let's find all models on the Hub for image classification that run with PyTorch and were trained on the imagenet dataset. This can be done with a single [`ModelFilter`]; the filtering attributes are combined with a logical AND, so only models matching all the specified conditions are selected. 37 | 38 | ```py 39 | models = api.list_models( 40 | filter=ModelFilter( 41 | task="image-classification", 42 | library="pytorch", 43 | trained_dataset="imagenet" 44 | ) 45 | ) 46 | ``` 47 | 48 | While filtering, you can also sort the models and take only the top results. The following example fetches the top 5 most downloaded datasets on the Hub.
49 | 50 | ```py 51 | >>> list(list_datasets(sort="downloads", limit=5)) 52 | [DatasetInfo( 53 | id='argilla/databricks-dolly-15k-curated-en', 54 | author='argilla', 55 | sha='4dcd1dedbe148307a833c931b21ca456a1fc4281', 56 | last_modified=datetime.datetime(2023, 10, 2, 12, 32, 53, tzinfo=datetime.timezone.utc), 57 | private=False, 58 | downloads=8889377, 59 | (...) 60 | ``` 61 | 62 | 63 | 64 | To explore the filters available on the Hub, visit the [models](https://huggingface.co/models) and [datasets](https://huggingface.co/datasets) pages in your browser, search for some parameters, and look at the values in the URL. 65 | -------------------------------------------------------------------------------- /docs/source/en/package_reference/inference_client.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Inference 6 | 7 | Inference is the process of using a trained model to make predictions on new data. Because this process can be compute-intensive, running on a dedicated or external service can be an interesting option. 8 | The `huggingface_hub` library provides a unified interface to run inference across multiple services for models hosted on the Hugging Face Hub: 9 | 10 | 1. [Inference Providers](https://huggingface.co/docs/inference-providers/index): streamlined, unified access to hundreds of machine learning models, powered by our serverless inference partners. This new approach builds on our previous Serverless Inference API, offering more models, improved performance, and greater reliability thanks to world-class providers. Refer to the [documentation](https://huggingface.co/docs/inference-providers/index#partners) for a list of supported providers. 11 | 2. [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index): a product to easily deploy models to production. Inference is run by Hugging Face in a dedicated, fully managed infrastructure on a cloud provider of your choice. 12 | 3. Local endpoints: you can also run inference with local inference servers like [llama.cpp](https://github.com/ggerganov/llama.cpp), [Ollama](https://ollama.com/), [vLLM](https://github.com/vllm-project/vllm), [LiteLLM](https://docs.litellm.ai/docs/simple_proxy), or [Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) by connecting the client to these local endpoints. 13 | 14 | These services can be called with the [`InferenceClient`] object. Please refer to [this guide](../guides/inference) 15 | for more information on how to use it. 16 | 17 | ## Inference Client 18 | 19 | [[autodoc]] InferenceClient 20 | 21 | ## Async Inference Client 22 | 23 | An async version of the client is also provided, based on `asyncio` and `httpx`. 24 | 25 | [[autodoc]] AsyncInferenceClient 26 | 27 | ## InferenceTimeoutError 28 | 29 | [[autodoc]] InferenceTimeoutError 30 | -------------------------------------------------------------------------------- /docs/source/ko/package_reference/cards.md: -------------------------------------------------------------------------------- 1 | # Repository Cards[[repository-cards]] 2 | 3 | The huggingface_hub library provides a Python interface to create, share, and update Model/Dataset Cards. 4 | Visit the [dedicated documentation page](https://huggingface.co/docs/hub/models-cards) to take a deeper look at what Model Cards on the Hub are and how they work under the hood. You can also check out the [Model Cards guide](../how-to-model-cards) to get a feel for how you would use these utilities in your own projects. 5 | 6 | ## Repo Card[[huggingface_hub.RepoCard]] 7 | 8 | The `RepoCard` object is the parent class of [`ModelCard`], [`DatasetCard`], and `SpaceCard`.
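For instance, a minimal sketch (the dataset id is illustrative) of reading card metadata through one of these subclasses:

```py
>>> from huggingface_hub import DatasetCard

>>> card = DatasetCard.load("nyu-mll/glue")  # illustrative dataset repo id
>>> card.data.license  # access the card's YAML metadata
```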
9 | 10 | [[autodoc]] huggingface_hub.repocard.RepoCard 11 | - __init__ 12 | - all 13 | 14 | ## Card Data[[huggingface_hub.CardData]] 15 | 16 | The [`CardData`] object is the parent class of [`ModelCardData`] and [`DatasetCardData`]. 17 | 18 | [[autodoc]] huggingface_hub.repocard_data.CardData 19 | 20 | ## Model Cards[[model-cards]] 21 | 22 | ### ModelCard[[huggingface_hub.ModelCard]] 23 | 24 | [[autodoc]] ModelCard 25 | 26 | ### ModelCardData[[huggingface_hub.ModelCardData]] 27 | 28 | [[autodoc]] ModelCardData 29 | 30 | ## Dataset Cards[[cards#dataset-cards]] 31 | 32 | Dataset cards are also known as Data Cards in the ML community. 33 | 34 | ### DatasetCard[[huggingface_hub.DatasetCard]] 35 | 36 | [[autodoc]] DatasetCard 37 | 38 | ### DatasetCardData[[huggingface_hub.DatasetCardData]] 39 | 40 | [[autodoc]] DatasetCardData 41 | 42 | ## Space Cards[[space-cards]] 43 | 44 | ### SpaceCard[[huggingface_hub.SpaceCard]] 45 | 46 | [[autodoc]] SpaceCard 47 | 48 | ### SpaceCardData[[huggingface_hub.SpaceCardData]] 49 | 50 | [[autodoc]] SpaceCardData 51 | 52 | ## Utilities[[utilities]] 53 | 54 | ### EvalResult[[huggingface_hub.EvalResult]] 55 | 56 | [[autodoc]] EvalResult 57 | 58 | ### model_index_to_eval_results[[huggingface_hub.repocard_data.model_index_to_eval_results]] 59 | 60 | [[autodoc]] huggingface_hub.repocard_data.model_index_to_eval_results 61 | 62 | ### eval_results_to_model_index[[huggingface_hub.repocard_data.eval_results_to_model_index]] 63 | 64 | [[autodoc]] huggingface_hub.repocard_data.eval_results_to_model_index 65 | 66 | ### metadata_eval_result[[huggingface_hub.metadata_eval_result]] 67 | 68 | [[autodoc]] huggingface_hub.repocard.metadata_eval_result 69 | 70 | ### metadata_update[[huggingface_hub.metadata_update]] 71 | 72 | [[autodoc]] huggingface_hub.repocard.metadata_update -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/sambanova.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, Union 2 | 3 | from huggingface_hub.hf_api import InferenceProviderMapping 4 | from huggingface_hub.inference._common import RequestParameters, _as_dict 5 | from huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none 6 | 7 | 8 | class SambanovaConversationalTask(BaseConversationalTask): 9 | def __init__(self): 10 | super().__init__(provider="sambanova", base_url="https://api.sambanova.ai") 11 | 12 | def _prepare_payload_as_dict( 13 | self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping 14 | ) -> Optional[dict]: 15 | response_format_config = parameters.get("response_format") 16 | if isinstance(response_format_config, dict): 17 | if response_format_config.get("type") == "json_schema": 18 | json_schema_config = response_format_config.get("json_schema", {}) 19 | strict = json_schema_config.get("strict") 20 | if isinstance(json_schema_config, dict) and (strict is True or strict is None): 21 | json_schema_config["strict"] = False 22 | 23 | payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) 24 | return payload 25 | 26 | 27 | class SambanovaFeatureExtractionTask(TaskProviderHelper): 28 | def __init__(self): 29 | super().__init__(provider="sambanova", base_url="https://api.sambanova.ai", task="feature-extraction") 30 | 31 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 32 | return "/v1/embeddings" 33 | 34 | def _prepare_payload_as_dict( 35 | self, inputs: Any, parameters: dict, 
provider_mapping_info: InferenceProviderMapping 36 | ) -> Optional[dict]: 37 | parameters = filter_none(parameters) 38 | return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters} 39 | 40 | def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: 41 | embeddings = _as_dict(response)["data"] 42 | return [embedding["embedding"] for embedding in embeddings] 43 | -------------------------------------------------------------------------------- /src/huggingface_hub/utils/_dotenv.py: -------------------------------------------------------------------------------- 1 | # AI-generated module (ChatGPT) 2 | import re 3 | from typing import Optional 4 | 5 | 6 | def load_dotenv(dotenv_str: str, environ: Optional[dict[str, str]] = None) -> dict[str, str]: 7 | """ 8 | Parse a DOTENV-format string and return a dictionary of key-value pairs. 9 | Handles quoted values, comments, export keyword, and blank lines. 10 | """ 11 | env: dict[str, str] = {} 12 | line_pattern = re.compile( 13 | r""" 14 | ^\s* 15 | (?:export[^\S\n]+)? # optional export 16 | ([A-Za-z_][A-Za-z0-9_]*) # key 17 | [^\S\n]*(=)?[^\S\n]* 18 | ( # value group 19 | (?: 20 | '(?:\\'|[^'])*' # single-quoted value 21 | | \"(?:\\\"|[^\"])*\" # double-quoted value 22 | | [^#\n\r]+? # unquoted value 23 | ) 24 | )? 25 | [^\S\n]*(?:\#.*)?$ # optional inline comment 26 | """, 27 | re.VERBOSE, 28 | ) 29 | 30 | for line in dotenv_str.splitlines(): 31 | line = line.strip() 32 | if not line or line.startswith("#"): 33 | continue # Skip comments and empty lines 34 | 35 | match = line_pattern.match(line) 36 | if match: 37 | key = match.group(1) 38 | val = None 39 | if match.group(2): # if there is '=' 40 | raw_val = match.group(3) or "" 41 | val = raw_val.strip() 42 | # Remove surrounding quotes if quoted 43 | if (val.startswith('"') and val.endswith('"')) or (val.startswith("'") and val.endswith("'")): 44 | val = val[1:-1] 45 | val = val.replace(r"\n", "\n").replace(r"\t", "\t").replace(r"\"", '"').replace(r"\\", "\\") 46 | if raw_val.startswith('"'): 47 | val = val.replace(r"\$", "$") # only in double quotes 48 | elif environ is not None: 49 | # Get it from the current environment 50 | val = environ.get(key) 51 | 52 | if val is not None: 53 | env[key] = val 54 | 55 | return env 56 | -------------------------------------------------------------------------------- /utils/helpers.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2024-present, the HuggingFace Inc. team. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | """Contains helpers used by the scripts in `./utils`.""" 16 | 17 | import subprocess 18 | import tempfile 19 | from pathlib import Path 20 | 21 | from ruff.__main__ import find_ruff_bin 22 | 23 | 24 | def check_and_update_file_content(file: Path, expected_content: str, update: bool): 25 | # Ensure the expected content ends with a newline to satisfy end-of-file-fixer hook 26 | expected_content = expected_content.rstrip("\n") + "\n" 27 | content = file.read_text() if file.exists() else None 28 | if content != expected_content: 29 | if update: 30 | file.write_text(expected_content) 31 | print(f" {file} has been updated. Please make sure the changes are accurate and commit them.") 32 | else: 33 | print(f"❌ Expected content mismatch in {file}.") 34 | exit(1) 35 | 36 | 37 | def format_source_code(code: str) -> str: 38 | """Format the generated source code using Ruff.""" 39 | with tempfile.TemporaryDirectory() as tmpdir: 40 | filepath = Path(tmpdir) / "tmp.py" 41 | filepath.write_text(code) 42 | ruff_bin = find_ruff_bin() 43 | if not ruff_bin: 44 | raise FileNotFoundError("Ruff executable not found.") 45 | try: 46 | subprocess.run([ruff_bin, "check", str(filepath), "--fix", "--quiet"], check=True) 47 | subprocess.run([ruff_bin, "format", str(filepath), "--quiet"], check=True) 48 | except subprocess.CalledProcessError as e: 49 | raise RuntimeError(f"Error running Ruff: {e}") 50 | return filepath.read_text() 51 | -------------------------------------------------------------------------------- /tests/test_utils_validators.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from pathlib import Path 3 | from unittest.mock import Mock, patch 4 | 5 | from huggingface_hub.utils import ( 6 | HFValidationError, 7 | validate_hf_hub_args, 8 | validate_repo_id, 9 | ) 10 | 11 | 12 | @patch("huggingface_hub.utils._validators.validate_repo_id") 13 | class TestHfHubValidator(unittest.TestCase): 14 | """Test `validate_hf_hub_args` decorator calls all default validators.""" 15 | 16 | def test_validate_repo_id_as_arg(self, validate_repo_id_mock: Mock) -> None: 17 | """Test `validate_repo_id` is called when `repo_id` is passed as arg.""" 18 | self.dummy_function(123) 19 | validate_repo_id_mock.assert_called_once_with(123) 20 | 21 | def test_validate_repo_id_as_kwarg(self, validate_repo_id_mock: Mock) -> None: 22 | """Test `validate_repo_id` is called when `repo_id` is passed as kwarg.""" 23 | self.dummy_function(repo_id=123) 24 | validate_repo_id_mock.assert_called_once_with(123) 25 | 26 | @staticmethod 27 | @validate_hf_hub_args 28 | def dummy_function(repo_id: str) -> None: 29 | pass 30 | 31 | 32 | class TestRepoIdValidator(unittest.TestCase): 33 | VALID_VALUES = ( 34 | "123", 35 | "foo", 36 | "foo/bar", 37 | "Foo-BAR_foo.bar123", 38 | ) 39 | NOT_VALID_VALUES = ( 40 | Path("foo/bar"), # Must be a string 41 | "a" * 100, # Too long 42 | "datasets/foo/bar", # Repo_type forbidden in repo_id 43 | ".repo_id", # Cannot start with . 44 | "repo_id.", # Cannot end with . 45 | "foo--bar", # Cannot contain "--" 46 | "foo..bar", # Cannot contain "." 
47 | "foo.git", # Cannot end with ".git" 48 | ) 49 | 50 | def test_valid_repo_ids(self) -> None: 51 | """Test `repo_id` validation on valid values.""" 52 | for repo_id in self.VALID_VALUES: 53 | validate_repo_id(repo_id) 54 | 55 | def test_not_valid_repo_ids(self) -> None: 56 | """Test `repo_id` validation on not valid values.""" 57 | for repo_id in self.NOT_VALID_VALUES: 58 | with self.assertRaises(HFValidationError, msg=f"'{repo_id}' must not be valid"): 59 | validate_repo_id(repo_id) 60 | -------------------------------------------------------------------------------- /docs/source/en/package_reference/inference_endpoints.md: -------------------------------------------------------------------------------- 1 | # Inference Endpoints 2 | 3 | Inference Endpoints provides a secure production solution to easily deploy models on a dedicated and autoscaling infrastructure managed by Hugging Face. An Inference Endpoint is built from a model from the [Hub](https://huggingface.co/models). This page is a reference for `huggingface_hub`'s integration with Inference Endpoints. For more information about the Inference Endpoints product, check out its [official documentation](https://huggingface.co/docs/inference-endpoints/index). 4 | 5 | > [!TIP] 6 | > Check out the [related guide](../guides/inference_endpoints) to learn how to use `huggingface_hub` to manage your Inference Endpoints programmatically. 7 | 8 | Inference Endpoints can be fully managed via API. The endpoints are documented with [Swagger](https://api.endpoints.huggingface.cloud/). The [`InferenceEndpoint`] class is a simple wrapper built on top on this API. 9 | 10 | ## Methods 11 | 12 | A subset of the Inference Endpoint features are implemented in [`HfApi`]: 13 | 14 | - [`get_inference_endpoint`] and [`list_inference_endpoints`] to get information about your Inference Endpoints 15 | - [`create_inference_endpoint`], [`update_inference_endpoint`] and [`delete_inference_endpoint`] to deploy and manage Inference Endpoints 16 | - [`pause_inference_endpoint`] and [`resume_inference_endpoint`] to pause and resume an Inference Endpoint 17 | - [`scale_to_zero_inference_endpoint`] to manually scale an Endpoint to 0 replicas 18 | 19 | ## InferenceEndpoint 20 | 21 | The main dataclass is [`InferenceEndpoint`]. It contains information about a deployed `InferenceEndpoint`, including its configuration and current state. Once deployed, you can run inference on the Endpoint using the [`InferenceEndpoint.client`] and [`InferenceEndpoint.async_client`] properties that respectively return an [`InferenceClient`] and an [`AsyncInferenceClient`] object. 22 | 23 | [[autodoc]] InferenceEndpoint 24 | - from_raw 25 | - client 26 | - async_client 27 | - all 28 | 29 | ## InferenceEndpointStatus 30 | 31 | [[autodoc]] InferenceEndpointStatus 32 | 33 | ## InferenceEndpointType 34 | 35 | [[autodoc]] InferenceEndpointType 36 | 37 | ## InferenceEndpointError 38 | 39 | [[autodoc]] InferenceEndpointError 40 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/object_detection.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
6 | from typing import Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class ObjectDetectionParameters(BaseInferenceType): 13 | """Additional inference parameters for Object Detection""" 14 | 15 | threshold: Optional[float] = None 16 | """The probability necessary to make a prediction.""" 17 | 18 | 19 | @dataclass_with_extra 20 | class ObjectDetectionInput(BaseInferenceType): 21 | """Inputs for Object Detection inference""" 22 | 23 | inputs: str 24 | """The input image data as a base64-encoded string. If no `parameters` are provided, you can 25 | also provide the image data as a raw bytes payload. 26 | """ 27 | parameters: Optional[ObjectDetectionParameters] = None 28 | """Additional inference parameters for Object Detection""" 29 | 30 | 31 | @dataclass_with_extra 32 | class ObjectDetectionBoundingBox(BaseInferenceType): 33 | """The predicted bounding box. Coordinates are relative to the top left corner of the input 34 | image. 35 | """ 36 | 37 | xmax: int 38 | """The x-coordinate of the bottom-right corner of the bounding box.""" 39 | xmin: int 40 | """The x-coordinate of the top-left corner of the bounding box.""" 41 | ymax: int 42 | """The y-coordinate of the bottom-right corner of the bounding box.""" 43 | ymin: int 44 | """The y-coordinate of the top-left corner of the bounding box.""" 45 | 46 | 47 | @dataclass_with_extra 48 | class ObjectDetectionOutputElement(BaseInferenceType): 49 | """Outputs of inference for the Object Detection task""" 50 | 51 | box: ObjectDetectionBoundingBox 52 | """The predicted bounding box. Coordinates are relative to the top left corner of the input 53 | image. 54 | """ 55 | label: str 56 | """The predicted label for the bounding box.""" 57 | score: float 58 | """The associated score / probability.""" 59 | -------------------------------------------------------------------------------- /src/huggingface_hub/utils/_chunk_utils.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2022-present, the HuggingFace Inc. team. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """Contains a utility to iterate by chunks over an iterator.""" 16 | 17 | import itertools 18 | from typing import Iterable, TypeVar 19 | 20 | 21 | T = TypeVar("T") 22 | 23 | 24 | def chunk_iterable(iterable: Iterable[T], chunk_size: int) -> Iterable[Iterable[T]]: 25 | """Iterates over an iterator chunk by chunk. 26 | 27 | Taken from https://stackoverflow.com/a/8998040. 28 | See also https://github.com/huggingface/huggingface_hub/pull/920#discussion_r938793088. 29 | 30 | Args: 31 | iterable (`Iterable`): 32 | The iterable on which we want to iterate. 33 | chunk_size (`int`): 34 | Size of the chunks. Must be a strictly positive integer (e.g. >0). 35 | 36 | Example: 37 | 38 | ```python 39 | >>> from huggingface_hub.utils import chunk_iterable 40 | 41 | >>> for items in chunk_iterable(range(17), chunk_size=8): 42 | ... 
print(items) 43 | # [0, 1, 2, 3, 4, 5, 6, 7] 44 | # [8, 9, 10, 11, 12, 13, 14, 15] 45 | # [16] # smaller last chunk 46 | ``` 47 | 48 | Raises: 49 | [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) 50 | If `chunk_size` <= 0. 51 | 52 | > [!WARNING] 53 | > The last chunk can be smaller than `chunk_size`. 54 | """ 55 | if not isinstance(chunk_size, int) or chunk_size <= 0: 56 | raise ValueError("`chunk_size` must be a strictly positive integer (>0).") 57 | 58 | iterator = iter(iterable) 59 | while True: 60 | try: 61 | next_item = next(iterator) 62 | except StopIteration: 63 | return 64 | yield itertools.chain((next_item,), itertools.islice(iterator, chunk_size - 1)) 65 | -------------------------------------------------------------------------------- /src/huggingface_hub/utils/sha.py: -------------------------------------------------------------------------------- 1 | """Utilities to efficiently compute the SHA 256 hash of a bunch of bytes.""" 2 | 3 | from typing import BinaryIO, Optional 4 | 5 | from .insecure_hashlib import sha1, sha256 6 | 7 | 8 | def sha_fileobj(fileobj: BinaryIO, chunk_size: Optional[int] = None) -> bytes: 9 | """ 10 | Computes the sha256 hash of the given file object by chunks of size `chunk_size`. 11 | 12 | Args: 13 | fileobj (file-like object): 14 | The file object to compute sha256 for, typically obtained with `open(path, "rb")` 15 | chunk_size (`int`, *optional*): 16 | The number of bytes to read from `fileobj` at once, defaults to 1MB. 17 | 18 | Returns: 19 | `bytes`: `fileobj`'s sha256 hash as bytes 20 | """ 21 | chunk_size = chunk_size if chunk_size is not None else 1024 * 1024 22 | 23 | sha = sha256() 24 | while True: 25 | chunk = fileobj.read(chunk_size) 26 | sha.update(chunk) 27 | if not chunk: 28 | break 29 | return sha.digest() 30 | 31 | 32 | def git_hash(data: bytes) -> str: 33 | """ 34 | Computes the git-sha1 hash of the given bytes, using the same algorithm as git. 35 | 36 | This is equivalent to running `git hash-object`. See https://git-scm.com/docs/git-hash-object 37 | for more details. 38 | 39 | Note: this method is valid for regular files. For LFS files, the proper git hash is supposed to be computed on the 40 | pointer file content, not the actual file content. However, for simplicity, we directly compare the sha256 of 41 | the LFS file content when we want to compare LFS files. 42 | 43 | Args: 44 | data (`bytes`): 45 | The data to compute the git-hash for. 46 | 47 | Returns: 48 | `str`: the git-hash of `data` as a hexadecimal string. 49 | 50 | Example: 51 | ```python 52 | >>> from huggingface_hub.utils.sha import git_hash 53 | >>> git_hash(b"Hello, World!") 54 | 'b45ef6fec89518d314f546fd6c3025367b721684' 55 | ``` 56 | """ 57 | # Taken from https://gist.github.com/msabramo/763200 58 | # Note: no need to optimize by reading the file in chunks as we're not supposed to hash huge files (5MB maximum). 59 | sha = sha1() 60 | sha.update(b"blob ") 61 | sha.update(str(len(data)).encode()) 62 | sha.update(b"\0") 63 | sha.update(data) 64 | return sha.hexdigest() 65 | -------------------------------------------------------------------------------- /utils/hf/README.md: -------------------------------------------------------------------------------- 1 | 
2 | [logo: huggingface_hub library logo] 12 | The official Python client for the Hugging Face Hub. 16 | [badges: Documentation · GitHub release · PyPi version · PyPI - Downloads · Code coverage] 21 | 
22 | 23 | --- 24 | 25 | **CLI Documentation**: https://huggingface.co/docs/huggingface_hub/guides/cli 26 | 27 | **Source Code**: https://github.com/huggingface/huggingface_hub 28 | 29 | > [!TIP] 30 | > This package provides a clean CLI interface via `uvx hf`. It is **not meant to be used as a package in scripts**. Use `huggingface_hub` instead. 31 | 32 | ## Usage 33 | 34 | Install and use the CLI with `uv`: 35 | 36 | ```bash 37 | uvx hf version 38 | uvx hf auth whoami 39 | uvx hf download MiniMaxAI/MiniMax-M2 40 | ``` 41 | 42 | ## Note 43 | 44 | The legacy `hf` package (which provided a Mapping interface to HuggingFace) has been moved to [hfdol](https://pypi.org/project/hfdol/). -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/image_to_video.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class ImageToVideoTargetSize(BaseInferenceType): 13 | """The size in pixel of the output video frames.""" 14 | 15 | height: int 16 | width: int 17 | 18 | 19 | @dataclass_with_extra 20 | class ImageToVideoParameters(BaseInferenceType): 21 | """Additional inference parameters for Image To Video""" 22 | 23 | guidance_scale: Optional[float] = None 24 | """For diffusion models. A higher guidance scale value encourages the model to generate 25 | videos closely linked to the text prompt at the expense of lower image quality. 26 | """ 27 | negative_prompt: Optional[str] = None 28 | """One prompt to guide what NOT to include in video generation.""" 29 | num_frames: Optional[float] = None 30 | """The num_frames parameter determines how many video frames are generated.""" 31 | num_inference_steps: Optional[int] = None 32 | """The number of denoising steps. More denoising steps usually lead to a higher quality 33 | video at the expense of slower inference. 34 | """ 35 | prompt: Optional[str] = None 36 | """The text prompt to guide the video generation.""" 37 | seed: Optional[int] = None 38 | """Seed for the random number generator.""" 39 | target_size: Optional[ImageToVideoTargetSize] = None 40 | """The size in pixel of the output video frames.""" 41 | 42 | 43 | @dataclass_with_extra 44 | class ImageToVideoInput(BaseInferenceType): 45 | """Inputs for Image To Video inference""" 46 | 47 | inputs: str 48 | """The input image data as a base64-encoded string. If no `parameters` are provided, you can 49 | also provide the image data as a raw bytes payload. 
50 | """ 51 | parameters: Optional[ImageToVideoParameters] = None 52 | """Additional inference parameters for Image To Video""" 53 | 54 | 55 | @dataclass_with_extra 56 | class ImageToVideoOutput(BaseInferenceType): 57 | """Outputs of inference for the Image To Video task""" 58 | 59 | video: Any 60 | """The generated video returned as raw bytes in the payload.""" 61 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/image_to_image.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Any, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class ImageToImageTargetSize(BaseInferenceType): 13 | """The size in pixels of the output image. This parameter is only supported by some 14 | providers and for specific models. It will be ignored when unsupported. 15 | """ 16 | 17 | height: int 18 | width: int 19 | 20 | 21 | @dataclass_with_extra 22 | class ImageToImageParameters(BaseInferenceType): 23 | """Additional inference parameters for Image To Image""" 24 | 25 | guidance_scale: Optional[float] = None 26 | """For diffusion models. A higher guidance scale value encourages the model to generate 27 | images closely linked to the text prompt at the expense of lower image quality. 28 | """ 29 | negative_prompt: Optional[str] = None 30 | """One prompt to guide what NOT to include in image generation.""" 31 | num_inference_steps: Optional[int] = None 32 | """For diffusion models. The number of denoising steps. More denoising steps usually lead to 33 | a higher quality image at the expense of slower inference. 34 | """ 35 | prompt: Optional[str] = None 36 | """The text prompt to guide the image generation.""" 37 | target_size: Optional[ImageToImageTargetSize] = None 38 | """The size in pixels of the output image. This parameter is only supported by some 39 | providers and for specific models. It will be ignored when unsupported. 40 | """ 41 | 42 | 43 | @dataclass_with_extra 44 | class ImageToImageInput(BaseInferenceType): 45 | """Inputs for Image To Image inference""" 46 | 47 | inputs: str 48 | """The input image data as a base64-encoded string. If no `parameters` are provided, you can 49 | also provide the image data as a raw bytes payload. 50 | """ 51 | parameters: Optional[ImageToImageParameters] = None 52 | """Additional inference parameters for Image To Image""" 53 | 54 | 55 | @dataclass_with_extra 56 | class ImageToImageOutput(BaseInferenceType): 57 | """Outputs of inference for the Image To Image task""" 58 | 59 | image: Any 60 | """The output image returned as raw bytes in the payload.""" 61 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_generated/types/table_question_answering.py: -------------------------------------------------------------------------------- 1 | # Inference code generated from the JSON schema spec in @huggingface/tasks. 
2 | # 3 | # See: 4 | # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts 5 | # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 6 | from typing import Literal, Optional 7 | 8 | from .base import BaseInferenceType, dataclass_with_extra 9 | 10 | 11 | @dataclass_with_extra 12 | class TableQuestionAnsweringInputData(BaseInferenceType): 13 | """One (table, question) pair to answer""" 14 | 15 | question: str 16 | """The question to be answered about the table""" 17 | table: dict[str, list[str]] 18 | """The table to serve as context for the questions""" 19 | 20 | 21 | Padding = Literal["do_not_pad", "longest", "max_length"] 22 | 23 | 24 | @dataclass_with_extra 25 | class TableQuestionAnsweringParameters(BaseInferenceType): 26 | """Additional inference parameters for Table Question Answering""" 27 | 28 | padding: Optional["Padding"] = None 29 | """Activates and controls padding.""" 30 | sequential: Optional[bool] = None 31 | """Whether to do inference sequentially or as a batch. Batching is faster, but models like 32 | SQA require the inference to be done sequentially to extract relations within sequences, 33 | given their conversational nature. 34 | """ 35 | truncation: Optional[bool] = None 36 | """Activates and controls truncation.""" 37 | 38 | 39 | @dataclass_with_extra 40 | class TableQuestionAnsweringInput(BaseInferenceType): 41 | """Inputs for Table Question Answering inference""" 42 | 43 | inputs: TableQuestionAnsweringInputData 44 | """One (table, question) pair to answer""" 45 | parameters: Optional[TableQuestionAnsweringParameters] = None 46 | """Additional inference parameters for Table Question Answering""" 47 | 48 | 49 | @dataclass_with_extra 50 | class TableQuestionAnsweringOutputElement(BaseInferenceType): 51 | """Outputs of inference for the Table Question Answering task""" 52 | 53 | answer: str 54 | """The answer of the question given the table. If there is an aggregator, the answer will be 55 | preceded by `AGGREGATOR >`. 
56 | """ 57 | cells: list[str] 58 | """list of strings made up of the answer cell values.""" 59 | coordinates: list[list[int]] 60 | """Coordinates of the cells of the answers.""" 61 | aggregator: Optional[str] = None 62 | """If the model has an aggregator, this returns the aggregator.""" 63 | -------------------------------------------------------------------------------- /docs/source/ko/_toctree.yml: -------------------------------------------------------------------------------- 1 | - title: "시작하기" 2 | sections: 3 | - local: index 4 | title: 홈 5 | - local: quick-start 6 | title: 둘러보기 7 | - local: installation 8 | title: 설치 방법 9 | - title: "How-to 가이드" 10 | sections: 11 | - local: guides/overview 12 | title: 개요 13 | - local: guides/download 14 | title: 파일 다운로드하기 15 | - local: guides/upload 16 | title: 파일 업로드하기 17 | - local: guides/cli 18 | title: 명령줄 인터페이스(CLI) 사용하기 19 | - local: guides/hf_file_system 20 | title: Hf파일시스템 21 | - local: guides/search 22 | title: Hub에서 검색하기 23 | - local: guides/inference 24 | title: 추론 25 | - local: guides/inference_endpoints 26 | title: 추론 엔드포인트 27 | - local: guides/community 28 | title: 커뮤니티 29 | - local: guides/collections 30 | title: Collections 31 | - local: guides/manage-cache 32 | title: 캐시 관리 33 | - local: guides/model-cards 34 | title: 모델 카드 35 | - local: guides/manage-spaces 36 | title: Space 관리 37 | - local: guides/integrations 38 | title: 라이브러리 통합 39 | - local: guides/webhooks_server 40 | title: 웹훅 서버 41 | - title: "라이브러리 레퍼런스" 42 | sections: 43 | - local: package_reference/overview 44 | title: 개요 45 | - local: package_reference/login 46 | title: 로그인 및 로그아웃 47 | - local: package_reference/environment_variables 48 | title: 환경 변수 49 | - local: package_reference/hf_api 50 | title: 허깅페이스 Hub API 51 | - local: package_reference/file_download 52 | title: 파일 다운로드하기 53 | - local: package_reference/mixins 54 | title: 믹스인 & 직렬화 메소드 55 | - local: package_reference/inference_types 56 | title: 추론 타입 57 | - local: package_reference/inference_client 58 | title: 추론 클라이언트 59 | - local: package_reference/inference_endpoints 60 | title: 추론 엔드포인트 61 | - local: package_reference/hf_file_system 62 | title: Hf파일시스템 63 | - local: package_reference/utilities 64 | title: 유틸리티 65 | - local: package_reference/community 66 | title: Discussions 및 Pull Requests 67 | - local: package_reference/cache 68 | title: 캐시 시스템 참조 69 | - local: package_reference/cards 70 | title: Repo Cards 와 Repo Card Data 71 | - local: package_reference/collections 72 | title: 컬렉션 관리 73 | - local: package_reference/space_runtime 74 | title: Space 런타임 75 | - local: package_reference/tensorboard 76 | title: TensorBoard 로거 77 | - local: package_reference/webhooks_server 78 | title: 웹훅 서버 79 | - local: package_reference/serialization 80 | title: 직렬화 -------------------------------------------------------------------------------- /src/huggingface_hub/utils/endpoint_helpers.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | """ 13 | Helpful utility functions and classes in relation to exploring API endpoints 14 | with the aim of providing a user-friendly interface. 15 | """ 16 | 17 | import math 18 | import re 19 | from typing import TYPE_CHECKING 20 | 21 | from ..repocard_data import ModelCardData 22 | 23 | 24 | if TYPE_CHECKING: 25 | from ..hf_api import ModelInfo 26 | 27 | 28 | def _is_emission_within_threshold(model_info: "ModelInfo", minimum_threshold: float, maximum_threshold: float) -> bool: 29 | """Checks if a model's emission is within a given threshold. 30 | 31 | Args: 32 | model_info (`ModelInfo`): 33 | A model info object containing the model's emission information. 34 | minimum_threshold (`float`): 35 | A minimum carbon threshold to filter by, such as 1. 36 | maximum_threshold (`float`): 37 | A maximum carbon threshold to filter by, such as 10. 38 | 39 | Returns: 40 | `bool`: Whether the model's emission is within the given threshold. 41 | """ 42 | if minimum_threshold is None and maximum_threshold is None: 43 | raise ValueError("`minimum_threshold` and `maximum_threshold` cannot both be `None`") 44 | if minimum_threshold is None: 45 | minimum_threshold = -1 46 | if maximum_threshold is None: 47 | maximum_threshold = math.inf 48 | 49 | card_data = getattr(model_info, "card_data", None) 50 | if card_data is None or not isinstance(card_data, (dict, ModelCardData)): 51 | return False 52 | 53 | # Get CO2 emission metadata 54 | emission = card_data.get("co2_eq_emissions", None) 55 | if isinstance(emission, dict): 56 | emission = emission["emissions"] 57 | if not emission: 58 | return False 59 | 60 | # Filter out if value is missing or out of range 61 | matched = re.search(r"\d+\.\d+|\d+", str(emission)) 62 | if matched is None: 63 | return False 64 | 65 | emission_value = float(matched.group(0)) 66 | return minimum_threshold <= emission_value <= maximum_threshold 67 | -------------------------------------------------------------------------------- /src/huggingface_hub/utils/_terminal.py: -------------------------------------------------------------------------------- 1 | # Copyright 2025 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
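# NOTE (editor's sketch, not part of the original module): typical usage of the helpers defined below is `from huggingface_hub.utils._terminal import ANSI, tabulate`, then `print(ANSI.green("Success!"))` for colored output (a no-op when the `NO_COLOR` environment variable is set) and `print(tabulate(rows=[["my-model", 3]], headers=["repo", "files"]))` for a simple aligned table.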
14 | """Contains utilities to print stuff to the terminal (styling, helpers).""" 15 | 16 | import os 17 | from typing import Union 18 | 19 | 20 | class ANSI: 21 | """ 22 | Helper for en.wikipedia.org/wiki/ANSI_escape_code 23 | """ 24 | 25 | _blue = "\u001b[34m" 26 | _bold = "\u001b[1m" 27 | _gray = "\u001b[90m" 28 | _green = "\u001b[32m" 29 | _red = "\u001b[31m" 30 | _reset = "\u001b[0m" 31 | _yellow = "\u001b[33m" 32 | 33 | @classmethod 34 | def blue(cls, s: str) -> str: 35 | return cls._format(s, cls._blue) 36 | 37 | @classmethod 38 | def bold(cls, s: str) -> str: 39 | return cls._format(s, cls._bold) 40 | 41 | @classmethod 42 | def gray(cls, s: str) -> str: 43 | return cls._format(s, cls._gray) 44 | 45 | @classmethod 46 | def green(cls, s: str) -> str: 47 | return cls._format(s, cls._green) 48 | 49 | @classmethod 50 | def red(cls, s: str) -> str: 51 | return cls._format(s, cls._bold + cls._red) 52 | 53 | @classmethod 54 | def yellow(cls, s: str) -> str: 55 | return cls._format(s, cls._yellow) 56 | 57 | @classmethod 58 | def _format(cls, s: str, code: str) -> str: 59 | if os.environ.get("NO_COLOR"): 60 | # See https://no-color.org/ 61 | return s 62 | return f"{code}{s}{cls._reset}" 63 | 64 | 65 | def tabulate(rows: list[list[Union[str, int]]], headers: list[str]) -> str: 66 | """ 67 | Inspired by: 68 | 69 | - stackoverflow.com/a/8356620/593036 70 | - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data 71 | """ 72 | col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)] 73 | row_format = ("{{:{}}} " * len(headers)).format(*col_widths) 74 | lines = [] 75 | lines.append(row_format.format(*headers)) 76 | lines.append(row_format.format(*["-" * w for w in col_widths])) 77 | for row in rows: 78 | lines.append(row_format.format(*row)) 79 | return "\n".join(lines) 80 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | pip-wheel-metadata/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | db.sqlite3-journal 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # IPython 80 | profile_default/ 81 | ipython_config.py 82 | 83 | # pyenv 84 | .python-version 85 | 86 | # pipenv 87 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
88 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 89 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 90 | # install all needed dependencies. 91 | #Pipfile.lock 92 | 93 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 94 | __pypackages__/ 95 | 96 | # Celery stuff 97 | celerybeat-schedule 98 | celerybeat.pid 99 | 100 | # SageMath parsed files 101 | *.sage.py 102 | 103 | # Environments 104 | .env 105 | .venv 106 | .venv* 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | .venv* 113 | 114 | # Spyder project settings 115 | .spyderproject 116 | .spyproject 117 | 118 | # Rope project settings 119 | .ropeproject 120 | 121 | # mkdocs documentation 122 | /site 123 | 124 | # mypy 125 | .mypy_cache/ 126 | .dmypy.json 127 | dmypy.json 128 | 129 | # Pyre type checker 130 | .pyre/ 131 | .vscode/ 132 | .idea/ 133 | 134 | .DS_Store 135 | 136 | # Ruff 137 | .ruff_cache 138 | 139 | # Spell checker config 140 | cspell.json 141 | 142 | tmp* 143 | 144 | # Claude Code 145 | CLAUDE.md -------------------------------------------------------------------------------- /src/huggingface_hub/cli/hf.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from huggingface_hub import constants 17 | from huggingface_hub.cli._cli_utils import check_cli_update, typer_factory 18 | from huggingface_hub.cli.auth import auth_cli 19 | from huggingface_hub.cli.cache import cache_cli 20 | from huggingface_hub.cli.download import download 21 | from huggingface_hub.cli.inference_endpoints import ie_cli 22 | from huggingface_hub.cli.jobs import jobs_cli 23 | from huggingface_hub.cli.lfs import lfs_enable_largefiles, lfs_multipart_upload 24 | from huggingface_hub.cli.repo import repo_cli 25 | from huggingface_hub.cli.repo_files import repo_files_cli 26 | from huggingface_hub.cli.system import env, version 27 | from huggingface_hub.cli.upload import upload 28 | from huggingface_hub.cli.upload_large_folder import upload_large_folder 29 | from huggingface_hub.utils import logging 30 | 31 | 32 | app = typer_factory(help="Hugging Face Hub CLI") 33 | 34 | 35 | # top level single commands (defined in their respective files) 36 | app.command(help="Download files from the Hub.")(download) 37 | app.command(help="Upload a file or a folder to the Hub.")(upload) 38 | app.command(help="Upload a large folder to the Hub. 
Recommended for resumable uploads.")(upload_large_folder) 39 | app.command(name="env", help="Print information about the environment.")(env) 40 | app.command(help="Print information about the hf version.")(version) 41 | app.command(help="Configure your repository to enable upload of files > 5GB.", hidden=True)(lfs_enable_largefiles) 42 | app.command(help="Upload large files to the Hub.", hidden=True)(lfs_multipart_upload) 43 | 44 | 45 | # command groups 46 | app.add_typer(auth_cli, name="auth") 47 | app.add_typer(cache_cli, name="cache") 48 | app.add_typer(repo_cli, name="repo") 49 | app.add_typer(repo_files_cli, name="repo-files") 50 | app.add_typer(jobs_cli, name="jobs") 51 | app.add_typer(ie_cli, name="endpoints") 52 | 53 | 54 | def main(): 55 | if not constants.HF_DEBUG: 56 | logging.set_verbosity_info() 57 | check_cli_update() 58 | app() 59 | 60 | 61 | if __name__ == "__main__": 62 | main() 63 | -------------------------------------------------------------------------------- /tests/test_utils_terminal.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | from unittest import mock 4 | 5 | from huggingface_hub.utils._terminal import ANSI, tabulate 6 | 7 | 8 | class TestCLIUtils(unittest.TestCase): 9 | @mock.patch.dict(os.environ, {}, clear=True) 10 | def test_ansi_utils(self) -> None: 11 | """Test `ANSI` works as expected.""" 12 | self.assertEqual( 13 | ANSI.bold("this is bold"), 14 | "\x1b[1mthis is bold\x1b[0m", 15 | ) 16 | 17 | self.assertEqual( 18 | ANSI.gray("this is gray"), 19 | "\x1b[90mthis is gray\x1b[0m", 20 | ) 21 | 22 | self.assertEqual( 23 | ANSI.red("this is red"), 24 | "\x1b[1m\x1b[31mthis is red\x1b[0m", 25 | ) 26 | 27 | self.assertEqual( 28 | ANSI.gray(ANSI.bold("this is bold and grey")), 29 | "\x1b[90m\x1b[1mthis is bold and grey\x1b[0m\x1b[0m", 30 | ) 31 | 32 | @mock.patch.dict(os.environ, {"NO_COLOR": "1"}, clear=True) 33 | def test_ansi_no_color(self) -> None: 34 | """Test `ANSI` respects `NO_COLOR` env var.""" 35 | 36 | self.assertEqual( 37 | ANSI.bold("this is bold"), 38 | "this is bold", 39 | ) 40 | 41 | self.assertEqual( 42 | ANSI.gray("this is gray"), 43 | "this is gray", 44 | ) 45 | 46 | self.assertEqual( 47 | ANSI.red("this is red"), 48 | "this is red", 49 | ) 50 | 51 | self.assertEqual( 52 | ANSI.gray(ANSI.bold("this is bold and grey")), 53 | "this is bold and grey", 54 | ) 55 | 56 | def test_tabulate_utility(self) -> None: 57 | """Test `tabulate` works as expected.""" 58 | rows = [[1, 2, 3], ["a very long value", "foo", "bar"], ["", 123, 456]] 59 | headers = ["Header 1", "something else", "a third column"] 60 | self.assertEqual( 61 | tabulate(rows=rows, headers=headers), 62 | "Header 1 something else a third column \n" 63 | "----------------- -------------- -------------- \n" 64 | " 1 2 3 \n" 65 | "a very long value foo bar \n" 66 | " 123 456 ", 67 | ) 68 | 69 | def test_tabulate_utility_with_too_short_row(self) -> None: 70 | """ 71 | Test `tabulate` throw IndexError when a row has less values than the header 72 | list. 
73 | """ 74 | self.assertRaises( 75 | IndexError, 76 | tabulate, 77 | rows=[[1]], 78 | headers=["Header 1", "Header 2"], 79 | ) 80 | -------------------------------------------------------------------------------- /.github/workflows/check-installers.yml: -------------------------------------------------------------------------------- 1 | name: Check CLI installers 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "utils/installers/**" 9 | - ".github/workflows/check-installers.yml" 10 | pull_request: 11 | paths: 12 | - "utils/installers/**" 13 | - ".github/workflows/check-installers.yml" 14 | workflow_dispatch: {} 15 | 16 | permissions: 17 | contents: read 18 | 19 | jobs: 20 | linux-installer: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Checkout repository 24 | uses: actions/checkout@v4 25 | 26 | - name: Run installer 27 | shell: bash 28 | run: | 29 | set -euo pipefail 30 | 31 | HF_TEST_ROOT=$(mktemp -d) 32 | INSTALL_DIR="$HF_TEST_ROOT/install" 33 | BIN_DIR="$HF_TEST_ROOT/bin" 34 | 35 | HF_HOME="$INSTALL_DIR" HF_CLI_BIN_DIR="$BIN_DIR" utils/installers/install.sh --no-modify-path 36 | 37 | export PATH="$BIN_DIR:$PATH" 38 | 39 | if ! hf version | tee /dev/stderr | grep -Eq '[0-9]+(\.[0-9]+){1,2}(\.[a-zA-Z]+[0-9]+)?'; then 40 | echo "incorrect hf version output" >&2 41 | exit 1 42 | fi 43 | 44 | NO_COLOR=1 hf --help 45 | 46 | rm -rf "$HF_TEST_ROOT" 47 | 48 | windows-installer: 49 | runs-on: windows-latest 50 | steps: 51 | - name: Checkout repository 52 | uses: actions/checkout@v4 53 | 54 | - name: Run installer 55 | shell: pwsh 56 | run: | 57 | $hfTestRoot = Join-Path $env:TEMP ([System.Guid]::NewGuid().ToString()) 58 | $installDir = Join-Path $hfTestRoot 'install' 59 | $binDir = Join-Path $hfTestRoot 'bin' 60 | New-Item -ItemType Directory -Path $installDir -Force | Out-Null 61 | New-Item -ItemType Directory -Path $binDir -Force | Out-Null 62 | 63 | $env:HF_HOME = $installDir 64 | $env:HF_CLI_BIN_DIR = $binDir 65 | & "$PWD/utils/installers/install.ps1" -NoModifyPath 66 | 67 | $env:PATH = "$binDir;$env:PATH" 68 | 69 | $hfVersionOutput = & hf.exe version 2>&1 70 | $hfVersionOutput | Out-Host # Show the output 71 | 72 | if ($LASTEXITCODE -ne 0) { 73 | throw 'hf version failed' 74 | } 75 | 76 | if (-not ($hfVersionOutput -match '[0-9]+(\.[0-9]+){1,2}(\.[a-zA-Z]+[0-9]+)?')) { 77 | throw 'incorrect hf version output' 78 | } 79 | 80 | $env:NO_COLOR = '1' 81 | & hf.exe --help 82 | if ($LASTEXITCODE -ne 0) { 83 | throw 'hf --help failed' 84 | } 85 | Remove-Item Env:NO_COLOR 86 | 87 | Remove-Item -Path $hfTestRoot -Recurse -Force 88 | -------------------------------------------------------------------------------- /src/huggingface_hub/utils/_experimental.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2023-present, the HuggingFace Inc. team. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
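# NOTE (editor's sketch, not part of the original module): the `experimental` decorator defined below is usually applied to a function, but it can also flag an entire class by decorating its `__new__` method (hence the `.__new__` handling in the code), e.g. a `class MyClass:` whose `def __new__(cls, *args, **kwargs): return super().__new__(cls)` carries the `@experimental` decorator.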
15 | """Contains utilities to flag a feature as "experimental" in Huggingface Hub.""" 16 | 17 | import warnings 18 | from functools import wraps 19 | from typing import Callable 20 | 21 | from .. import constants 22 | 23 | 24 | def experimental(fn: Callable) -> Callable: 25 | """Decorator to flag a feature as experimental. 26 | 27 | An experimental feature triggers a warning when used as it might be subject to breaking changes without prior notice 28 | in the future. 29 | 30 | Warnings can be disabled by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment variable. 31 | 32 | Args: 33 | fn (`Callable`): 34 | The function to flag as experimental. 35 | 36 | Returns: 37 | `Callable`: The decorated function. 38 | 39 | Example: 40 | 41 | ```python 42 | >>> from huggingface_hub.utils import experimental 43 | 44 | >>> @experimental 45 | ... def my_function(): 46 | ... print("Hello world!") 47 | 48 | >>> my_function() 49 | UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future without prior 50 | notice. You can disable this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment variable. 51 | Hello world! 52 | ``` 53 | """ 54 | # For classes, put the "experimental" around the "__new__" method => __new__ will be removed in warning message 55 | name = fn.__qualname__[: -len(".__new__")] if fn.__qualname__.endswith(".__new__") else fn.__qualname__ 56 | 57 | @wraps(fn) 58 | def _inner_fn(*args, **kwargs): 59 | if not constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING: 60 | warnings.warn( 61 | f"'{name}' is experimental and might be subject to breaking changes in the future without prior notice." 62 | " You can disable this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment" 63 | " variable.", 64 | UserWarning, 65 | ) 66 | return fn(*args, **kwargs) 67 | 68 | return _inner_fn 69 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_providers/novita.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, Union 2 | 3 | from huggingface_hub.hf_api import InferenceProviderMapping 4 | from huggingface_hub.inference._common import RequestParameters, _as_dict 5 | from huggingface_hub.inference._providers._common import ( 6 | BaseConversationalTask, 7 | BaseTextGenerationTask, 8 | TaskProviderHelper, 9 | filter_none, 10 | ) 11 | from huggingface_hub.utils import get_session 12 | 13 | 14 | _PROVIDER = "novita" 15 | _BASE_URL = "https://api.novita.ai" 16 | 17 | 18 | class NovitaTextGenerationTask(BaseTextGenerationTask): 19 | def __init__(self): 20 | super().__init__(provider=_PROVIDER, base_url=_BASE_URL) 21 | 22 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 23 | # there is no v1/ route for novita 24 | return "/v3/openai/completions" 25 | 26 | def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: 27 | output = _as_dict(response)["choices"][0] 28 | return { 29 | "generated_text": output["text"], 30 | "details": { 31 | "finish_reason": output.get("finish_reason"), 32 | "seed": output.get("seed"), 33 | }, 34 | } 35 | 36 | 37 | class NovitaConversationalTask(BaseConversationalTask): 38 | def __init__(self): 39 | super().__init__(provider=_PROVIDER, base_url=_BASE_URL) 40 | 41 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 42 | # there is no v1/ route for novita 43 | return 
"/v3/openai/chat/completions" 44 | 45 | 46 | class NovitaTextToVideoTask(TaskProviderHelper): 47 | def __init__(self): 48 | super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task="text-to-video") 49 | 50 | def _prepare_route(self, mapped_model: str, api_key: str) -> str: 51 | return f"/v3/hf/{mapped_model}" 52 | 53 | def _prepare_payload_as_dict( 54 | self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping 55 | ) -> Optional[dict]: 56 | return {"prompt": inputs, **filter_none(parameters)} 57 | 58 | def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: 59 | response_dict = _as_dict(response) 60 | if not ( 61 | isinstance(response_dict, dict) 62 | and "video" in response_dict 63 | and isinstance(response_dict["video"], dict) 64 | and "video_url" in response_dict["video"] 65 | ): 66 | raise ValueError("Expected response format: { 'video': { 'video_url': string } }") 67 | 68 | video_url = response_dict["video"]["video_url"] 69 | return get_session().get(video_url).content 70 | -------------------------------------------------------------------------------- /src/huggingface_hub/inference/_mcp/constants.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import sys 4 | from pathlib import Path 5 | 6 | from huggingface_hub import ChatCompletionInputTool 7 | 8 | 9 | FILENAME_CONFIG = "agent.json" 10 | PROMPT_FILENAMES = ("PROMPT.md", "AGENTS.md") 11 | 12 | DEFAULT_AGENT = { 13 | "model": "Qwen/Qwen2.5-72B-Instruct", 14 | "provider": "nebius", 15 | "servers": [ 16 | { 17 | "type": "stdio", 18 | "command": "npx", 19 | "args": [ 20 | "-y", 21 | "@modelcontextprotocol/server-filesystem", 22 | str(Path.home() / ("Desktop" if sys.platform == "darwin" else "")), 23 | ], 24 | }, 25 | { 26 | "type": "stdio", 27 | "command": "npx", 28 | "args": ["@playwright/mcp@latest"], 29 | }, 30 | ], 31 | } 32 | 33 | 34 | DEFAULT_SYSTEM_PROMPT = """ 35 | You are an agent - please keep going until the user’s query is completely 36 | resolved, before ending your turn and yielding back to the user. Only terminate 37 | your turn when you are sure that the problem is solved, or if you need more 38 | info from the user to solve the problem. 39 | If you are not sure about anything pertaining to the user’s request, use your 40 | tools to read files and gather the relevant information: do NOT guess or make 41 | up an answer. 42 | You MUST plan extensively before each function call, and reflect extensively 43 | on the outcomes of the previous function calls. DO NOT do this entire process 44 | by making function calls only, as this can impair your ability to solve the 45 | problem and think insightfully. 
46 | """.strip() 47 | 48 | MAX_NUM_TURNS = 10 49 | 50 | TASK_COMPLETE_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj( # type: ignore[assignment] 51 | { 52 | "type": "function", 53 | "function": { 54 | "name": "task_complete", 55 | "description": "Call this tool when the task given by the user is complete", 56 | "parameters": { 57 | "type": "object", 58 | "properties": {}, 59 | }, 60 | }, 61 | } 62 | ) 63 | 64 | ASK_QUESTION_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj( # type: ignore[assignment] 65 | { 66 | "type": "function", 67 | "function": { 68 | "name": "ask_question", 69 | "description": "Ask the user for more info required to solve or clarify their problem.", 70 | "parameters": { 71 | "type": "object", 72 | "properties": {}, 73 | }, 74 | }, 75 | } 76 | ) 77 | 78 | EXIT_LOOP_TOOLS: list[ChatCompletionInputTool] = [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL] 79 | 80 | 81 | DEFAULT_REPO_ID = "tiny-agents/tiny-agents" 82 | -------------------------------------------------------------------------------- /docs/source/de/guides/search.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Den Hub durchsuchen 6 | 7 | In diesem Tutorial lernen Sie, wie Sie Modelle, Datensätze und Spaces auf dem Hub mit `huggingface_hub` durchsuchen können. 8 | 9 | ## Wie listet man Repositories auf? 10 | 11 | Die `huggingface_hub`-Bibliothek enthält einen HTTP-Client [`HfApi`], um mit dem Hub zu interagieren. 12 | Unter anderem kann er Modelle, Datensätze und Spaces auflisten, die auf dem Hub gespeichert sind: 13 | 14 | ```py 15 | >>> from huggingface_hub import HfApi 16 | >>> api = HfApi() 17 | >>> models = api.list_models() 18 | ``` 19 | 20 | Die Ausgabe von [`list_models`] ist ein Iterator über die auf dem Hub gespeicherten Modelle. 21 | 22 | Ähnlich können Sie [`list_datasets`] verwenden, um Datensätze aufzulisten und [`list_spaces`], um Spaces aufzulisten. 23 | 24 | ## Wie filtert man Repositories? 25 | 26 | Das Auflisten von Repositories ist großartig, aber jetzt möchten Sie vielleicht Ihre Suche filtern. 27 | Die List-Helfer haben mehrere Attribute wie: 28 | - `filter` 29 | - `author` 30 | - `search` 31 | - ... 32 | 33 | Zwei dieser Parameter sind intuitiv (`author` und `search`), aber was ist mit diesem `filter`? 34 | `filter` nimmt als Eingabe ein [`ModelFilter`]-Objekt (oder [`DatasetFilter`]) entgegen. 35 | Sie können es instanziieren, indem Sie angeben, welche Modelle Sie filtern möchten. 36 | 37 | Hier ist ein Beispiel, um alle Modelle auf dem Hub zu erhalten, die Bildklassifizierung durchführen, 38 | auf dem Imagenet-Datensatz trainiert wurden und mit PyTorch laufen. 39 | Das kann mit einem einzigen [`ModelFilter`] erreicht werden. Attribute werden als "logisches UND" kombiniert. 40 | 41 | ```py 42 | models = api.list_models( 43 | filter=ModelFilter( 44 | task="image-classification", 45 | library="pytorch", 46 | trained_dataset="imagenet" 47 | ) 48 | ) 49 | ``` 50 | 51 | Während des Filterns können Sie auch die Modelle sortieren und nur die Top-Ergebnisse abrufen. 
52 | Zum Beispiel holt das folgende Beispiel die 5 am häufigsten heruntergeladenen Datensätze auf dem Hub: 53 | 54 | ```py 55 | >>> list(list_datasets(sort="downloads", limit=5)) 56 | [DatasetInfo( 57 | id='argilla/databricks-dolly-15k-curated-en', 58 | author='argilla', 59 | sha='4dcd1dedbe148307a833c931b21ca456a1fc4281', 60 | last_modified=datetime.datetime(2023, 10, 2, 12, 32, 53, tzinfo=datetime.timezone.utc), 61 | private=False, 62 | downloads=8889377, 63 | (...) 64 | ``` 65 | 66 | 67 | 68 | Eine andere Möglichkeit, dies zu tun, 69 | besteht darin, die [Modelle](https://huggingface.co/models) und [Datensätze](https://huggingface.co/datasets) Seiten 70 | in Ihrem Browser zu besuchen, nach einigen Parametern zu suchen und die Werte in der URL anzusehen. 71 | -------------------------------------------------------------------------------- /docs/source/ko/package_reference/webhooks_server.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # 웹훅 서버[[webhooks-server]] 6 | 7 | 웹훅은 MLOps 관련 기능의 기반이 됩니다. 이를 통해 특정 저장소의 새로운 변경 사항을 수신하거나, 관심 있는 특정 사용자/조직에 속한 모든 저장소의 변경 사항을 받아볼 수 있습니다. 8 | Huggingface Hub의 웹훅에 대해 더 자세히 알아보려면 이 [가이드](https://huggingface.co/docs/hub/webhooks)를 읽어보세요. 9 | 10 | > [!TIP] 11 | > 웹훅 서버를 설정하고 Space로 배포하는 방법은 이 단계별 [가이드](../guides/webhooks_server)를 확인하세요. 12 | 13 | > [!WARNING] 14 | > 이 기능은 실험적인 기능입니다. 본 API는 현재 개선 작업 중이며, 향후 사전 통지 없이 주요 변경 사항이 도입될 수 있음을 의미합니다. `requirements`에서 `huggingface_hub`의 버전을 고정하는 것을 권장합니다. 참고로 실험적 기능을 사용하면 경고가 트리거 됩니다. 이 경고 트리거를 비활성화 시키길 원한다면 환경변수 `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1`를 설정하세요. 15 | 16 | ## 서버[[server]] 17 | 여기서 서버는 하나의 [Gradio](https://gradio.app/) 앱을 의미합니다. Gradio에는 사용자 또는 사용자에게 지침을 표시하는 UI와 웹훅을 수신하기 위한 API가 있습니다. 웹훅 엔드포인트를 구현하는 것은 함수에 데코레이터를 추가하는 것만큼 간단합니다. 서버를 Space에 배포하기 전에 Gradio 터널을 사용하여 웹훅을 머신으로 리디렉션하여 디버깅할 수 있습니다. 18 | 19 | ### WebhooksServer[[huggingface_hub.WebhooksServer]] 20 | 21 | [[autodoc]] huggingface_hub.WebhooksServer 22 | 23 | ### @webhook_endpoint[[huggingface_hub.webhook_endpoint]] 24 | 25 | [[autodoc]] huggingface_hub.webhook_endpoint 26 | 27 | ## 페이로드[[huggingface_hub.WebhookPayload]] 28 | 29 | [`WebhookPayload`]는 웹훅의 페이로드를 포함하는 기본 데이터 구조입니다. 이것은 `pydantic` 클래스로서 FastAPI에서 매우 쉽게 사용할 수 있습니다. 즉 WebhookPayload를 웹후크 엔드포인트에 매개변수로 전달하면 자동으로 유효성이 검사되고 파이썬 객체로 파싱됩니다. 30 | 31 | 웹훅 페이로드에 대한 자세한 사항은 이 [가이드](https://huggingface.co/docs/hub/webhooks#webhook-payloads)를 참고하세요. 
32 | 33 | 34 | 35 | ### WebhookPayload[[huggingface_hub.WebhookPayload]] 36 | 37 | [[autodoc]] huggingface_hub.WebhookPayload 38 | 39 | ### WebhookPayloadComment[[huggingface_hub.WebhookPayloadComment]] 40 | 41 | [[autodoc]] huggingface_hub.WebhookPayloadComment 42 | 43 | ### WebhookPayloadDiscussion[[huggingface_hub.WebhookPayloadDiscussion]] 44 | 45 | [[autodoc]] huggingface_hub.WebhookPayloadDiscussion 46 | 47 | ### WebhookPayloadDiscussionChanges[[huggingface_hub.WebhookPayloadDiscussionChanges]] 48 | 49 | [[autodoc]] huggingface_hub.WebhookPayloadDiscussionChanges 50 | 51 | ### WebhookPayloadEvent[[huggingface_hub.WebhookPayloadEvent]] 52 | 53 | [[autodoc]] huggingface_hub.WebhookPayloadEvent 54 | 55 | ### WebhookPayloadMovedTo[[huggingface_hub.WebhookPayloadMovedTo]] 56 | 57 | [[autodoc]] huggingface_hub.WebhookPayloadMovedTo 58 | 59 | ### WebhookPayloadRepo[[huggingface_hub.WebhookPayloadRepo]] 60 | 61 | [[autodoc]] huggingface_hub.WebhookPayloadRepo 62 | 63 | ### WebhookPayloadUrl[[huggingface_hub.WebhookPayloadUrl]] 64 | 65 | [[autodoc]] huggingface_hub.WebhookPayloadUrl 66 | 67 | ### WebhookPayloadWebhook[[huggingface_hub.WebhookPayloadWebhook]] 68 | 69 | [[autodoc]] huggingface_hub.WebhookPayloadWebhook 70 | -------------------------------------------------------------------------------- /docs/source/cn/index.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # 🤗 Hub 客户端库 6 | 7 | 通过`huggingface_hub` 库，您可以与面向机器学习开发者和协作者的平台 [Hugging Face Hub](https://huggingface.co/)进行交互，找到适用于您所在项目的预训练模型和数据集，体验在平台托管的数百个机器学习应用，还可以创建或分享自己的模型和数据集并于社区共享。以上所有都可以用Python在`huggingface_hub` 库中轻松实现。 8 | 9 | 阅读[快速入门指南](快速入门指南)以开始使用huggingface_hub库。您将学习如何从Hub下载文件，创建存储库以及将文件上传到Hub。继续阅读以了解更多关于如何在🤗Hub上管理您的存储库，如何参与讨论或者甚至如何访问推理API的信息。 10 | 11 | 31 | 32 | 33 | 34 | 39 | 40 | ## 贡献 41 | 42 | 所有对 huggingface_hub 的贡献都受到欢迎和同等重视！🤗 除了在代码中添加或修复现有问题外，您还可以通过确保其准确且最新来帮助改进文档，在问题上帮助回答问题，并请求您认为可以改进库的新功能。请查看[贡献指南](https://github.com/huggingface/huggingface_hub/blob/main/CONTRIBUTING.md) 了解有关如何提交新问题或功能请求、如何提交拉取请求以及如何测试您的贡献以确保一切正常运行的更多信息。 43 | 44 | 当然，贡献者也应该尊重我们的[行为准则](https://github.com/huggingface/huggingface_hub/blob/main/CODE_OF_CONDUCT.md)，以便为每个人创建一个包容和欢迎的协作空间。 45 | -------------------------------------------------------------------------------- /docs/source/en/package_reference/oauth.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | 6 | # OAuth and FastAPI 7 | 8 | OAuth is an open standard for access delegation, commonly used to grant applications limited access to a user's information without exposing their credentials. When combined with FastAPI, it lets you build secure APIs where users log in using external identity providers like Google or GitHub. 9 | In a typical scenario: 10 | - FastAPI defines the API endpoints and handles the HTTP requests. 11 | - OAuth is integrated using libraries like fastapi.security or external tools like Authlib. 12 | - When a user wants to log in, FastAPI redirects them to the OAuth provider’s login page. 13 | - After successful login, the provider redirects back with a token. 14 | - FastAPI verifies this token and uses it to authorize the user or fetch user profile data (see the generic sketch below). 
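As a rough illustration of this generic flow (an editor's sketch, not part of `huggingface_hub`), here is a minimal FastAPI application using Authlib with GitHub as an assumed example provider; the `client_id`/`client_secret` values are placeholders you would obtain from the provider, and the endpoint URLs are GitHub's public OAuth endpoints:

```python
from authlib.integrations.starlette_client import OAuth
from fastapi import FastAPI, Request
from starlette.middleware.sessions import SessionMiddleware

app = FastAPI()
# Authlib stores the OAuth state in the session, so session middleware is required.
app.add_middleware(SessionMiddleware, secret_key="change-me")

oauth = OAuth()
oauth.register(
    name="github",
    client_id="<your-client-id>",          # placeholder
    client_secret="<your-client-secret>",  # placeholder
    authorize_url="https://github.com/login/oauth/authorize",
    access_token_url="https://github.com/login/oauth/access_token",
    api_base_url="https://api.github.com/",
    client_kwargs={"scope": "read:user"},
)

@app.get("/login")
async def login(request: Request):
    # Redirect the user to the provider's login page.
    return await oauth.github.authorize_redirect(request, str(request.url_for("auth_callback")))

@app.get("/auth/callback")
async def auth_callback(request: Request):
    # The provider redirected back: exchange the code for a token, then fetch the profile.
    token = await oauth.github.authorize_access_token(request)
    profile = await oauth.github.get("user", token=token)
    return profile.json()
```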
15 | 16 | This approach helps avoid handling passwords directly and offloads identity management to trusted providers. 17 | 18 | # Hugging Face OAuth Integration in FastAPI 19 | 20 | This module provides tools to integrate Hugging Face OAuth into a FastAPI application. It enables user authentication using the Hugging Face platform, including mocked behavior for local development and a real OAuth flow for Spaces. 21 | 22 | 23 | 24 | ## OAuth Overview 25 | 26 | The `attach_huggingface_oauth` function adds login, logout, and callback endpoints to your FastAPI app. When used in a Space, it connects to the Hugging Face OAuth system. When used locally, it injects a mocked user. Learn more about [adding a Sign-In with HF option to your Space](https://huggingface.co/docs/hub/en/spaces-oauth). 27 | 28 | 29 | ### How to use it? 30 | 31 | ```python 32 | from huggingface_hub import attach_huggingface_oauth, parse_huggingface_oauth 33 | from fastapi import FastAPI, Request 34 | 35 | app = FastAPI() 36 | attach_huggingface_oauth(app) 37 | 38 | @app.get("/") 39 | def greet_json(request: Request): 40 | oauth_info = parse_huggingface_oauth(request) 41 | if oauth_info is None: 42 | return {"msg": "Not logged in!"} 43 | return {"msg": f"Hello, {oauth_info.user_info.preferred_username}!"} 44 | ``` 45 | 46 | > [!TIP] 47 | > You might also be interested in [a practical example that demonstrates OAuth in action](https://huggingface.co/spaces/Wauplin/fastapi-oauth/blob/main/app.py). 48 | > For a more comprehensive implementation, check out the [medoidai/GiveBackGPT](https://huggingface.co/spaces/medoidai/GiveBackGPT) Space, which implements HF OAuth in a full-scale application. 49 | 50 | 51 | ### attach_huggingface_oauth 52 | 53 | [[autodoc]] attach_huggingface_oauth 54 | 55 | ### parse_huggingface_oauth 56 | 57 | [[autodoc]] parse_huggingface_oauth 58 | 59 | ### OAuthOrgInfo 60 | 61 | [[autodoc]] OAuthOrgInfo 62 | 63 | ### OAuthUserInfo 64 | 65 | [[autodoc]] OAuthUserInfo 66 | 67 | ### OAuthInfo 68 | 69 | [[autodoc]] OAuthInfo 70 | --------------------------------------------------------------------------------