├── tests └── __init__.py ├── local_llm_function_calling ├── py.typed ├── prompters │ ├── __init__.py │ └── llama_function_calling.py ├── model │ ├── __init__.py │ ├── common.py │ ├── huggingface.py │ └── llama.py ├── exceptions.py ├── __init__.py ├── constrainer.py ├── prompter.py └── generator.py ├── docs ├── requirements.txt ├── api.rst ├── Makefile ├── make.bat ├── conf.py ├── index.rst ├── constraining.md ├── about.md ├── quickstart.md └── generation.md ├── .readthedocs.yaml ├── pyproject.toml ├── LICENSE ├── README.md ├── .gitignore └── poetry.lock /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /local_llm_function_calling/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx==6.2.1 2 | sphinx-rtd-theme==1.2.2 3 | myst-parser==2.0.0 4 | -------------------------------------------------------------------------------- /local_llm_function_calling/prompters/__init__.py: -------------------------------------------------------------------------------- 1 | """This collects all of the prompters into a single module""" 2 | from .llama_function_calling import CodeLlamaFunctionCallingPrompter 3 | 4 | __all__ = ["CodeLlamaFunctionCallingPrompter"] 5 | -------------------------------------------------------------------------------- /local_llm_function_calling/model/__init__.py: -------------------------------------------------------------------------------- 1 | """A container for the actual generation models""" 2 | 3 | from .common import Generation, Model, ModelWithNaturalLanguageResponses 4 | 5 | 6 | __all__ = ["Generation", "Model", "ModelWithNaturalLanguageResponses"] 7 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3.11" 7 | 8 | sphinx: 9 | configuration: docs/conf.py 10 | 11 | python: 12 | install: 13 | - requirements: docs/requirements.txt 14 | - method: pip 15 | path: . 16 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | .. automodule:: local_llm_function_calling 5 | :members: 6 | 7 | .. automodule:: local_llm_function_calling.model.huggingface 8 | :members: 9 | 10 | .. automodule:: local_llm_function_calling.model.llama 11 | :members: 12 | 13 | .. automodule:: local_llm_function_calling.prompters 14 | :members: 15 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 
12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /local_llm_function_calling/exceptions.py: -------------------------------------------------------------------------------- 1 | """Exceptions for the function calling library""" 2 | 3 | 4 | class FunctionCallingError(Exception): 5 | """An error in the function calling library""" 6 | 7 | 8 | class ConstrainerError(FunctionCallingError): 9 | """An error in the constrainer""" 10 | 11 | 12 | class NoValidTokensError(ConstrainerError): 13 | """There are no valid tokens to generate""" 14 | 15 | 16 | class InvalidSchemaError(ConstrainerError): 17 | """The schema is invalid""" 18 | 19 | 20 | class GenerationError(FunctionCallingError): 21 | """An error in the generation""" 22 | 23 | 24 | class SequenceTooLongError(GenerationError): 25 | """The sequence is too long to generate""" 26 | -------------------------------------------------------------------------------- /local_llm_function_calling/__init__.py: -------------------------------------------------------------------------------- 1 | """Call functions with arguments generated by a local large language model""" 2 | from .constrainer import Constrainer, JsonSchemaConstraint, NoValidTokensError 3 | from .exceptions import ConstrainerError, FunctionCallingError, SequenceTooLongError 4 | from .generator import Generator 5 | from .prompter import ( 6 | CompletionModelPrompter, 7 | FunctionType, 8 | InstructModelPrompter, 9 | TextPrompter, 10 | ) 11 | 12 | __all__ = [ 13 | "Constrainer", 14 | "JsonSchemaConstraint", 15 | "NoValidTokensError", 16 | "Generator", 17 | "FunctionType", 18 | "CompletionModelPrompter", 19 | "InstructModelPrompter", 20 | "TextPrompter", 21 | "FunctionCallingError", 22 | "ConstrainerError", 23 | "SequenceTooLongError", 24 | ] 25 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "local-llm-function-calling" 3 | version = "0.1.23" 4 | description = "A tool for generating function arguments and choosing what function to call with local LLMs" 5 | authors = ["rizerphe <44440399+rizerphe@users.noreply.github.com>"] 6 | readme = "README.md" 7 | homepage = "https://github.com/rizerphe/local-llm-function-calling" 8 | documentation = "https://local-llm-function-calling.readthedocs.io/" 9 | keywords = ["llm", "jsonschema", "huggingface", "transformers", "local", "llama.cpp"] 10 | license = "MIT" 11 | packages = [ 12 | {include = "local_llm_function_calling"}, 13 | {include = "local_llm_function_calling/py.typed"}, 14 | ] 15 | 16 | [tool.poetry.dependencies] 17 | python = "^3.11" 18 | transformers = "^4.30.2" 19 | json-schema-enforcer = "^0.1.3" 20 | torch = "^2.0.1" 21 | llama-cpp-python = {version = "^0.1.83", optional = true} 22 | 23 | [tool.poetry.extras] 24 | llama-cpp = ["llama-cpp-python"] 25 | 26 | 27 | [build-system] 28 | requires = ["poetry-core"] 29 | build-backend = "poetry.core.masonry.api" 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 rizerphe 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | import os 7 | import sys 8 | sys.path.insert(0, os.path.abspath('..')) 9 | 10 | # -- Project information ----------------------------------------------------- 11 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 12 | 13 | project = 'local-llm-function-calling' 14 | copyright = '2024, rizerphe' 15 | author = 'rizerphe' 16 | 17 | # -- General configuration --------------------------------------------------- 18 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 19 | 20 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', "myst_parser"] 21 | 22 | templates_path = ['_templates'] 23 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 24 | 25 | 26 | 27 | # -- Options for HTML output ------------------------------------------------- 28 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 29 | 30 | html_theme = 'sphinx_rtd_theme' 31 | html_theme_path = ["_themes", ] 32 | html_static_path = ['_static'] 33 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to local-llm-function-calling's documentation! 2 | ====================================================== 3 | 4 | The ``local-llm-function-calling`` project is designed to constrain the generation of Hugging Face text generation models by enforcing a JSON schema and facilitating the formulation of prompts for function calls, similar to OpenAI's `function calling `_ feature, but actually enforcing the schema unlike OpenAI. 5 | 6 | The project provides a ``Generator`` class that allows users to easily generate text while ensuring compliance with the provided prompt and JSON schema. By utilizing the ``local-llm-function-calling`` library, users can conveniently control the output of text generation models. It uses my own quickly sketched ``json-schema-enforcer`` project as the enforcer. 7 | 8 | Features 9 | -------- 10 | 11 | * Constrains the generation of Hugging Face text generation models to follow a JSON schema. 12 | * Provides a mechanism for formulating prompts for function calls, enabling precise data extraction and formatting. 13 | * Simplifies the text generation process through a user-friendly ``Generator`` class. 14 | 15 | .. toctree:: 16 | :maxdepth: 2 17 | :caption: Table of Contents: 18 | 19 | quickstart 20 | about 21 | generation 22 | constraining 23 | api 24 | 25 | * :ref:`genindex` 26 | * :ref:`search` 27 | 28 | -------------------------------------------------------------------------------- /docs/constraining.md: -------------------------------------------------------------------------------- 1 | # Constrained generation 2 | 3 | You can also use the [Constrainer](local_llm_function_calling.Constrainer) class to just generate text based on constraints. You then have two options: either using a [builtin JSON schema constraint](local_llm_function_calling.JsonSchemaConstraint) or a custom one. 4 | 5 | ## JSON schema 6 | 7 | You can generate based on a simple JSON schema. Note that this does not support the full jsonschema specification, but instead uses a simplified format similar to that of OpenAI. 
Here's a simple usage example: 8 | 9 | ```python 10 | from local_llm_function_calling import Constrainer, JsonSchemaConstraint 11 | from local_llm_function_calling.model.huggingface import HuggingfaceModel 12 | 13 | 14 | schema = { 15 | "type": "object", 16 | "properties": { 17 | "name": {"type": "string", "maxLength": 10}, 18 | "age": {"type": "integer"} 19 | }, 20 | "enforceOrder": ["name", "age"] 21 | } 22 | 23 | constraint = JsonSchemaConstraint(schema) 24 | constrainer = Constrainer(HuggingfaceModel("gpt2")) 25 | raw_json = constrainer.generate("Prefix.\n", constraint, max_len=100) 26 | truncated_json = raw_json[:constraint.validate(raw_json).end_index] 27 | ``` 28 | 29 |
This is the generated JSON:
30 | 
31 | ```json
32 | {
33 |     "name": "TheTheThe.",
34 |     "age": -1
35 | }
36 | ```
37 | 
38 | Note that gpt2 was used here, so it's unreasonable to expect high-quality output.
39 | 
40 | 
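Once generation finishes, the truncated text is plain JSON and can be loaded with the standard library (why the raw output needs truncating is explained right below). A minimal sketch, reusing `truncated_json` from the example above:

```python
import json

# The constraint guarantees that the truncated text is valid JSON,
# so this parse is expected to succeed.
data = json.loads(truncated_json)
print(data["name"], data["age"])
```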
41 | 
42 | `raw_json` can contain extra characters at the end, which is why we then create `truncated_json`. The prefix is prepended to the generated data and used as the prompt for the model.
43 | 
44 | ## Custom constraints
45 | 
46 | If you don't want the output to just adhere to a JSON schema, you can also define your own constraints. A constraint is just a callable that takes in the generated text and checks whether what's been generated is valid and whether it's the complete output. Here's a simple example that forces the output to be all-lowercase.
47 | 
48 | ```python
49 | def lowercase_sentence_constraint(text: str):
50 |     # Has to return (is_valid, is_complete)
51 |     return [text.islower(), text.endswith(".")]
52 | 
53 | constrainer = Constrainer(HuggingfaceModel("gpt2"))
54 | 
55 | generated = constrainer.generate("Prefix.\n", lowercase_sentence_constraint, max_len=10)
56 | ```
57 | 

--------------------------------------------------------------------------------
/local_llm_function_calling/model/common.py:
--------------------------------------------------------------------------------
1 | """An abstract definition of a model."""
2 | from __future__ import annotations
3 | from typing import Any, Iterator, Protocol, TYPE_CHECKING, TypeVar, runtime_checkable
4 | 
5 | if TYPE_CHECKING:
6 |     from ..prompter import TextPrompter
7 | 
8 | 
9 | TokenType = TypeVar("TokenType")
10 | 
11 | 
12 | class Generation(Protocol[TokenType]):
13 |     """One single generation"""
14 | 
15 |     def get_sorted_tokens(self) -> Iterator[TokenType]:
16 |         """Get the tokens sorted by probability"""
17 |         ...
18 | 
19 |     def register_token(self, token: TokenType) -> None:
20 |         """Select the token for this generation step
21 | 
22 |         Args:
23 |             token (TokenType): The token to select
24 |         """
25 | 
26 |     def get_generated(self, candidate: TokenType | None = None) -> str:
27 |         """Get the generated sequence so far
28 | 
29 |         Args:
30 |             candidate (TokenType | None): The token to add to the sequence
31 |                 (should be one of the tokens returned by
32 |                 get_sorted_tokens in this generation step)
33 |         """
34 |         ...
35 | 
36 | 
37 | PrefixType = TypeVar("PrefixType")
38 | 
39 | 
40 | class Model(Protocol[PrefixType]):
41 |     """A container for a generic language model"""
42 | 
43 |     def start_generation(self, prefix: PrefixType) -> Generation:
44 |         """Start a new generation sequence
45 | 
46 |         Args:
47 |             prefix (PrefixType): The generation prefix
48 |         """
49 |         ...
50 | 
51 |     def default_prompter(self) -> TextPrompter[PrefixType, Any]:
52 |         """Get the default prompter for this model"""
53 |         ...
54 | 
55 | 
56 | @runtime_checkable
57 | class ModelWithNaturalLanguageResponses(Protocol[PrefixType]):
58 |     """A container for a generic language model
59 |     that can return natural language responses"""
60 | 
61 |     def start_generation(self, prefix: PrefixType) -> Generation:
62 |         """Start a new generation sequence
63 | 
64 |         Args:
65 |             prefix (PrefixType): The generation prefix
66 |         """
67 |         ...
68 | 
69 |     def default_prompter(self) -> TextPrompter[PrefixType, Any]:
70 |         """Get the default prompter for this model"""
71 |         ...
72 | 
73 |     def generate_from_prompt(
74 |         self, prefix: PrefixType, max_new_tokens: int | None = None
75 |     ) -> str:
76 |         """Generate a response to a prompt
77 | 
78 |         Args:
79 |             prefix (PrefixType): The prompt to generate a response to
80 |             max_new_tokens (int | None): The maximum number of tokens to generate
81 |         """
82 |         ...
83 | 

--------------------------------------------------------------------------------
/docs/about.md:
--------------------------------------------------------------------------------
1 | # How It Works
2 | 
3 | The tool leverages the power of large language models to generate text while enforcing a JSON schema. To understand how it works, let's first explore the basics of large language models and tokens.
4 | 
5 | Large language models are trained on vast amounts of text data to learn the statistical patterns and structures of language. These models are capable of generating coherent and contextually relevant text given a prompt or a partial sentence. In this project, the focus is on using such models to generate text that adheres to a specified JSON schema, or to a different constraint provided by the developer.
6 | 
7 | In the context of language models, a token is the fundamental unit of text. It can represent a single character, a word, or even a subword, depending on the tokenization approach used. For example, in English, a token can correspond to a word like "cat" or to subwords like "un" and "happy". Tokens are the building blocks that language models operate on, and they carry semantic and syntactic information.
8 | 
9 | When generating text, language models predict the next token based on the context provided by the preceding tokens. The probability distribution over the vocabulary determines how likely each token is to occur next, with higher probabilities assigned to tokens that are more plausible given the training data. For a prompt like "What is the weather like", the model might assign higher probabilities to tokens such as "in" or "today", weighing the likelihood of each candidate against the context to produce a coherent, contextually appropriate continuation.
10 | 
11 | In the "local-llm-function-calling" project, generating text goes beyond selecting tokens solely based on their likelihood. It incorporates the additional constraint of adhering to a given JSON schema, or another user-provided constraint. The schema defines the structure, properties, and constraints of the data that should be generated. This means that even a model that would otherwise produce completely random text will still generate valid JSON as its output.
12 | 
13 | During text generation, the `Constrainer` class constructs the text by iteratively adding tokens to a prefix. It generates tokens according to their likelihood, as suggested by the language model, but also checks whether each token, when appended to the generated text, adheres to the JSON schema. This is achieved by passing the generated text plus each candidate token to the constraint function.
14 | 
15 | For example, given the schema and output:
16 | 
17 | Schema:
18 | 
19 | ```json
20 | {
21 |   "type": "object",
22 |   "properties": {
23 |     "location": {
24 |       "type": "string",
25 |       "maxLength": 20
26 |     },
27 |     "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }
28 |   },
29 |   "required": ["location"]
30 | }
31 | ```
32 | 
33 | Output:
34 | 
35 | ```json
36 | {
37 |   "location": "San Francisco, CA",
38 |   "
39 | ```
40 | 
41 | If the model suggests `unit` as the next token, it will be accepted; if it suggests, for example, `date`, it will be rejected, since `date` is not a property defined in the schema.
The tool selects a token with the highest likelihood that still adheres to the schema. 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Local LLM function calling 2 | 3 | [![Documentation Status](https://readthedocs.org/projects/local-llm-function-calling/badge/?version=latest)](https://local-llm-function-calling.readthedocs.io/en/latest/?badge=latest) [![PyPI version](https://badge.fury.io/py/local-llm-function-calling.svg)](https://badge.fury.io/py/local-llm-function-calling) 4 | 5 | ## Overview 6 | 7 | The `local-llm-function-calling` project is designed to constrain the generation of Hugging Face text generation models by enforcing a JSON schema and facilitating the formulation of prompts for function calls, similar to OpenAI's [function calling](https://openai.com/blog/function-calling-and-other-api-updates) feature, but actually enforcing the schema unlike OpenAI. 8 | 9 | The project provides a `Generator` class that allows users to easily generate text while ensuring compliance with the provided prompt and JSON schema. By utilizing the `local-llm-function-calling` library, users can conveniently control the output of text generation models. It uses my own quickly sketched `json-schema-enforcer` project as the enforcer. 10 | 11 | ## Features 12 | 13 | - Constrains the generation of Hugging Face text generation models to follow a JSON schema. 14 | - Provides a mechanism for formulating prompts for function calls, enabling precise data extraction and formatting. 15 | - Simplifies the text generation process through a user-friendly `Generator` class. 16 | 17 | ## Installation 18 | 19 | To install the `local-llm-function-calling` library, use the following command: 20 | 21 | ```shell 22 | pip install local-llm-function-calling 23 | ``` 24 | 25 | ## Usage 26 | 27 | Here's a simple example demonstrating how to use `local-llm-function-calling`: 28 | 29 | ```python 30 | from local_llm_function_calling import Generator 31 | 32 | # Define a function and models 33 | functions = [ 34 | { 35 | "name": "get_current_weather", 36 | "description": "Get the current weather in a given location", 37 | "parameters": { 38 | "type": "object", 39 | "properties": { 40 | "location": { 41 | "type": "string", 42 | "description": "The city and state, e.g. 
San Francisco, CA", 43 | "maxLength": 20, 44 | }, 45 | "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, 46 | }, 47 | "required": ["location"], 48 | }, 49 | } 50 | ] 51 | 52 | # Initialize the generator with the Hugging Face model and our functions 53 | generator = Generator.hf(functions, "gpt2") 54 | 55 | # Generate text using a prompt 56 | function_call = generator.generate("What is the weather like today in Brooklyn?") 57 | print(function_call) 58 | ``` 59 | 60 | ## Custom constraints 61 | 62 | You don't have to use my prompting methods; you can craft your own prompts and your own constraints, and still benefit from the constrained generation: 63 | 64 | ```python 65 | from local_llm_function_calling import Constrainer 66 | from local_llm_function_calling.model.huggingface import HuggingfaceModel 67 | 68 | # Define your own constraint 69 | # (you can also use local_llm_function_calling.JsonSchemaConstraint) 70 | def lowercase_sentence_constraint(text: str): 71 | # Has to return (is_valid, is_complete) 72 | return [text.islower(), text.endswith(".")] 73 | 74 | # Create the constrainer 75 | constrainer = Constrainer(HuggingfaceModel("gpt2")) 76 | 77 | # Generate your text 78 | generated = constrainer.generate("Prefix.\n", lowercase_sentence_constraint, max_len=10) 79 | ``` 80 | 81 | ## Extending and Customizing 82 | 83 | To extend or customize the prompt structure, you can subclass the `TextPrompter` class. This allows you to modify the prompt generation process according to your specific requirements. 84 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | -------------------------------------------------------------------------------- /docs/quickstart.md: -------------------------------------------------------------------------------- 1 | # Quickstart 2 | 3 | This is a tool that allows you to replicate OpenAI's [function calling](https://openai.com/blog/function-calling-and-other-api-updates) feature with local models while enforcing the output schema. 4 | 5 | ## Installation 6 | 7 | ```shell 8 | pip install local-llm-function-calling 9 | ``` 10 | 11 | ## Usage 12 | 13 | Import the generator: 14 | 15 | ```python 16 | from local_llm_function_calling import Generator 17 | ``` 18 | 19 | Define your functions ([another project of mine](https://github.com/rizerphe/openai-functions) can help): 20 | 21 | ```python 22 | functions = [ 23 | { 24 | "name": "get_current_weather", 25 | "description": "Get the current weather in a given location", 26 | "parameters": { 27 | "type": "object", 28 | "properties": { 29 | "location": { 30 | "type": "string", 31 | "description": "The city and state, e.g. 
San Francisco, CA", 32 | "maxLength": 20, 33 | }, 34 | "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, 35 | }, 36 | "required": ["location"], 37 | }, 38 | } 39 | ] 40 | ``` 41 | 42 | Initialize the generator with the Hugging Face model, tokenizer, and functions: 43 | 44 | ```python 45 | generator = Generator.hf(functions, "gpt2") 46 | ``` 47 | 48 | Generate text using a prompt: 49 | 50 | ```python 51 | function_call = generator.generate("What is the weather like today in Brooklyn?") 52 | print(function_call) 53 | ``` 54 | 55 |
The output: 56 | 57 | ```json 58 | { 59 | "name": "get_current_weather", 60 | "arguments": "{\n \"location\": \"{{{{{{{{{{{{{{{{{{{{\"\n}" 61 | } 62 | ``` 63 | 64 |
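Since `arguments` is itself a JSON string, turning the result into an actual call to your own code only takes a few lines. A minimal sketch, assuming `function_call` has the structure shown above and that `get_current_weather` is your own (here made-up) implementation:

```python
import json

def get_current_weather(location: str, unit: str = "celsius") -> str:
    # Stand-in implementation, purely for the sake of the example
    return f"It is sunny in {location} (reported in {unit})."

# Parse the generated arguments and dispatch to the matching function
arguments = json.loads(function_call["arguments"])
if function_call["name"] == "get_current_weather":
    print(get_current_weather(**arguments))
```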
65 | 
66 | ## Llama.cpp
67 | 
68 | Meta's [Llama 2 family of models](https://ai.meta.com/llama/) (especially CodeLlama) is much better suited for this task than most other open-source models. Not everyone has the resources required to run the models as-is, though. One solution is quantization: quantized models are smaller and require far fewer resources, but produce lower-quality results. This tool supports [llama.cpp](https://github.com/ggerganov/llama.cpp), which allows you to run these quantized models.
69 | 
70 | Important note: this project uses a thin wrapper around the default llama.cpp Python bindings that allows you to more easily extract the logits of each token. You can access it at [LogitLlama](local_llm_function_calling.model.llama.LogitLlama).
71 | 
72 | To use llama.cpp, you have to install the project with:
73 | 
74 | ```sh
75 | pip install local-llm-function-calling[llama-cpp]
76 | ```
77 | 
78 | Then download one of the quantized models (e.g. one of [these](https://huggingface.co/TheBloke/CodeLlama-13B-Instruct-GGUF#provided-files)) and use [LlamaModel](local_llm_function_calling.model.llama.LlamaModel) to load it:
79 | 
80 | ```python
81 | from local_llm_function_calling.model.llama import LlamaModel
82 | 
83 | generator = Generator(
84 |     functions,
85 |     LlamaModel(
86 |         "codellama-13b-instruct.Q6_K.gguf"
87 |     ),
88 | )
89 | ```
90 | 
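If you'd rather fetch the quantized file programmatically than download it by hand, the `huggingface_hub` package can do it. A small sketch (the repository and filename are just the example above; pick whichever quantization you prefer):

```python
from huggingface_hub import hf_hub_download

from local_llm_function_calling import Generator
from local_llm_function_calling.model.llama import LlamaModel

# Downloads (and caches) the GGUF file, returning its local path
model_path = hf_hub_download(
    repo_id="TheBloke/CodeLlama-13B-Instruct-GGUF",
    filename="codellama-13b-instruct.Q6_K.gguf",
)
generator = Generator(functions, LlamaModel(model_path))
```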
I've started working on integrating this with my own finetuned models, as well as hopefully more in the future - feel free to submit an issue to the github if you want to see your favorite function calling model integrated. 92 | 93 | The prompters are available at [local_llm_function_calling.prompters](local_llm_function_calling.prompters). You can also easily write your own - it just has to implement the same [local_llm_function_calling.TextPrompter](local_llm_function_calling.TextPrompter) protocol for your model type. Here's how to use one, with [my own finetuned model](https://huggingface.co/rizerphe/CodeLlama-function-calling-6320-7b-Instruct-GGUF): 94 | 95 | ```py 96 | from local_llm_function_calling import Generator 97 | from local_llm_function_calling.model.llama import LlamaModel 98 | from local_llm_function_calling.prompters import CodeLlamaFunctionCallingPrompter 99 | 100 | # Define a function and models 101 | functions = [ 102 | { 103 | "name": "get_current_weather", 104 | "description": "Get the current weather in a given location", 105 | "parameters": { 106 | "type": "object", 107 | "properties": { 108 | "location": { 109 | "type": "string", 110 | "description": "The city and state, e.g. San Francisco, CA", 111 | "maxLength": 20, 112 | }, 113 | "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, 114 | }, 115 | "required": ["location"], 116 | }, 117 | } 118 | ] 119 | 120 | # Initialize the generator with the Hugging Face model and our functions 121 | generator = Generator( 122 | functions, 123 | LlamaModel("codellama-function-calling-6320-7b-instruct.gguf.q2_k.bin"), 124 | CodeLlamaFunctionCallingPrompter(), 125 | ) 126 | 127 | # Generate text using a prompt 128 | function_call = generator.generate( 129 | "What is the weather like today in Brooklyn?", suffix="\n" 130 | ) 131 | print(function_call) 132 | ``` 133 | 134 |
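If you're curious what such a prompter actually feeds the model, you can also call it directly; for llama.cpp models it returns a mix of token ids and raw bytes. A small sketch, reusing the `functions` list from above:

```python
from local_llm_function_calling.prompters import CodeLlamaFunctionCallingPrompter

prompter = CodeLlamaFunctionCallingPrompter()

# Prefix for deciding whether a function should be called at all
selection_prompt, response_markers = prompter.should_call_prompt(
    "What is the weather like today in Brooklyn?", functions
)

# Prefix for generating the arguments of one specific function
arguments_prompt = prompter.prompt(
    "What is the weather like today in Brooklyn?",
    functions,
    function_to_call="get_current_weather",
)
```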
135 | 

--------------------------------------------------------------------------------
/docs/generation.md:
--------------------------------------------------------------------------------
1 | # Generating a function call
2 | 
3 | The library provides a [Generator](local_llm_function_calling.Generator) class that's designed to fully replace OpenAI's function calling. It combines the functionality of the different [prompters](local_llm_function_calling.TextPrompter) and a constrainer to generate a full function call, similar to what OpenAI does.
4 | 
5 | ## Completion models
6 | 
7 | The most basic usage example is with a simple completion model. We'll use a Hugging Face model with a [CompletionModelPrompter](local_llm_function_calling.CompletionModelPrompter), which is its default, to construct the prompt, along with a `Generator` for it:
8 | 
9 | ```python
10 | from local_llm_function_calling import Generator
11 | ```
12 | 
13 | We need to specify the functions and the completion model to use - we'll use a simple get-weather function and gpt2; if you need help generating schemas, [another project of mine](https://github.com/rizerphe/openai-functions) can help. GPT2 is not a model I'd generally recommend because of its tiny size - switch it out for a larger one if you want to do anything useful (CodeLlama works really well).
14 | 
15 | ```python
16 | functions = [
17 |     {
18 |         "name": "get_current_weather",
19 |         "description": "Get the current weather in a given location",
20 |         "parameters": {
21 |             "type": "object",
22 |             "properties": {
23 |                 "location": {
24 |                     "type": "string",
25 |                     "description": "The city and state, e.g. San Francisco, CA",
26 |                     "maxLength": 20,
27 |                 },
28 |                 "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
29 |             },
30 |             "required": ["location"],
31 |         },
32 |     }
33 | ]
34 | 
35 | generator = Generator.hf(functions, "gpt2")
36 | ```
37 | 
38 | You can pass in the Hugging Face model itself, or even both the model and the tokenizer, not just its name (a sketch of loading them explicitly is at the end of this section):
39 | 
40 | ```python
41 | Generator.hf(functions, model)
42 | Generator.hf(functions, model, tokenizer)
43 | ```
44 | 
45 | These are shorthands for:
46 | 
47 | ```python
48 | from local_llm_function_calling.model.huggingface import HuggingfaceModel
49 | 
50 | Generator(functions, HuggingfaceModel(model))
51 | Generator(functions, HuggingfaceModel(model, tokenizer))
52 | ```
53 | 
54 | When we have the generator ready, we can pass in a prompt and have it construct a function call for us:
55 | 
56 | ```python
57 | function_call = generator.generate("What is the weather like today in Brooklyn?")
58 | ```
59 | 
60 | 
For the example above, GPT2 generates something like this: 61 | 62 | ```json 63 | { 64 | "name": "get_current_weather", 65 | "arguments": "{\n \"location\": \"{{{{{{{{{{{{{{{{{{{{\"\n}" 66 | } 67 | ``` 68 | 69 |
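If you already have a model loaded, you can pass the objects in directly instead of the model name, as mentioned above. A sketch, assuming a standard `transformers` causal LM:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

from local_llm_function_calling import Generator

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Equivalent to Generator.hf(functions, "gpt2"), but reuses your own objects
generator = Generator.hf(functions, model, tokenizer)
function_call = generator.generate("What is the weather like today in Brooklyn?")
```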
70 | 71 | ## Instruct models 72 | 73 | You might want to use a different prompting scheme, however, for example when using an instruct model. For instruct models specifically, however, there's [InstructModelPrompter](local_llm_function_calling.InstructModelPrompter). Here's how to use it: 74 | 75 | ```python 76 | from local_llm_function_calling import Generator, InstructModelPrompter 77 | 78 | 79 | generator = Generator.hf(functions, "gpt2", prompter=InstructModelPrompter()) 80 | ``` 81 | 82 | Then you use it the same way as you'd use the generator normally: 83 | 84 | ```python 85 | function_call = generator.generate("What is the weather like today in Brooklyn?") 86 | ``` 87 | 88 | ## Llama.cpp 89 | 90 | The [meta's llama2 family of models](https://ai.meta.com/llama/) (especially codellama) are so much more suited for this task than most other open source models. Far from everyone has the resources required to run the models as is though. One of the solutions is quantization. Quantized models are smaller and require way fewer resources, but produce lower quality results. This tool supports [llama.cpp](https://github.com/ggerganov/llama.cpp), which allows you to run these quantized models. 91 | 92 | To use llama.cpp, you have to install the project with: 93 | 94 | ```sh 95 | pip install local-llm-function-calling[llama-cpp] 96 | ``` 97 | 98 | Then download one of the quantized models (e.g. one of [these](https://huggingface.co/TheBloke/CodeLlama-13B-Instruct-GGUF#provided-files)) and use [LlamaModel](local_llm_function_calling.model.llama.LlamaModel) to load it: 99 | 100 | ```python 101 | from local_llm_function_calling.model.llama import LlamaModel 102 | 103 | generator = Generator( 104 | functions, 105 | LlamaModel( 106 | "codellama-13b-instruct.Q6_K.gguf" 107 | ), 108 | ) 109 | ``` 110 | 111 | [LlamaModel](local_llm_function_calling.model.llama.LlamaModel) by default uses a prompt template compatible with the llama2 Instruct models. 112 | 113 | ## Your own prompters 114 | 115 | I would suggest trying out different prompts; mine had very little thought put into them. You can do so by creating a class that adheres to the [TextPrompter](local_llm_function_calling.TextPrompter) protocol and passing them in as the prompter. A prompter has to define a `prompt(prompt: str, functions: list[local_llm_function_calling.prompter.FunctionType], function_to_call: str | None = None) → PromptType` method. If it has a `function_to_call` provided, it should return the full prefix for the call schema generation; otherwise it should return the prefix for the function name generation. `PromptType` depends on the model - for huggingface models it's a string, for llama.cpp models it's an array of integers and bytes objects; integers to allow for special tokens to be passed (a `bos` token is expected to go first). 
116 | -------------------------------------------------------------------------------- /local_llm_function_calling/model/huggingface.py: -------------------------------------------------------------------------------- 1 | """A container for huggingface models specifically""" 2 | from typing import Iterator, TypeVar 3 | 4 | import torch 5 | from transformers import AutoModelForCausalLM, AutoTokenizer 6 | 7 | from ..exceptions import SequenceTooLongError 8 | from ..prompter import CompletionModelPrompter, TextPrompter 9 | 10 | PrefixType_contra = TypeVar( 11 | "PrefixType_contra", str, list[str | int], contravariant=True 12 | ) 13 | 14 | 15 | class HuggingfaceGeneration: 16 | """A single generation sequence with a huggingface model""" 17 | 18 | def __init__( 19 | self, 20 | model: AutoModelForCausalLM, 21 | tokenizer: AutoTokenizer, 22 | prefix: str | list[str | int], 23 | ) -> None: 24 | """Create a generation sequence 25 | 26 | Args: 27 | model (AutoModelForCausalLM): The model to use for generation 28 | tokenizer (AutoTokenizer): The tokenizer to use 29 | prefix (str | list[str | int]): The generation prefix 30 | """ 31 | self.model = model 32 | self.tokenizer = tokenizer 33 | self.inputs = ( 34 | self.tokenizer(prefix, return_tensors="pt")["input_ids"] 35 | if isinstance(prefix, str) 36 | else torch.cat( 37 | [ 38 | self.tokenizer( 39 | item, 40 | return_tensors="pt", 41 | add_special_tokens=False, 42 | split_special_tokens=False, 43 | )["input_ids"] 44 | if isinstance(item, str) 45 | else torch.tensor([[item]]) 46 | for item in prefix 47 | ], 48 | dim=-1, 49 | ) 50 | ) 51 | self.generated: list[int] = [] 52 | self.candidates: torch.Tensor | None = None 53 | 54 | def get_sorted_tokens(self) -> Iterator[int]: 55 | """Get the tokens sorted by probability 56 | 57 | Raises: 58 | SequenceTooLongError: If the sequence is too long to generate 59 | 60 | Yields: 61 | The next of the most likely tokens 62 | """ 63 | if self.inputs.shape[1] >= self.model.config.n_positions: 64 | raise SequenceTooLongError() 65 | gen_tokens = self.model.generate( 66 | input_ids=self.inputs, 67 | output_scores=True, 68 | return_dict_in_generate=True, 69 | max_new_tokens=1, 70 | pad_token_id=self.tokenizer.eos_token_id, 71 | ) 72 | tokens_rated = gen_tokens.scores[0].argsort(descending=True)[0] 73 | for token in tokens_rated: 74 | if token == self.tokenizer.eos_token_id: 75 | # Don't yield the EOS token 76 | continue 77 | # This is a very hacky way to get rid of all weird tokens; 78 | # TODO: find a better way to do this 79 | if self.get_generated(token) == self.get_generated(): 80 | continue 81 | yield token 82 | 83 | def register_token(self, token: int) -> None: 84 | """Select the token for this generation step 85 | 86 | Args: 87 | token (int): The token to select 88 | """ 89 | self.generated.append(token) 90 | self.inputs = torch.cat([self.inputs, torch.tensor([[token]])], dim=1) 91 | 92 | def get_generated(self, candidate: int | None = None) -> str: 93 | """Get the generated sequence 94 | 95 | Args: 96 | candidate (int | None): The token to add to the sequence 97 | 98 | Returns: 99 | str: The generated sequence 100 | """ 101 | return self.tokenizer.decode( 102 | self.generated + ([candidate] if candidate else []), 103 | skip_special_tokens=True, 104 | ) 105 | 106 | 107 | class HuggingfaceModel: 108 | """A container for a huggingface model""" 109 | 110 | def __init__( 111 | self, 112 | model: AutoModelForCausalLM | str, 113 | tokenizer: AutoTokenizer | str | None = None, 114 | ) -> None: 115 | """Create a 
huggingface model 116 | 117 | Args: 118 | model (AutoModelForCausalLM | str): The model to use for generation 119 | tokenizer (AutoTokenizer | str | None): The tokenizer to use. 120 | Automatically loaded if not provided. 121 | """ 122 | if isinstance(model, str): 123 | self.model = AutoModelForCausalLM.from_pretrained(model) 124 | else: 125 | self.model = model 126 | if isinstance(tokenizer, str): 127 | self.tokenizer = AutoTokenizer.from_pretrained(tokenizer) 128 | elif tokenizer is None: 129 | self.tokenizer = AutoTokenizer.from_pretrained(self.model.name_or_path) 130 | else: 131 | self.tokenizer = tokenizer 132 | 133 | def start_generation(self, prefix: PrefixType_contra) -> HuggingfaceGeneration: 134 | """Start a new generation sequence 135 | 136 | Args: 137 | prefix: The generation prefix, either as a string or as a list of 138 | strings/integers; token IDs and string to be automatically tokenized 139 | 140 | Returns: 141 | HuggingfaceGeneration: The generation sequence initialized with the 142 | prefix 143 | """ 144 | return HuggingfaceGeneration(self.model, self.tokenizer, prefix) 145 | 146 | def default_prompter(self) -> TextPrompter[str, str]: 147 | """Get the default prompter for this model 148 | 149 | Returns: 150 | A generic CompletionModelPrompter 151 | """ 152 | 153 | return CompletionModelPrompter() 154 | -------------------------------------------------------------------------------- /local_llm_function_calling/prompters/llama_function_calling.py: -------------------------------------------------------------------------------- 1 | """This is a file containing the prompter for a llama model finetuned for 2 | function calling by me. You can find the model this was made for at 3 | https://huggingface.co/rizerphe/CodeLlama-function-calling-6320-7b-Instruct-GGUF 4 | """ 5 | from __future__ import annotations 6 | import json 7 | from typing import Literal, NotRequired, TYPE_CHECKING, TypedDict 8 | 9 | if TYPE_CHECKING: 10 | from ..prompter import FunctionType, ShouldCallResponse 11 | 12 | 13 | class FunctionCall(TypedDict): 14 | name: str 15 | arguments: str 16 | 17 | 18 | class ChatMessage(TypedDict): 19 | role: Literal["user", "assistant", "function"] 20 | name: NotRequired[str] 21 | content: NotRequired[str] 22 | function_call: NotRequired[FunctionCall] 23 | 24 | 25 | class CodeLlamaFunctionCallingPrompter: 26 | """A prompter for code llama function calling models""" 27 | 28 | def _user_message(self, message: ChatMessage) -> str: 29 | if message["role"] == "user": 30 | return f"[INST] {message.get('content', '')} [/INST]" 31 | return f"[INST] {message.get('content', '')} [/INST]" 32 | 33 | def _assistant_message(self, message: ChatMessage) -> str: 34 | if "content" in message: 35 | return f" {message['content']}" 36 | if "function_call" not in message: 37 | return "" 38 | return ( 39 | "" 40 | + message["function_call"]["name"] 41 | + "\n" 42 | + message["function_call"]["arguments"] 43 | ) 44 | 45 | def _chat_prompt( 46 | self, 47 | chat: list[ChatMessage], 48 | functions: list[FunctionType], 49 | function_to_call: str | None = None, 50 | use_function: bool = False, 51 | ) -> list[bytes | int]: 52 | functions_summary = "\n".join( 53 | f"{json.dumps(f, indent=4)}" for f in functions 54 | ) 55 | system_prompt = ( 56 | f"<>\nAvailable functions:\n{functions_summary} <>\n\n" 57 | ) 58 | user_message: ChatMessage | None = None 59 | result: list[int | bytes] = [1] 60 | for i, message in enumerate(chat): 61 | if i == 0: 62 | if message["role"] != "user": 63 | raise 
ValueError("First message must be from user") 64 | if "content" not in message: 65 | raise ValueError("First message must have content") 66 | message["content"] = system_prompt + message["content"] 67 | if message["role"] in ["user", "function"]: 68 | if user_message is not None: 69 | result.append(self._user_message(user_message).encode("utf-8")) 70 | result.append(2) 71 | result.append(1) 72 | user_message = message 73 | if message["role"] == "assistant": 74 | if user_message is None: 75 | ... 76 | else: 77 | result.append( 78 | ( 79 | self._user_message(user_message) 80 | + self._assistant_message(message) 81 | ).encode("utf-8") 82 | ) 83 | result.append(2) 84 | result.append(1) 85 | user_message = None 86 | if user_message is not None: 87 | result.append(self._user_message(user_message).encode("utf-8")) 88 | result.append(2) 89 | result.append(1) 90 | 91 | f_start = ( 92 | ("" + ((function_to_call + "\n") if function_to_call else "")) 93 | if use_function 94 | else "" 95 | ) 96 | if result and result[-1] == 1: 97 | result.pop() 98 | result.pop() 99 | if not result or isinstance(result[-1], int): 100 | result.append(f_start.encode("utf-8")) 101 | else: 102 | result[-1] += f_start.encode("utf-8") 103 | 104 | return result 105 | 106 | def prompt( 107 | self, 108 | prompt: str | list[ChatMessage], 109 | functions: list[FunctionType], 110 | function_to_call: str | None = None, 111 | ) -> list[bytes | int]: 112 | """Generate the llama prompt 113 | 114 | Args: 115 | prompt (str | list[ChatMessage]): The prompt to generate the response from 116 | functions (list[FunctionType]): The functions to generate the response from 117 | function_to_call (str | None): The function to call. Defaults to None. 118 | 119 | Returns: 120 | list[bytes | int]: The llama prompt, a function selection prompt if no 121 | function is specified, or a function argument prompt if a function is 122 | specified 123 | """ 124 | return ( 125 | self._chat_prompt(prompt, functions, function_to_call, True) 126 | if isinstance(prompt, list) 127 | else self._chat_prompt( 128 | [{"role": "user", "content": prompt}], functions, function_to_call, True 129 | ) 130 | ) 131 | 132 | def should_call_prompt( 133 | self, prompt: str | list[ChatMessage], functions: list[FunctionType] 134 | ) -> tuple[list[bytes | int], ShouldCallResponse]: 135 | """Check if a function should be called 136 | 137 | Args: 138 | prompt (str | list[ChatMessage]): The prompt to generate the response from 139 | functions (list[FunctionType]): The functions to choose from 140 | 141 | Returns: 142 | tuple[str, ShouldCallResponse]: The function to call and the response 143 | """ 144 | return ( 145 | ( 146 | self._chat_prompt(prompt, functions, None, False) 147 | if isinstance(prompt, list) 148 | else self._chat_prompt( 149 | [{"role": "user", "content": prompt}], functions, None, False 150 | ) 151 | ), 152 | {"if_should_call": [""], "if_not_should_call": [" "]}, 153 | ) 154 | 155 | def natural_language_prompt( 156 | self, prompt: str, functions: list[FunctionType] 157 | ) -> list[bytes | int]: 158 | """Prompt the model to generate a natural language response 159 | 160 | Args: 161 | prompt (str): The natural language part of the prompt 162 | functions (list[FunctionType]): The functions to choose from 163 | 164 | Returns: 165 | list[bytes | int]: The natural language prompt 166 | """ 167 | return self.should_call_prompt(prompt, functions)[0] 168 | -------------------------------------------------------------------------------- 
/local_llm_function_calling/constrainer.py: -------------------------------------------------------------------------------- 1 | """A generator for the responses to a function call""" 2 | from __future__ import annotations 3 | from itertools import count 4 | from typing import Callable, Generic, TYPE_CHECKING, TypeVar 5 | 6 | import json_schema_enforcer 7 | 8 | from .exceptions import InvalidSchemaError, NoValidTokensError 9 | from .exceptions import SequenceTooLongError 10 | 11 | if TYPE_CHECKING: 12 | from .model import Model 13 | from local_llm_function_calling.model.common import Generation 14 | 15 | 16 | class JsonSchemaConstraint: 17 | """A JSON schema constraint""" 18 | 19 | def __init__( 20 | self, schema: dict, style: json_schema_enforcer.StyleConfig | None = None 21 | ) -> None: 22 | """Create a JSON schema constraint 23 | 24 | Args: 25 | schema (dict): The schema to use 26 | style (json_schema_enforcer.StyleConfig | None): The style to use 27 | (specifies indentation, etc.) 28 | 29 | Raises: 30 | InvalidSchemaError: The schema is invalid 31 | """ 32 | if style is None: 33 | self.style = json_schema_enforcer.StyleConfig(True, 4, True, 0, 0) 34 | else: 35 | self.style = style 36 | parser = json_schema_enforcer.parser_for_schema(schema) 37 | if parser is None: 38 | raise InvalidSchemaError() 39 | self.parser = parser 40 | 41 | def validate(self, text: str) -> json_schema_enforcer.schema.ValidationResult: 42 | """Validate the text against the schema 43 | 44 | Args: 45 | text (str): The text to validate 46 | 47 | Returns: 48 | json_schema_enforcer.schema.ValidationResult: The validation result 49 | """ 50 | return self.parser.validate(text, style_config=self.style) 51 | 52 | def __call__(self, text: str) -> tuple[bool, bool]: 53 | """Validate the text against the schema 54 | 55 | Args: 56 | text (str): The text to validate 57 | 58 | Returns: 59 | tuple[bool, bool]: A tuple of (is_valid, is_complete) 60 | """ 61 | result = self.validate(text) 62 | return result.valid, result.end_index is not None 63 | 64 | 65 | class EnumConstraint: 66 | """An enum constraint, allowing only a set of values""" 67 | 68 | def __init__(self, values: list[str], full_generation: bool = True) -> None: 69 | """Create an enum constraint 70 | 71 | Args: 72 | values (list[str]): The values to allow 73 | full_generation (bool): Whether to require full generation, 74 | or just that the generated value is a prefix of one of the 75 | values 76 | """ 77 | self.values = values 78 | self.full_generation = full_generation 79 | 80 | if any( 81 | (value.startswith(prefix) and value != prefix) 82 | for value in values 83 | for prefix in values 84 | ): 85 | raise ValueError("Values must not be prefixes of each other") 86 | 87 | def __call__(self, text: str) -> tuple[bool, bool]: 88 | """Validate the text against the schema 89 | 90 | Args: 91 | text (str): The text to validate 92 | 93 | Returns: 94 | tuple[bool, bool]: A tuple of (is_valid, is_complete) 95 | """ 96 | fitting = self.fitting(text) 97 | is_valid = any(fitting) 98 | is_complete = ( 99 | any(text.startswith(value) for value in self.values) 100 | if self.full_generation 101 | else (len(fitting) == 1) 102 | ) 103 | return is_valid, is_complete 104 | 105 | def fitting(self, text: str) -> list[str]: 106 | """Get the fitting values for the text 107 | 108 | Args: 109 | text (str): The text to check 110 | 111 | Returns: 112 | list[str]: The fitting values 113 | """ 114 | return [ 115 | value 116 | for value in self.values 117 | if value.startswith(text) or 
text.startswith(value) 118 | ] 119 | 120 | 121 | PrefixType = TypeVar("PrefixType") 122 | 123 | 124 | class Constrainer(Generic[PrefixType]): 125 | """Generate text with an LLM in a constrained way""" 126 | 127 | def __init__( 128 | self, 129 | model: Model[PrefixType], 130 | ) -> None: 131 | """Create a constrainer for generating text with an LLM 132 | 133 | Args: 134 | model (Model): The model to use 135 | """ 136 | self.model = model 137 | 138 | def gen_next_token( 139 | self, 140 | generation: Generation, 141 | constraint: Callable[[str], tuple[bool, bool]], 142 | ) -> tuple[bool, int]: 143 | """Generate the next token and register it 144 | 145 | Args: 146 | generation (Generation): The generation to use 147 | constraint (Callable[[str], tuple[bool, bool]]): 148 | A function that takes a string and returns a tuple of 149 | (is_valid, is_complete) 150 | 151 | Raises: 152 | NoValidTokensError: There are no valid tokens to generate 153 | 154 | Returns: 155 | tuple[bool, int]: A tuple, the first element is whether the 156 | generation is complete, the second is the number of characters 157 | generated so far (or 0 if the generation is complete) 158 | """ 159 | try: 160 | sorted_tokens = generation.get_sorted_tokens() 161 | except SequenceTooLongError: 162 | return (True, 0) 163 | for token in sorted_tokens: 164 | generated = generation.get_generated(token) 165 | fit = constraint(generated) 166 | if fit[0]: 167 | generation.register_token(token) 168 | if fit[1]: 169 | return (True, 0) 170 | return (False, len(generated)) 171 | raise NoValidTokensError() 172 | 173 | def advance_generation( 174 | self, 175 | generation: Generation, 176 | constraint: Callable[[str], tuple[bool, bool]], 177 | max_len: int | None = None, 178 | ) -> bool: 179 | """Advance the generation by one token 180 | 181 | Args: 182 | generation (Generation): The generation to use 183 | constraint (Callable[[str], tuple[bool, bool]]): 184 | A function that takes a string and returns a tuple of 185 | (is_valid, is_complete) 186 | max_len (int | None): The maximum length of the generated string 187 | 188 | Returns: 189 | bool: Whether the generation is complete 190 | """ 191 | done, length = self.gen_next_token(generation, constraint) 192 | if done: 193 | return True 194 | return max_len is not None and length >= max_len 195 | 196 | def generate( 197 | self, 198 | prefix: PrefixType, 199 | constraint: Callable[[str], tuple[bool, bool]], 200 | max_len: int | None = None, 201 | max_new_tokens: int | None = None, 202 | ) -> str: 203 | """Generate a string with the LLM 204 | 205 | Args: 206 | prefix: The prefix to use; the type depends on the model 207 | constraint (Callable[[str], tuple[bool, bool]]): 208 | A function that takes a string and returns a tuple of 209 | (is_valid, is_complete) 210 | max_len (int | None): The maximum length of the generated string 211 | max_new_tokens (int | None): The maximum number of tokens to generate 212 | 213 | Raises: 214 | NoValidTokensError: There are no valid tokens to generate 215 | 216 | Returns: 217 | str: The generated value 218 | """ 219 | generation = self.model.start_generation(prefix) 220 | for _ in range(max_new_tokens) if max_new_tokens else count(): 221 | if self.advance_generation(generation, constraint, max_len): 222 | break 223 | return generation.get_generated() 224 | -------------------------------------------------------------------------------- /local_llm_function_calling/prompter.py: -------------------------------------------------------------------------------- 1 | 
"""Prompter protocol for function calling with open source models""" 2 | 3 | import json 4 | from typing import Literal, NotRequired, Protocol, TypeVar, TypedDict, runtime_checkable 5 | 6 | JsonType = str | int | float | bool | None | list["JsonType"] | dict[str, "JsonType"] 7 | 8 | 9 | class FunctionParameters(TypedDict): 10 | """Function parameters""" 11 | 12 | type: Literal["object"] 13 | properties: dict[str, JsonType] 14 | required: NotRequired[list[str]] 15 | 16 | 17 | class FunctionType(TypedDict): 18 | """Function type""" 19 | 20 | name: str 21 | description: NotRequired[str] 22 | parameters: FunctionParameters 23 | 24 | 25 | class FunctionCall(TypedDict): 26 | """Function call""" 27 | 28 | name: str 29 | arguments: str 30 | 31 | 32 | PromptType_contra = TypeVar("PromptType_contra", contravariant=True) 33 | PrefixType_co = TypeVar("PrefixType_co", covariant=True) 34 | 35 | 36 | class TextPrompter(Protocol[PrefixType_co, PromptType_contra]): 37 | """Prompter protocol for function calling with open source models""" 38 | 39 | def prompt( 40 | self, 41 | prompt: PromptType_contra, 42 | functions: list[FunctionType], 43 | function_to_call: str | None = None, 44 | ) -> PrefixType_co: 45 | """Prompt the user for input 46 | 47 | If function_to_call is None, then the prompt's aim should be to select 48 | the correct function to call. If function_to_call is not None, then the 49 | prompt's aim should be to generate the correct arguments for the 50 | function. 51 | 52 | Args: 53 | prompt: The natural language part of the prompt 54 | functions (list[FunctionType]): The functions to choose from 55 | function_to_call (str | None): The function to call. 56 | When None, the prompt should be to select the function to call. 57 | """ 58 | ... # pylint: disable=unnecessary-ellipsis 59 | 60 | 61 | class ShouldCallResponse(TypedDict): 62 | """Response from should_call""" 63 | 64 | if_should_call: list[str] 65 | if_not_should_call: list[str] 66 | 67 | 68 | @runtime_checkable 69 | class TextPrompterWithNonFunctionResponse(Protocol[PrefixType_co, PromptType_contra]): 70 | """Prompter protocol for function calling with open source models 71 | that can return non-function responses""" 72 | 73 | def prompt( 74 | self, 75 | prompt: PromptType_contra, 76 | functions: list[FunctionType], 77 | function_to_call: str | None = None, 78 | ) -> PrefixType_co: 79 | """Prompt the model to generate a function call 80 | 81 | If function_to_call is None, then the prompt's aim should be to select 82 | the correct function to call. If function_to_call is not None, then the 83 | prompt's aim should be to generate the correct arguments for the 84 | function. 85 | 86 | Args: 87 | prompt: The natural language part of the prompt 88 | functions (list[FunctionType]): The functions to choose from 89 | function_to_call (str | None): The function to call. 90 | When None, the prompt should be to select the function to call. 91 | """ 92 | ... # pylint: disable=unnecessary-ellipsis 93 | 94 | def should_call_prompt( 95 | self, prompt: PromptType_contra, functions: list[FunctionType] 96 | ) -> tuple[PrefixType_co, ShouldCallResponse]: 97 | """Check if a function should be called 98 | 99 | Args: 100 | prompt: The prompt to check 101 | functions (list[FunctionType]): The functions to choose from 102 | """ 103 | ... 
# pylint: disable=unnecessary-ellipsis 104 | 105 | def natural_language_prompt( 106 | self, 107 | prompt: PromptType_contra, 108 | functions: list[FunctionType], 109 | ) -> PrefixType_co: 110 | """Prompt the model to generate a natural language response 111 | 112 | Args: 113 | prompt: The natural language part of the prompt 114 | functions (list[FunctionType]): The functions to choose from 115 | """ 116 | ... # pylint: disable=unnecessary-ellipsis 117 | 118 | 119 | class CompletionModelPrompter: 120 | """Basic text prompter""" 121 | 122 | def prompt_for_function(self, function: FunctionType) -> str: 123 | """Generate the prompt section for a function 124 | 125 | Args: 126 | function (FunctionType): The function to generate the prompt for 127 | 128 | Returns: 129 | str: The prompt section for the function 130 | """ 131 | header = ( 132 | f"{function['name']} - {function['description']}" 133 | if "description" in function 134 | else function["name"] 135 | ) 136 | schema = json.dumps(function["parameters"]["properties"], indent=4) 137 | packed_schema = f"```jsonschema\n{schema}\n```" 138 | return f"{header}\n{packed_schema}" 139 | 140 | def prompt_for_functions(self, functions: list[FunctionType]) -> str: 141 | """Generate the prompt section for a list of functions 142 | 143 | Args: 144 | functions (list[FunctionType]): The functions to generate the prompt for 145 | 146 | Returns: 147 | str: The prompt section for the functions 148 | """ 149 | return "\n\n".join( 150 | [self.prompt_for_function(function) for function in functions] 151 | ) 152 | 153 | @property 154 | def head(self) -> str: 155 | """The head of the prompt 156 | 157 | Returns: 158 | str: The head of the prompt 159 | """ 160 | return "\n\nAvailable functions:\n" 161 | 162 | @property 163 | def call_header(self) -> str: 164 | """The header for the function call 165 | 166 | Returns: 167 | str: The header for the function call 168 | """ 169 | return "\n\nFunction call: " 170 | 171 | def function_call(self, function_to_call: str | None = None) -> str: 172 | """Create a function call prompt 173 | 174 | Args: 175 | function_to_call (str | None): The function to call. 176 | 177 | Returns: 178 | str: The function call prompt 179 | """ 180 | return self.call_header + ( 181 | f"{function_to_call}\n```json\n" if function_to_call else "" 182 | ) 183 | 184 | def prompt( 185 | self, 186 | prompt: str, 187 | functions: list[FunctionType], 188 | function_to_call: str | None = None, 189 | ) -> str: 190 | """Create a function call prompt 191 | 192 | Args: 193 | prompt (str): The natural language part of the prompt 194 | functions (list[FunctionType]): The functions to choose from 195 | function_to_call (str | None): The function to call. 196 | 197 | Returns: 198 | str: The function call prompt 199 | """ 200 | available_functions = self.prompt_for_functions(functions) 201 | return ( 202 | prompt 203 | + self.head 204 | + available_functions 205 | + self.call_header 206 | + self.function_call(function_to_call) 207 | ) 208 | 209 | 210 | class InstructModelPrompter(CompletionModelPrompter): 211 | """Basic prompter for instruct models""" 212 | 213 | @property 214 | def head(self) -> str: 215 | """The head of the prompt 216 | 217 | Returns: 218 | str: The head of the prompt 219 | """ 220 | return ( 221 | "Your task is to call a function when needed. " 222 | "You will be provided with a list of functions. 
" 223 | "Available functions:\n" 224 | ) 225 | 226 | def prompt( 227 | self, 228 | prompt: str, 229 | functions: list[FunctionType], 230 | function_to_call: str | None = None, 231 | ) -> str: 232 | """Create a function call prompt 233 | 234 | Args: 235 | prompt (str): The natural language part of the prompt 236 | functions (list[FunctionType]): The functions to choose from 237 | function_to_call (str | None): The function to call. 238 | 239 | Returns: 240 | str: The function call prompt 241 | """ 242 | available_functions = self.prompt_for_functions(functions) 243 | return ( 244 | self.head 245 | + available_functions 246 | + "\n\n" 247 | + prompt 248 | + self.function_call(function_to_call) 249 | ) 250 | -------------------------------------------------------------------------------- /local_llm_function_calling/generator.py: -------------------------------------------------------------------------------- 1 | """A generator for the responses to a function call""" 2 | from __future__ import annotations 3 | from typing import Generic, Self, TYPE_CHECKING, TypeVar 4 | 5 | from .constrainer import Constrainer, EnumConstraint, JsonSchemaConstraint 6 | from .model import Model, ModelWithNaturalLanguageResponses 7 | from .model.huggingface import HuggingfaceModel 8 | from .prompter import ( 9 | FunctionCall, 10 | FunctionType, 11 | TextPrompter, 12 | TextPrompterWithNonFunctionResponse, 13 | ) 14 | 15 | if TYPE_CHECKING: 16 | from transformers import AutoModelForCausalLM, AutoTokenizer 17 | 18 | PromptType = TypeVar("PromptType") 19 | PrefixType = TypeVar("PrefixType") 20 | 21 | 22 | class Generator(Generic[PrefixType, PromptType]): 23 | """Generate the function call based on the schema""" 24 | 25 | def __init__( 26 | self, 27 | functions: list[FunctionType], 28 | model: Model[PrefixType], 29 | prompter: TextPrompter[PrefixType, PromptType] | None = None, 30 | ) -> None: 31 | """Create a generator for the responses to a function call 32 | 33 | Args: 34 | functions (list[FunctionType]): The functions to use. 35 | model (Model): The model to use. 36 | prompter (TextPrompter): The prompter to use. 37 | Will use the model's default prompter if not provided. 38 | """ 39 | self.model = model 40 | self.constrainer = Constrainer( 41 | self.model, 42 | ) 43 | self.prompter: TextPrompter[PrefixType, PromptType] = ( 44 | prompter or self.model.default_prompter() 45 | ) 46 | self.functions = functions or [] 47 | 48 | @classmethod 49 | def hf( 50 | cls: type[Self], 51 | functions: list[FunctionType], 52 | model: AutoModelForCausalLM | str, 53 | tokenizer: AutoTokenizer | str | None = None, 54 | prompter: TextPrompter[str, PromptType] | None = None, 55 | ) -> Generator[str, PromptType]: 56 | """Create a generator for the responses to a function call, 57 | using a Huggingface model 58 | 59 | Args: 60 | functions (list[FunctionType]): The functions to use. 61 | model (AutoTokenizer | str): The model to use. 62 | tokenizer (AutoTokenizer | str | None): The tokenizer to use. 63 | Defaults to the model's tokenizer if not provided. 64 | prompter (TextPrompter): The prompter to use. 65 | Will use the model's default prompter if not provided. 
66 | 67 | Returns: 68 | The generator, using a Huggingface model 69 | """ 70 | hf_model: Model[str] = HuggingfaceModel(model, tokenizer) 71 | return cls(functions, hf_model, prompter) 72 | 73 | def _generate_allowed_in_enum(self, prefix: PrefixType, allowed: list[str]) -> str: 74 | """Generate one of the values in an enum, for choosing the function 75 | 76 | Args: 77 | prefix (PrefixType): The prefix to use 78 | allowed (list[str]): The allowed values 79 | 80 | Returns: 81 | str: The generated value 82 | """ 83 | if len(allowed) == 1: 84 | return allowed[0] 85 | constraint = EnumConstraint(allowed) 86 | generated = self.constrainer.generate( 87 | prefix, 88 | constraint, 89 | ) 90 | fitting = constraint.fitting(generated) 91 | return fitting[0] if fitting else generated 92 | 93 | def _choose_function(self, prompt: PromptType, suffix: str = "") -> str: 94 | """Choose a function to call using the LLM 95 | 96 | Args: 97 | prompt (PromptType): The prompt to use 98 | suffix (str): The suffix to terminate, 99 | in order to begin prefix clashes 100 | 101 | Returns: 102 | str: The function to call 103 | """ 104 | prefix = self.prompter.prompt(prompt, self.functions) 105 | suffix_map = { 106 | function["name"] + suffix: function["name"] for function in self.functions 107 | } 108 | return suffix_map[ 109 | self._generate_allowed_in_enum( 110 | prefix, [function["name"] + suffix for function in self.functions] 111 | ) 112 | ] 113 | 114 | def choose_function( 115 | self, prompt: PromptType, function_call: str | None = None, suffix: str = "" 116 | ) -> str: 117 | """Choose a function to call 118 | 119 | Args: 120 | prompt (PromptType): The prompt to use 121 | function_call (str | None): The function to call 122 | Will be generated if not provided. 123 | suffix (str): The suffix to terminate, 124 | in order to begin prefix clashes 125 | 126 | Returns: 127 | str: The function to call 128 | """ 129 | if function_call is None: 130 | return self._choose_function(prompt, suffix) 131 | return function_call 132 | 133 | def generate_arguments( 134 | self, 135 | prompt: PromptType, 136 | function_call: str, 137 | max_length: int | None = None, 138 | max_new_tokens: int | None = None, 139 | ) -> str: 140 | """Generate the arguments for the function 141 | 142 | Args: 143 | prompt (PromptType): The prompt to use 144 | function_call (str): The function to call 145 | max_length (int | None): The maximum length of the generated sequence 146 | max_new_tokens (int | None): The maximum number of tokens to generate 147 | 148 | Returns: 149 | str: The arguments for the function, as a JSON string 150 | (may not be complete) 151 | """ 152 | prefix = self.prompter.prompt(prompt, self.functions, function_call) 153 | constraint = JsonSchemaConstraint( 154 | [ 155 | function 156 | for function in self.functions 157 | if function["name"] == function_call 158 | ][0][ 159 | "parameters" 160 | ] # type: ignore 161 | ) 162 | generated = self.constrainer.generate( 163 | prefix, 164 | constraint, 165 | max_length, 166 | max_new_tokens, 167 | ) 168 | validated = constraint.validate(generated) 169 | return generated[: validated.end_index] if validated.end_index else generated 170 | 171 | def generate( 172 | self, 173 | prompt: PromptType, 174 | function_call: str | None = None, 175 | max_length: int | None = None, 176 | max_new_tokens: int | None = None, 177 | suffix: str = "", 178 | ) -> FunctionCall: 179 | """Generate the function call 180 | 181 | Args: 182 | prompt (PromptType): The prompt to use 183 | function_call (str | None): The 
function call to use.
184 |                 Will be generated if not provided.
185 |             max_length (int | None): The maximum length of the generated sequence
186 |             max_new_tokens (int | None): The maximum number of tokens to generate
187 |             suffix (str): The suffix to terminate,
188 |                 in order to begin prefix clashes
189 | 
190 |         Returns:
191 |             FunctionCall: The generated function call
192 |         """
193 |         function_name = self.choose_function(prompt, function_call, suffix)
194 |         arguments = self.generate_arguments(
195 |             prompt, function_name, max_length, max_new_tokens
196 |         )
197 |         return {"name": function_name, "arguments": arguments}
198 | 
199 |     def should_call(
200 |         self,
201 |         prompt: PromptType,
202 |     ) -> bool:
203 |         """Determine if the function should be called
204 | 
205 |         Args:
206 |             prompt (PromptType): The prompt to use
207 | 
208 |         Returns:
209 |             bool: Whether the function should be called
210 |         """
211 |         if not isinstance(self.prompter, TextPrompterWithNonFunctionResponse):
212 |             raise NotImplementedError(
213 |                 "The prompter you're using does not support non-function responses"
214 |             )
215 |         prefix, responses = self.prompter.should_call_prompt(prompt, self.functions)
216 |         generated = self._generate_allowed_in_enum(
217 |             prefix, responses["if_should_call"] + responses["if_not_should_call"]
218 |         )
219 |         return generated in responses["if_should_call"]
220 | 
221 |     def natural_language(
222 |         self,
223 |         prompt: PromptType,
224 |         max_new_tokens: int | None = None,
225 |     ) -> str:
226 |         """Generate a natural language response
227 | 
228 |         Args:
229 |             prompt (PromptType): The prompt to use
230 |             max_new_tokens (int | None): The maximum number of tokens to generate
231 | 
232 |         Returns:
233 |             str: The natural language response
234 |         """
235 |         if not isinstance(self.model, ModelWithNaturalLanguageResponses):
236 |             raise NotImplementedError(
237 |                 "The model you're using does not support natural language responses"
238 |             )
239 |         if not isinstance(self.prompter, TextPrompterWithNonFunctionResponse):
240 |             raise NotImplementedError(
241 |                 "The prompter you're using does not support non-function responses"
242 |             )
243 |         return self.model.generate_from_prompt(
244 |             self.prompter.natural_language_prompt(prompt, self.functions),
245 |             max_new_tokens,
246 |         )
247 | 
248 |     def respond(
249 |         self, prompt: PromptType, max_new_tokens: int | None = None
250 |     ) -> str | FunctionCall:
251 |         """Generate a response
252 | 
253 |         Args:
254 |             prompt (PromptType): The prompt to use
255 | 
256 |         Returns:
257 |             str | FunctionCall: The response
258 |         """
259 |         if self.should_call(prompt):
260 |             return self.generate(prompt, max_new_tokens=max_new_tokens)
261 |         return self.natural_language(prompt, max_new_tokens)
262 | 
--------------------------------------------------------------------------------
/local_llm_function_calling/model/llama.py:
--------------------------------------------------------------------------------
1 | """A container for llama-cpp models"""
2 | from __future__ import annotations
3 | import json
4 | from typing import Generator, Iterator, TYPE_CHECKING
5 | 
6 | from llama_cpp import Llama, LlamaGrammar, sys
7 | 
8 | if TYPE_CHECKING:
9 |     from ..prompter import FunctionType
10 | 
11 | 
12 | class LogitLlama(Llama):
13 |     """A wrapper for llama-cpp models that returns logits instead of tokens"""
14 | 
15 |     def generate_logits(
16 |         self,
17 |         tokens: list[int],
18 |         reset: bool = True,
19 |         grammar: LlamaGrammar | None = None,
20 |     ) -> Generator[list[float], list[int], None]:
21 |         """Create a generator of tokens from a
prompt. 22 | 23 | Args: 24 | tokens: The prompt tokens. 25 | top_k: The top-k sampling parameter. 26 | top_p: The top-p sampling parameter. 27 | temp: The temperature parameter. 28 | repeat_penalty: The repeat penalty parameter. 29 | reset: Whether to reset the model state. 30 | 31 | Yields: 32 | The token logits, expecting the next token(s) in return. 33 | """ 34 | assert self.ctx is not None 35 | if reset and len(self._input_ids) > 0: 36 | longest_prefix = 0 37 | for a, b in zip(self._input_ids, tokens[:-1]): 38 | if a == b: 39 | longest_prefix += 1 40 | else: 41 | break 42 | if longest_prefix > 0: 43 | if self.verbose: 44 | print("Llama.generate: prefix-match hit", file=sys.stderr) 45 | reset = False 46 | tokens = tokens[longest_prefix:] 47 | self.n_tokens = longest_prefix 48 | 49 | if reset: 50 | self.reset() 51 | 52 | if grammar is not None: 53 | grammar.reset() 54 | 55 | while True: 56 | self.eval(tokens) 57 | tokens = yield self.eval_logits[0] 58 | if tokens is None: 59 | break 60 | 61 | 62 | class CodeLlamaFc: 63 | """A prompter for code llama function calling models""" 64 | 65 | def function_descriptions( 66 | self, functions: list[FunctionType], function_to_call: str 67 | ) -> list[str]: 68 | """Get the descriptions of the functions 69 | 70 | Args: 71 | functions (list[FunctionType]): The functions to get the descriptions of 72 | function_to_call (str): The function to call 73 | 74 | Returns: 75 | list[str]: The descriptions of the functions 76 | (empty if the function doesn't exist or has no description) 77 | """ 78 | return [ 79 | "Function description: " + function["description"] 80 | for function in functions 81 | if function["name"] == function_to_call and "description" in function 82 | ] 83 | 84 | def function_parameters( 85 | self, functions: list[FunctionType], function_to_call: str 86 | ) -> str: 87 | """Get the parameters of the function 88 | 89 | Args: 90 | functions (list[FunctionType]): The functions to get the parameters of 91 | function_to_call (str): The function to call 92 | 93 | Returns: 94 | str: The parameters of the function as a JSON schema 95 | """ 96 | return next( 97 | json.dumps(function["parameters"]["properties"], indent=4) 98 | for function in functions 99 | if function["name"] == function_to_call 100 | ) 101 | 102 | def function_data( 103 | self, functions: list[FunctionType], function_to_call: str 104 | ) -> str: 105 | """Get the data for the function 106 | 107 | Args: 108 | functions (list[FunctionType]): The functions to get the data for 109 | function_to_call (str): The function to call 110 | 111 | Returns: 112 | str: The data necessary to generate the arguments for the function 113 | """ 114 | return "\n".join( 115 | self.function_descriptions(functions, function_to_call) 116 | + [ 117 | "Function parameters should follow this schema:", 118 | "```jsonschema", 119 | self.function_parameters(functions, function_to_call), 120 | "```", 121 | ] 122 | ) 123 | 124 | def function_summary(self, function: FunctionType) -> str: 125 | """Get a summary of a function 126 | 127 | Args: 128 | function (FunctionType): The function to get the summary of 129 | 130 | Returns: 131 | str: The summary of the function, as a bullet point 132 | """ 133 | return f"- {function['name']}" + ( 134 | f" - {function['description']}" if "description" in function else "" 135 | ) 136 | 137 | def functions_summary(self, functions: list[FunctionType]) -> str: 138 | """Get a summary of the functions 139 | 140 | Args: 141 | functions (list[FunctionType]): The functions to get the 
summary of 142 | 143 | Returns: 144 | str: The summary of the functions, as a bulleted list 145 | """ 146 | return "Available functions:\n" + "\n".join( 147 | self.function_summary(function) for function in functions 148 | ) 149 | 150 | def prompt( 151 | self, 152 | prompt: str, 153 | functions: list[FunctionType], 154 | function_to_call: str | None = None, 155 | ) -> list[bytes | int]: 156 | """Generate the llama prompt 157 | 158 | Args: 159 | prompt (str): The prompt to generate the response to 160 | functions (list[FunctionType]): The functions to generate the response from 161 | function_to_call (str | None): The function to call. Defaults to None. 162 | 163 | Returns: 164 | list[bytes | int]: The llama prompt, a function selection prompt if no 165 | function is specified, or a function argument prompt if a function is 166 | specified 167 | """ 168 | system = ( 169 | "Help choose the appropriate function " 170 | "to call to answer the user's question." 171 | if function_to_call is None 172 | else f"Define the arguments for {function_to_call} " 173 | "to answer the user's question." 174 | ) 175 | data = ( 176 | self.function_data(functions, function_to_call) 177 | if function_to_call 178 | else self.functions_summary(functions) 179 | ) 180 | response_start = ( 181 | f"Here are the arguments for the `{function_to_call}` function: ```json\n" 182 | if function_to_call 183 | else "Here's the function the user should call: " 184 | ) 185 | return [ 186 | 1, 187 | f"[INST] <>\n{system}\n\n{data}\n<>\n\n{prompt} [/INST]" 188 | f" {response_start}".encode("utf-8"), 189 | ] 190 | 191 | 192 | class LlamaInstructPrompter: 193 | """A prompter for Llama2 instruct models""" 194 | 195 | def function_descriptions( 196 | self, functions: list[FunctionType], function_to_call: str 197 | ) -> list[str]: 198 | """Get the descriptions of the functions 199 | 200 | Args: 201 | functions (list[FunctionType]): The functions to get the descriptions of 202 | function_to_call (str): The function to call 203 | 204 | Returns: 205 | list[str]: The descriptions of the functions 206 | (empty if the function doesn't exist or has no description) 207 | """ 208 | return [ 209 | "Function description: " + function["description"] 210 | for function in functions 211 | if function["name"] == function_to_call and "description" in function 212 | ] 213 | 214 | def function_parameters( 215 | self, functions: list[FunctionType], function_to_call: str 216 | ) -> str: 217 | """Get the parameters of the function 218 | 219 | Args: 220 | functions (list[FunctionType]): The functions to get the parameters of 221 | function_to_call (str): The function to call 222 | 223 | Returns: 224 | str: The parameters of the function as a JSON schema 225 | """ 226 | return next( 227 | json.dumps(function["parameters"]["properties"], indent=4) 228 | for function in functions 229 | if function["name"] == function_to_call 230 | ) 231 | 232 | def function_data( 233 | self, functions: list[FunctionType], function_to_call: str 234 | ) -> str: 235 | """Get the data for the function 236 | 237 | Args: 238 | functions (list[FunctionType]): The functions to get the data for 239 | function_to_call (str): The function to call 240 | 241 | Returns: 242 | str: The data necessary to generate the arguments for the function 243 | """ 244 | return "\n".join( 245 | self.function_descriptions(functions, function_to_call) 246 | + [ 247 | "Function parameters should follow this schema:", 248 | "```jsonschema", 249 | self.function_parameters(functions, function_to_call), 250 | 
"```", 251 | ] 252 | ) 253 | 254 | def function_summary(self, function: FunctionType) -> str: 255 | """Get a summary of a function 256 | 257 | Args: 258 | function (FunctionType): The function to get the summary of 259 | 260 | Returns: 261 | str: The summary of the function, as a bullet point 262 | """ 263 | return f"- {function['name']}" + ( 264 | f" - {function['description']}" if "description" in function else "" 265 | ) 266 | 267 | def functions_summary(self, functions: list[FunctionType]) -> str: 268 | """Get a summary of the functions 269 | 270 | Args: 271 | functions (list[FunctionType]): The functions to get the summary of 272 | 273 | Returns: 274 | str: The summary of the functions, as a bulleted list 275 | """ 276 | return "Available functions:\n" + "\n".join( 277 | self.function_summary(function) for function in functions 278 | ) 279 | 280 | def prompt( 281 | self, 282 | prompt: str, 283 | functions: list[FunctionType], 284 | function_to_call: str | None = None, 285 | ) -> list[bytes | int]: 286 | """Generate the llama prompt 287 | 288 | Args: 289 | prompt (str): The prompt to generate the response to 290 | functions (list[FunctionType]): The functions to generate the response from 291 | function_to_call (str | None): The function to call. Defaults to None. 292 | 293 | Returns: 294 | list[bytes | int]: The llama prompt, a function selection prompt if no 295 | function is specified, or a function argument prompt if a function is 296 | specified 297 | """ 298 | system = ( 299 | "Help choose the appropriate function " 300 | "to call to answer the user's question." 301 | if function_to_call is None 302 | else f"Define the arguments for {function_to_call} " 303 | "to answer the user's question." 304 | ) 305 | data = ( 306 | self.function_data(functions, function_to_call) 307 | if function_to_call 308 | else self.functions_summary(functions) 309 | ) 310 | response_start = ( 311 | f"Here are the arguments for the `{function_to_call}` function: ```json\n" 312 | if function_to_call 313 | else "Here's the function the user should call: " 314 | ) 315 | return [ 316 | 1, 317 | f"[INST] <>\n{system}\n\n{data}\n<>\n\n{prompt} [/INST]" 318 | f" {response_start}".encode("utf-8"), 319 | ] 320 | 321 | 322 | class LlamaGeneration: 323 | """A generation sequence for llama-cpp models""" 324 | 325 | def __init__(self, model: LogitLlama, prefix: list[bytes | int]) -> None: 326 | """Create a generation sequence 327 | 328 | Args: 329 | model (LogitLlama): The model to use for generation 330 | prefix (str): The generation prefix 331 | """ 332 | self.model = model 333 | self.generated: list[int] = [] 334 | self.prompt: list[int] = sum( 335 | ( 336 | [item] if isinstance(item, int) else self.model.tokenize(item, False) 337 | for item in prefix 338 | ), 339 | [], 340 | ) 341 | 342 | self.generation = self.model.generate_logits(self.prompt) 343 | next(self.generation) 344 | 345 | def get_sorted_tokens(self) -> Iterator[int]: 346 | """Get the tokens sorted by probability 347 | 348 | Yields: 349 | The next of the tokens sorted by probability 350 | """ 351 | probabilities = self.model.eval_logits[0] 352 | for token_id, _ in sorted( 353 | enumerate(probabilities), key=lambda item: item[1], reverse=True 354 | ): 355 | try: 356 | if self.get_generated(token_id): 357 | yield token_id 358 | except UnicodeDecodeError: 359 | continue 360 | 361 | def register_token(self, token: int) -> None: 362 | """Select the token for this generation step 363 | 364 | Args: 365 | token (int): The token to select 366 | """ 367 | 
self.generated.append(token) 368 | self.generation.send([token]) 369 | 370 | def get_generated(self, candidate: int | None = None) -> str: 371 | """Get the generated sequence 372 | 373 | Args: 374 | candidate (int | None): The token to add to the sequence 375 | 376 | Returns: 377 | str: The generated sequence 378 | """ 379 | return self.model.detokenize( 380 | self.generated + ([candidate] if candidate else []) 381 | ).decode("utf-8") 382 | 383 | 384 | class LlamaModel: 385 | """A llama-cpp model""" 386 | 387 | def __init__( 388 | self, 389 | model: LogitLlama | str, 390 | ) -> None: 391 | """Create a huggingface model 392 | 393 | Args: 394 | model (LogitLlama | str): The model to use for generation, 395 | or the path to the model 396 | """ 397 | if isinstance(model, str): 398 | self.model = LogitLlama(model) 399 | else: 400 | self.model = model 401 | 402 | def start_generation(self, prefix: list[bytes | int]) -> LlamaGeneration: 403 | """Start a new generation sequence 404 | 405 | Args: 406 | prefix (list[int]): The generation prefix 407 | 408 | Returns: 409 | LlamaGeneration: The generation sequence initialized with the prefix 410 | """ 411 | return LlamaGeneration(self.model, prefix) 412 | 413 | def default_prompter(self) -> LlamaInstructPrompter: 414 | """Get the default prompter for this model 415 | 416 | Returns: 417 | LlamaInstructPrompter: The default prompter for this model 418 | """ 419 | return LlamaInstructPrompter() 420 | 421 | def generate_from_prompt( 422 | self, 423 | prefix: list[bytes | int], 424 | max_tokens: int | None = None, 425 | ) -> str: 426 | """Generate a string from a prompt 427 | 428 | Args: 429 | prefix (list[int]): The prompt to generate a response to 430 | max_tokens (int | None): The maximum number of tokens to generate 431 | 432 | Returns: 433 | str: The generated value 434 | """ 435 | prompt = sum( 436 | ( 437 | [item] if isinstance(item, int) else self.model.tokenize(item, False) 438 | for item in prefix 439 | ), 440 | [], 441 | ) 442 | first_token_logits = next(self.model.generate_logits(prompt)) 443 | tokens = [1] 444 | # This magic is to enforce the first character to be a space 445 | for token_id, _ in sorted( 446 | enumerate(first_token_logits), key=lambda item: item[1], reverse=True 447 | ): 448 | if ( 449 | self.model.detokenize([token_id]) 450 | .decode("utf-8", errors="ignore") 451 | .startswith(" ") 452 | ): 453 | tokens.append(token_id) 454 | prompt.append(token_id) 455 | break 456 | for i, token in enumerate(self.model.generate(prompt)): 457 | if token == self.model.token_eos(): 458 | break 459 | tokens.append(token) 460 | if max_tokens is not None and i >= max_tokens: 461 | break 462 | return self.model.detokenize(tokens).decode("utf-8") 463 | -------------------------------------------------------------------------------- /poetry.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. 2 | 3 | [[package]] 4 | name = "certifi" 5 | version = "2023.5.7" 6 | description = "Python package for providing Mozilla's CA Bundle." 
7 | category = "main" 8 | optional = false 9 | python-versions = ">=3.6" 10 | files = [ 11 | {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, 12 | {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, 13 | ] 14 | 15 | [[package]] 16 | name = "charset-normalizer" 17 | version = "3.1.0" 18 | description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 19 | category = "main" 20 | optional = false 21 | python-versions = ">=3.7.0" 22 | files = [ 23 | {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, 24 | {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, 25 | {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, 26 | {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, 27 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, 28 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, 29 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, 30 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, 31 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, 32 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, 33 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, 34 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, 35 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, 36 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, 37 | {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, 38 | {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, 39 | {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, 40 | {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, 41 | {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, 42 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, 43 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, 44 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, 45 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, 46 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, 47 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, 48 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, 49 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, 50 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, 51 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, 52 | {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, 53 | {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, 54 | {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, 55 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, 56 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, 57 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, 58 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, 59 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, 60 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, 61 | {file 
= "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, 62 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, 63 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, 64 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, 65 | {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, 66 | {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, 67 | {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, 68 | {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, 69 | {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, 70 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, 71 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, 72 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, 73 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, 74 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, 75 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, 76 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, 77 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, 78 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, 79 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, 80 | {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, 81 | {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, 82 | {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, 83 | {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, 84 | {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, 85 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, 86 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, 87 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, 88 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, 89 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, 90 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, 91 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, 92 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, 93 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, 94 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, 95 | {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, 96 | {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, 97 | {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, 98 | ] 99 | 100 | [[package]] 101 | name = "colorama" 102 | version = "0.4.6" 103 | description = "Cross-platform colored terminal text." 104 | category = "main" 105 | optional = false 106 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" 107 | files = [ 108 | {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, 109 | {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, 110 | ] 111 | 112 | [[package]] 113 | name = "diskcache" 114 | version = "5.6.3" 115 | description = "Disk Cache -- Disk and file backed persistent cache." 
116 | category = "main" 117 | optional = true 118 | python-versions = ">=3" 119 | files = [ 120 | {file = "diskcache-5.6.3-py3-none-any.whl", hash = "sha256:5e31b2d5fbad117cc363ebaf6b689474db18a1f6438bc82358b024abd4c2ca19"}, 121 | {file = "diskcache-5.6.3.tar.gz", hash = "sha256:2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc"}, 122 | ] 123 | 124 | [[package]] 125 | name = "filelock" 126 | version = "3.12.2" 127 | description = "A platform independent file lock." 128 | category = "main" 129 | optional = false 130 | python-versions = ">=3.7" 131 | files = [ 132 | {file = "filelock-3.12.2-py3-none-any.whl", hash = "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"}, 133 | {file = "filelock-3.12.2.tar.gz", hash = "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81"}, 134 | ] 135 | 136 | [package.extras] 137 | docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] 138 | testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"] 139 | 140 | [[package]] 141 | name = "fsspec" 142 | version = "2023.6.0" 143 | description = "File-system specification" 144 | category = "main" 145 | optional = false 146 | python-versions = ">=3.8" 147 | files = [ 148 | {file = "fsspec-2023.6.0-py3-none-any.whl", hash = "sha256:1cbad1faef3e391fba6dc005ae9b5bdcbf43005c9167ce78c915549c352c869a"}, 149 | {file = "fsspec-2023.6.0.tar.gz", hash = "sha256:d0b2f935446169753e7a5c5c55681c54ea91996cc67be93c39a154fb3a2742af"}, 150 | ] 151 | 152 | [package.extras] 153 | abfs = ["adlfs"] 154 | adl = ["adlfs"] 155 | arrow = ["pyarrow (>=1)"] 156 | dask = ["dask", "distributed"] 157 | devel = ["pytest", "pytest-cov"] 158 | dropbox = ["dropbox", "dropboxdrivefs", "requests"] 159 | full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] 160 | fuse = ["fusepy"] 161 | gcs = ["gcsfs"] 162 | git = ["pygit2"] 163 | github = ["requests"] 164 | gs = ["gcsfs"] 165 | gui = ["panel"] 166 | hdfs = ["pyarrow (>=1)"] 167 | http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] 168 | libarchive = ["libarchive-c"] 169 | oci = ["ocifs"] 170 | s3 = ["s3fs"] 171 | sftp = ["paramiko"] 172 | smb = ["smbprotocol"] 173 | ssh = ["paramiko"] 174 | tqdm = ["tqdm"] 175 | 176 | [[package]] 177 | name = "huggingface-hub" 178 | version = "0.15.1" 179 | description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" 180 | category = "main" 181 | optional = false 182 | python-versions = ">=3.7.0" 183 | files = [ 184 | {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"}, 185 | {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"}, 186 | ] 187 | 188 | [package.dependencies] 189 | filelock = "*" 190 | fsspec = "*" 191 | packaging = ">=20.9" 192 | pyyaml = ">=5.1" 193 | requests = "*" 194 | tqdm = ">=4.42.1" 195 | typing-extensions = ">=3.7.4.3" 196 | 197 | [package.extras] 198 | all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", 
"soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] 199 | cli = ["InquirerPy (==0.3.4)"] 200 | dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] 201 | fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] 202 | quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"] 203 | tensorflow = ["graphviz", "pydot", "tensorflow"] 204 | testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] 205 | torch = ["torch"] 206 | typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] 207 | 208 | [[package]] 209 | name = "idna" 210 | version = "3.4" 211 | description = "Internationalized Domain Names in Applications (IDNA)" 212 | category = "main" 213 | optional = false 214 | python-versions = ">=3.5" 215 | files = [ 216 | {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, 217 | {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, 218 | ] 219 | 220 | [[package]] 221 | name = "jinja2" 222 | version = "3.1.2" 223 | description = "A very fast and expressive template engine." 224 | category = "main" 225 | optional = false 226 | python-versions = ">=3.7" 227 | files = [ 228 | {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, 229 | {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, 230 | ] 231 | 232 | [package.dependencies] 233 | MarkupSafe = ">=2.0" 234 | 235 | [package.extras] 236 | i18n = ["Babel (>=2.7)"] 237 | 238 | [[package]] 239 | name = "json-schema-enforcer" 240 | version = "0.1.4" 241 | description = "A progressive JSON schema validator" 242 | category = "main" 243 | optional = false 244 | python-versions = ">=3.10,<4.0" 245 | files = [ 246 | {file = "json_schema_enforcer-0.1.4-py3-none-any.whl", hash = "sha256:84bb8d84d4b6923e11e0ad38e11a024c64607cc791aa62c3ef352ae760a5a18d"}, 247 | {file = "json_schema_enforcer-0.1.4.tar.gz", hash = "sha256:117c267543e1517808a4ea9907d21b0f275fea1f26712d650d66d02fdf1aba50"}, 248 | ] 249 | 250 | [[package]] 251 | name = "llama-cpp-python" 252 | version = "0.1.83" 253 | description = "A Python wrapper for llama.cpp" 254 | category = "main" 255 | optional = true 256 | python-versions = ">=3.7" 257 | files = [ 258 | {file = "llama_cpp_python-0.1.83.tar.gz", hash = "sha256:9f40656e46a85a3c3427790246e03490bb90202c37cb99732a095ffcb99efe54"}, 259 | ] 260 | 261 | [package.dependencies] 262 | diskcache = ">=5.6.1,<6.0.0" 263 | numpy = ">=1.24.4,<2.0.0" 264 | typing-extensions = ">=4.7.1,<5.0.0" 265 | 266 | [package.extras] 267 | server = ["fastapi (>=0.100.0)", "pydantic-settings (>=2.0.1)", "sse-starlette (>=1.6.1)", "uvicorn (>=0.23.2,<0.24.0)"] 268 | 269 | [[package]] 270 | name = "markupsafe" 271 | version = "2.1.3" 272 | description = "Safely add untrusted strings to HTML/XML markup." 
273 | category = "main" 274 | optional = false 275 | python-versions = ">=3.7" 276 | files = [ 277 | {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, 278 | {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, 279 | {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, 280 | {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, 281 | {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, 282 | {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, 283 | {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, 284 | {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, 285 | {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, 286 | {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, 287 | {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, 288 | {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, 289 | {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, 290 | {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, 291 | {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, 292 | {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, 293 | {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, 294 | {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, 295 | {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, 296 | {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, 297 | {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, 298 | {file = 
"MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, 299 | {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, 300 | {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, 301 | {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, 302 | {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, 303 | {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, 304 | {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, 305 | {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, 306 | {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, 307 | {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, 308 | {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, 309 | {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, 310 | {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, 311 | {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, 312 | {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, 313 | {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, 314 | {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, 315 | {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, 316 | {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, 317 | {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, 318 | {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, 319 | {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, 320 | {file = 
"MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, 321 | {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, 322 | {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, 323 | {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, 324 | {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, 325 | {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, 326 | {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, 327 | ] 328 | 329 | [[package]] 330 | name = "mpmath" 331 | version = "1.3.0" 332 | description = "Python library for arbitrary-precision floating-point arithmetic" 333 | category = "main" 334 | optional = false 335 | python-versions = "*" 336 | files = [ 337 | {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, 338 | {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, 339 | ] 340 | 341 | [package.extras] 342 | develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] 343 | docs = ["sphinx"] 344 | gmpy = ["gmpy2 (>=2.1.0a4)"] 345 | tests = ["pytest (>=4.6)"] 346 | 347 | [[package]] 348 | name = "networkx" 349 | version = "3.1" 350 | description = "Python package for creating and manipulating graphs and networks" 351 | category = "main" 352 | optional = false 353 | python-versions = ">=3.8" 354 | files = [ 355 | {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, 356 | {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, 357 | ] 358 | 359 | [package.extras] 360 | default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] 361 | developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] 362 | doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] 363 | extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] 364 | test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] 365 | 366 | [[package]] 367 | name = "numpy" 368 | version = "1.25.0" 369 | description = "Fundamental package for array computing in Python" 370 | category = "main" 371 | optional = false 372 | python-versions = ">=3.9" 373 | files = [ 374 | {file = "numpy-1.25.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8aa130c3042052d656751df5e81f6d61edff3e289b5994edcf77f54118a8d9f4"}, 375 | {file = "numpy-1.25.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e3f2b96e3b63c978bc29daaa3700c028fe3f049ea3031b58aa33fe2a5809d24"}, 376 | {file = "numpy-1.25.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6b267f349a99d3908b56645eebf340cb58f01bd1e773b4eea1a905b3f0e4208"}, 377 | {file = 
"numpy-1.25.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aedd08f15d3045a4e9c648f1e04daca2ab1044256959f1f95aafeeb3d794c16"}, 378 | {file = "numpy-1.25.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d183b5c58513f74225c376643234c369468e02947b47942eacbb23c1671f25d"}, 379 | {file = "numpy-1.25.0-cp310-cp310-win32.whl", hash = "sha256:d76a84998c51b8b68b40448ddd02bd1081bb33abcdc28beee6cd284fe11036c6"}, 380 | {file = "numpy-1.25.0-cp310-cp310-win_amd64.whl", hash = "sha256:c0dc071017bc00abb7d7201bac06fa80333c6314477b3d10b52b58fa6a6e38f6"}, 381 | {file = "numpy-1.25.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c69fe5f05eea336b7a740e114dec995e2f927003c30702d896892403df6dbf0"}, 382 | {file = "numpy-1.25.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c7211d7920b97aeca7b3773a6783492b5b93baba39e7c36054f6e749fc7490c"}, 383 | {file = "numpy-1.25.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecc68f11404930e9c7ecfc937aa423e1e50158317bf67ca91736a9864eae0232"}, 384 | {file = "numpy-1.25.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e559c6afbca484072a98a51b6fa466aae785cfe89b69e8b856c3191bc8872a82"}, 385 | {file = "numpy-1.25.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6c284907e37f5e04d2412950960894b143a648dea3f79290757eb878b91acbd1"}, 386 | {file = "numpy-1.25.0-cp311-cp311-win32.whl", hash = "sha256:95367ccd88c07af21b379be1725b5322362bb83679d36691f124a16357390153"}, 387 | {file = "numpy-1.25.0-cp311-cp311-win_amd64.whl", hash = "sha256:b76aa836a952059d70a2788a2d98cb2a533ccd46222558b6970348939e55fc24"}, 388 | {file = "numpy-1.25.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b792164e539d99d93e4e5e09ae10f8cbe5466de7d759fc155e075237e0c274e4"}, 389 | {file = "numpy-1.25.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7cd981ccc0afe49b9883f14761bb57c964df71124dcd155b0cba2b591f0d64b9"}, 390 | {file = "numpy-1.25.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aa48bebfb41f93043a796128854b84407d4df730d3fb6e5dc36402f5cd594c0"}, 391 | {file = "numpy-1.25.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5177310ac2e63d6603f659fadc1e7bab33dd5a8db4e0596df34214eeab0fee3b"}, 392 | {file = "numpy-1.25.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0ac6edfb35d2a99aaf102b509c8e9319c499ebd4978df4971b94419a116d0790"}, 393 | {file = "numpy-1.25.0-cp39-cp39-win32.whl", hash = "sha256:7412125b4f18aeddca2ecd7219ea2d2708f697943e6f624be41aa5f8a9852cc4"}, 394 | {file = "numpy-1.25.0-cp39-cp39-win_amd64.whl", hash = "sha256:26815c6c8498dc49d81faa76d61078c4f9f0859ce7817919021b9eba72b425e3"}, 395 | {file = "numpy-1.25.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5b1b90860bf7d8a8c313b372d4f27343a54f415b20fb69dd601b7efe1029c91e"}, 396 | {file = "numpy-1.25.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85cdae87d8c136fd4da4dad1e48064d700f63e923d5af6c8c782ac0df8044542"}, 397 | {file = "numpy-1.25.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cc3fda2b36482891db1060f00f881c77f9423eead4c3579629940a3e12095fe8"}, 398 | {file = "numpy-1.25.0.tar.gz", hash = "sha256:f1accae9a28dc3cda46a91de86acf69de0d1b5f4edd44a9b0c3ceb8036dfff19"}, 399 | ] 400 | 401 | [[package]] 402 | name = "packaging" 403 | version = "23.1" 404 | description = "Core utilities for Python packages" 405 | category = "main" 406 | optional = false 407 | python-versions = ">=3.7" 408 | files = [ 409 | {file = 
"packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, 410 | {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, 411 | ] 412 | 413 | [[package]] 414 | name = "pyyaml" 415 | version = "6.0" 416 | description = "YAML parser and emitter for Python" 417 | category = "main" 418 | optional = false 419 | python-versions = ">=3.6" 420 | files = [ 421 | {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, 422 | {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, 423 | {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, 424 | {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, 425 | {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, 426 | {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, 427 | {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, 428 | {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, 429 | {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, 430 | {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, 431 | {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, 432 | {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, 433 | {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, 434 | {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, 435 | {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, 436 | {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, 437 | {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, 438 | {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, 439 | {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, 440 | {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = 
"sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, 441 | {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, 442 | {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, 443 | {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, 444 | {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, 445 | {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, 446 | {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, 447 | {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, 448 | {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, 449 | {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, 450 | {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, 451 | {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, 452 | {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, 453 | {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, 454 | {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, 455 | {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, 456 | {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, 457 | {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, 458 | {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, 459 | {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, 460 | {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, 461 | ] 462 | 463 | [[package]] 464 | name = "regex" 465 | version = "2023.6.3" 466 | description = "Alternative regular expression module, to replace re." 
467 | category = "main" 468 | optional = false 469 | python-versions = ">=3.6" 470 | files = [ 471 | {file = "regex-2023.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:824bf3ac11001849aec3fa1d69abcb67aac3e150a933963fb12bda5151fe1bfd"}, 472 | {file = "regex-2023.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05ed27acdf4465c95826962528f9e8d41dbf9b1aa8531a387dee6ed215a3e9ef"}, 473 | {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b49c764f88a79160fa64f9a7b425620e87c9f46095ef9c9920542ab2495c8bc"}, 474 | {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8e3f1316c2293e5469f8f09dc2d76efb6c3982d3da91ba95061a7e69489a14ef"}, 475 | {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:43e1dd9d12df9004246bacb79a0e5886b3b6071b32e41f83b0acbf293f820ee8"}, 476 | {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4959e8bcbfda5146477d21c3a8ad81b185cd252f3d0d6e4724a5ef11c012fb06"}, 477 | {file = "regex-2023.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af4dd387354dc83a3bff67127a124c21116feb0d2ef536805c454721c5d7993d"}, 478 | {file = "regex-2023.6.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2239d95d8e243658b8dbb36b12bd10c33ad6e6933a54d36ff053713f129aa536"}, 479 | {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:890e5a11c97cf0d0c550eb661b937a1e45431ffa79803b942a057c4fb12a2da2"}, 480 | {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a8105e9af3b029f243ab11ad47c19b566482c150c754e4c717900a798806b222"}, 481 | {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:25be746a8ec7bc7b082783216de8e9473803706723b3f6bef34b3d0ed03d57e2"}, 482 | {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:3676f1dd082be28b1266c93f618ee07741b704ab7b68501a173ce7d8d0d0ca18"}, 483 | {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:10cb847aeb1728412c666ab2e2000ba6f174f25b2bdc7292e7dd71b16db07568"}, 484 | {file = "regex-2023.6.3-cp310-cp310-win32.whl", hash = "sha256:dbbbfce33cd98f97f6bffb17801b0576e653f4fdb1d399b2ea89638bc8d08ae1"}, 485 | {file = "regex-2023.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:c5f8037000eb21e4823aa485149f2299eb589f8d1fe4b448036d230c3f4e68e0"}, 486 | {file = "regex-2023.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c123f662be8ec5ab4ea72ea300359023a5d1df095b7ead76fedcd8babbedf969"}, 487 | {file = "regex-2023.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9edcbad1f8a407e450fbac88d89e04e0b99a08473f666a3f3de0fd292badb6aa"}, 488 | {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcba6dae7de533c876255317c11f3abe4907ba7d9aa15d13e3d9710d4315ec0e"}, 489 | {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29cdd471ebf9e0f2fb3cac165efedc3c58db841d83a518b082077e612d3ee5df"}, 490 | {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12b74fbbf6cbbf9dbce20eb9b5879469e97aeeaa874145517563cca4029db65c"}, 491 | {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c29ca1bd61b16b67be247be87390ef1d1ef702800f91fbd1991f5c4421ebae8"}, 492 | 
{file = "regex-2023.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77f09bc4b55d4bf7cc5eba785d87001d6757b7c9eec237fe2af57aba1a071d9"}, 493 | {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ea353ecb6ab5f7e7d2f4372b1e779796ebd7b37352d290096978fea83c4dba0c"}, 494 | {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:10590510780b7541969287512d1b43f19f965c2ece6c9b1c00fc367b29d8dce7"}, 495 | {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e2fbd6236aae3b7f9d514312cdb58e6494ee1c76a9948adde6eba33eb1c4264f"}, 496 | {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:6b2675068c8b56f6bfd5a2bda55b8accbb96c02fd563704732fd1c95e2083461"}, 497 | {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74419d2b50ecb98360cfaa2974da8689cb3b45b9deff0dcf489c0d333bcc1477"}, 498 | {file = "regex-2023.6.3-cp311-cp311-win32.whl", hash = "sha256:fb5ec16523dc573a4b277663a2b5a364e2099902d3944c9419a40ebd56a118f9"}, 499 | {file = "regex-2023.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:09e4a1a6acc39294a36b7338819b10baceb227f7f7dbbea0506d419b5a1dd8af"}, 500 | {file = "regex-2023.6.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0654bca0cdf28a5956c83839162692725159f4cda8d63e0911a2c0dc76166525"}, 501 | {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:463b6a3ceb5ca952e66550a4532cef94c9a0c80dc156c4cc343041951aec1697"}, 502 | {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87b2a5bb5e78ee0ad1de71c664d6eb536dc3947a46a69182a90f4410f5e3f7dd"}, 503 | {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6343c6928282c1f6a9db41f5fd551662310e8774c0e5ebccb767002fcf663ca9"}, 504 | {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6192d5af2ccd2a38877bfef086d35e6659566a335b1492786ff254c168b1693"}, 505 | {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74390d18c75054947e4194019077e243c06fbb62e541d8817a0fa822ea310c14"}, 506 | {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:742e19a90d9bb2f4a6cf2862b8b06dea5e09b96c9f2df1779e53432d7275331f"}, 507 | {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8abbc5d54ea0ee80e37fef009e3cec5dafd722ed3c829126253d3e22f3846f1e"}, 508 | {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:c2b867c17a7a7ae44c43ebbeb1b5ff406b3e8d5b3e14662683e5e66e6cc868d3"}, 509 | {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d831c2f8ff278179705ca59f7e8524069c1a989e716a1874d6d1aab6119d91d1"}, 510 | {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ee2d1a9a253b1729bb2de27d41f696ae893507c7db224436abe83ee25356f5c1"}, 511 | {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:61474f0b41fe1a80e8dfa70f70ea1e047387b7cd01c85ec88fa44f5d7561d787"}, 512 | {file = "regex-2023.6.3-cp36-cp36m-win32.whl", hash = "sha256:0b71e63226e393b534105fcbdd8740410dc6b0854c2bfa39bbda6b0d40e59a54"}, 513 | {file = "regex-2023.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bbb02fd4462f37060122e5acacec78e49c0fbb303c30dd49c7f493cf21fc5b27"}, 514 | {file = 
"regex-2023.6.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b862c2b9d5ae38a68b92e215b93f98d4c5e9454fa36aae4450f61dd33ff48487"}, 515 | {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:976d7a304b59ede34ca2921305b57356694f9e6879db323fd90a80f865d355a3"}, 516 | {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:83320a09188e0e6c39088355d423aa9d056ad57a0b6c6381b300ec1a04ec3d16"}, 517 | {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9427a399501818a7564f8c90eced1e9e20709ece36be701f394ada99890ea4b3"}, 518 | {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178bbc1b2ec40eaca599d13c092079bf529679bf0371c602edaa555e10b41c3"}, 519 | {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:837328d14cde912af625d5f303ec29f7e28cdab588674897baafaf505341f2fc"}, 520 | {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2d44dc13229905ae96dd2ae2dd7cebf824ee92bc52e8cf03dcead37d926da019"}, 521 | {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d54af539295392611e7efbe94e827311eb8b29668e2b3f4cadcfe6f46df9c777"}, 522 | {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7117d10690c38a622e54c432dfbbd3cbd92f09401d622902c32f6d377e2300ee"}, 523 | {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bb60b503ec8a6e4e3e03a681072fa3a5adcbfa5479fa2d898ae2b4a8e24c4591"}, 524 | {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:65ba8603753cec91c71de423a943ba506363b0e5c3fdb913ef8f9caa14b2c7e0"}, 525 | {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:271f0bdba3c70b58e6f500b205d10a36fb4b58bd06ac61381b68de66442efddb"}, 526 | {file = "regex-2023.6.3-cp37-cp37m-win32.whl", hash = "sha256:9beb322958aaca059f34975b0df135181f2e5d7a13b84d3e0e45434749cb20f7"}, 527 | {file = "regex-2023.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fea75c3710d4f31389eed3c02f62d0b66a9da282521075061ce875eb5300cf23"}, 528 | {file = "regex-2023.6.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8f56fcb7ff7bf7404becdfc60b1e81a6d0561807051fd2f1860b0d0348156a07"}, 529 | {file = "regex-2023.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d2da3abc88711bce7557412310dfa50327d5769a31d1c894b58eb256459dc289"}, 530 | {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a99b50300df5add73d307cf66abea093304a07eb017bce94f01e795090dea87c"}, 531 | {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5708089ed5b40a7b2dc561e0c8baa9535b77771b64a8330b684823cfd5116036"}, 532 | {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:687ea9d78a4b1cf82f8479cab23678aff723108df3edeac098e5b2498879f4a7"}, 533 | {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3850beab9f527f06ccc94b446c864059c57651b3f911fddb8d9d3ec1d1b25d"}, 534 | {file = "regex-2023.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8915cc96abeb8983cea1df3c939e3c6e1ac778340c17732eb63bb96247b91d2"}, 535 | {file = 
"regex-2023.6.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:841d6e0e5663d4c7b4c8099c9997be748677d46cbf43f9f471150e560791f7ff"}, 536 | {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9edce5281f965cf135e19840f4d93d55b3835122aa76ccacfd389e880ba4cf82"}, 537 | {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b956231ebdc45f5b7a2e1f90f66a12be9610ce775fe1b1d50414aac1e9206c06"}, 538 | {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:36efeba71c6539d23c4643be88295ce8c82c88bbd7c65e8a24081d2ca123da3f"}, 539 | {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:cf67ca618b4fd34aee78740bea954d7c69fdda419eb208c2c0c7060bb822d747"}, 540 | {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b4598b1897837067a57b08147a68ac026c1e73b31ef6e36deeeb1fa60b2933c9"}, 541 | {file = "regex-2023.6.3-cp38-cp38-win32.whl", hash = "sha256:f415f802fbcafed5dcc694c13b1292f07fe0befdb94aa8a52905bd115ff41e88"}, 542 | {file = "regex-2023.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:d4f03bb71d482f979bda92e1427f3ec9b220e62a7dd337af0aa6b47bf4498f72"}, 543 | {file = "regex-2023.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccf91346b7bd20c790310c4147eee6ed495a54ddb6737162a36ce9dbef3e4751"}, 544 | {file = "regex-2023.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b28f5024a3a041009eb4c333863d7894d191215b39576535c6734cd88b0fcb68"}, 545 | {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0bb18053dfcfed432cc3ac632b5e5e5c5b7e55fb3f8090e867bfd9b054dbcbf"}, 546 | {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5bfb3004f2144a084a16ce19ca56b8ac46e6fd0651f54269fc9e230edb5e4a"}, 547 | {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c6b48d0fa50d8f4df3daf451be7f9689c2bde1a52b1225c5926e3f54b6a9ed1"}, 548 | {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:051da80e6eeb6e239e394ae60704d2b566aa6a7aed6f2890a7967307267a5dc6"}, 549 | {file = "regex-2023.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4c3b7fa4cdaa69268748665a1a6ff70c014d39bb69c50fda64b396c9116cf77"}, 550 | {file = "regex-2023.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:457b6cce21bee41ac292d6753d5e94dcbc5c9e3e3a834da285b0bde7aa4a11e9"}, 551 | {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:aad51907d74fc183033ad796dd4c2e080d1adcc4fd3c0fd4fd499f30c03011cd"}, 552 | {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0385e73da22363778ef2324950e08b689abdf0b108a7d8decb403ad7f5191938"}, 553 | {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c6a57b742133830eec44d9b2290daf5cbe0a2f1d6acee1b3c7b1c7b2f3606df7"}, 554 | {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3e5219bf9e75993d73ab3d25985c857c77e614525fac9ae02b1bebd92f7cecac"}, 555 | {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e5087a3c59eef624a4591ef9eaa6e9a8d8a94c779dade95d27c0bc24650261cd"}, 556 | {file = "regex-2023.6.3-cp39-cp39-win32.whl", hash = "sha256:20326216cc2afe69b6e98528160b225d72f85ab080cbdf0b11528cbbaba2248f"}, 557 | {file = 
"regex-2023.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:bdff5eab10e59cf26bc479f565e25ed71a7d041d1ded04ccf9aee1d9f208487a"}, 558 | {file = "regex-2023.6.3.tar.gz", hash = "sha256:72d1a25bf36d2050ceb35b517afe13864865268dfb45910e2e17a84be6cbfeb0"}, 559 | ] 560 | 561 | [[package]] 562 | name = "requests" 563 | version = "2.31.0" 564 | description = "Python HTTP for Humans." 565 | category = "main" 566 | optional = false 567 | python-versions = ">=3.7" 568 | files = [ 569 | {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, 570 | {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, 571 | ] 572 | 573 | [package.dependencies] 574 | certifi = ">=2017.4.17" 575 | charset-normalizer = ">=2,<4" 576 | idna = ">=2.5,<4" 577 | urllib3 = ">=1.21.1,<3" 578 | 579 | [package.extras] 580 | socks = ["PySocks (>=1.5.6,!=1.5.7)"] 581 | use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] 582 | 583 | [[package]] 584 | name = "safetensors" 585 | version = "0.3.1" 586 | description = "Fast and Safe Tensor serialization" 587 | category = "main" 588 | optional = false 589 | python-versions = "*" 590 | files = [ 591 | {file = "safetensors-0.3.1-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:2ae9b7dd268b4bae6624729dac86deb82104820e9786429b0583e5168db2f770"}, 592 | {file = "safetensors-0.3.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:08c85c1934682f1e2cd904d38433b53cd2a98245a7cc31f5689f9322a2320bbf"}, 593 | {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba625c7af9e1c5d0d91cb83d2fba97d29ea69d4db2015d9714d24c7f6d488e15"}, 594 | {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b57d5890c619ec10d9f1b6426b8690d0c9c2868a90dc52f13fae6f6407ac141f"}, 595 | {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c9f562ea696d50b95cadbeb1716dc476714a87792ffe374280c0835312cbfe2"}, 596 | {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c115951b3a865ece8d98ee43882f2fd0a999c0200d6e6fec24134715ebe3b57"}, 597 | {file = "safetensors-0.3.1-cp310-cp310-win32.whl", hash = "sha256:118f8f7503ea312fc7af27e934088a1b589fb1eff5a7dea2cd1de6c71ee33391"}, 598 | {file = "safetensors-0.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:54846eaae25fded28a7bebbb66be563cad221b4c80daee39e2f55df5e5e0266f"}, 599 | {file = "safetensors-0.3.1-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:5af82e10946c4822506db0f29269f43147e889054704dde994d4e22f0c37377b"}, 600 | {file = "safetensors-0.3.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:626c86dd1d930963c8ea7f953a3787ae85322551e3a5203ac731d6e6f3e18f44"}, 601 | {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12e30677e6af1f4cc4f2832546e91dbb3b0aa7d575bfa473d2899d524e1ace08"}, 602 | {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d534b80bc8d39945bb902f34b0454773971fe9e5e1f2142af451759d7e52b356"}, 603 | {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ddd0ddd502cf219666e7d30f23f196cb87e829439b52b39f3e7da7918c3416df"}, 604 | {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:997a2cc14023713f423e6d16536d55cb16a3d72850f142e05f82f0d4c76d383b"}, 605 | {file = "safetensors-0.3.1-cp311-cp311-win32.whl", hash = "sha256:6ae9ca63d9e22f71ec40550207bd284a60a6b4916ae6ca12c85a8d86bf49e0c3"}, 606 | {file = "safetensors-0.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:62aa7421ca455418423e35029524489480adda53e3f702453580180ecfebe476"}, 607 | {file = "safetensors-0.3.1-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:6d54b3ed367b6898baab75dfd057c24f36ec64d3938ffff2af981d56bfba2f42"}, 608 | {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:262423aeda91117010f8c607889066028f680fbb667f50cfe6eae96f22f9d150"}, 609 | {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10efe2513a8327fd628cea13167089588acc23093ba132aecfc536eb9a4560fe"}, 610 | {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:689b3d6a7ebce70ee9438267ee55ea89b575c19923876645e927d08757b552fe"}, 611 | {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14cd9a87bc73ce06903e9f8ee8b05b056af6f3c9f37a6bd74997a16ed36ff5f4"}, 612 | {file = "safetensors-0.3.1-cp37-cp37m-win32.whl", hash = "sha256:a77cb39624480d5f143c1cc272184f65a296f573d61629eff5d495d2e0541d3e"}, 613 | {file = "safetensors-0.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9eff3190bfbbb52eef729911345c643f875ca4dbb374aa6c559675cfd0ab73db"}, 614 | {file = "safetensors-0.3.1-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:05cbfef76e4daa14796db1bbb52072d4b72a44050c368b2b1f6fd3e610669a89"}, 615 | {file = "safetensors-0.3.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:c49061461f4a81e5ec3415070a3f135530834c89cbd6a7db7cd49e3cb9d9864b"}, 616 | {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22cf7e73ca42974f098ce0cf4dd8918983700b6b07a4c6827d50c8daefca776e"}, 617 | {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04f909442d6223ff0016cd2e1b2a95ef8039b92a558014627363a2e267213f62"}, 618 | {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c573c5a0d5d45791ae8c179e26d74aff86e719056591aa7edb3ca7be55bc961"}, 619 | {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6994043b12e717cf2a6ba69077ac41f0d3675b2819734f07f61819e854c622c7"}, 620 | {file = "safetensors-0.3.1-cp38-cp38-win32.whl", hash = "sha256:158ede81694180a0dbba59422bc304a78c054b305df993c0c6e39c6330fa9348"}, 621 | {file = "safetensors-0.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:afdc725beff7121ea8d39a7339f5a6abcb01daa189ea56290b67fe262d56e20f"}, 622 | {file = "safetensors-0.3.1-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:cba910fcc9e5e64d32d62b837388721165e9c7e45d23bc3a38ad57694b77f40d"}, 623 | {file = "safetensors-0.3.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a4f7dbfe7285573cdaddd85ef6fa84ebbed995d3703ab72d71257944e384612f"}, 624 | {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54aed0802f9eaa83ca7b1cbb986bfb90b8e2c67b6a4bcfe245627e17dad565d4"}, 625 | {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34b75a766f3cfc99fd4c33e329b76deae63f5f388e455d863a5d6e99472fca8e"}, 626 | {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1a0f31904f35dc14919a145b2d7a2d8842a43a18a629affe678233c4ea90b4af"}, 627 | {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcf527ecc5f58907fd9031510378105487f318cc91ecdc5aee3c7cc8f46030a8"}, 628 | {file = "safetensors-0.3.1-cp39-cp39-win32.whl", hash = "sha256:e2f083112cf97aa9611e2a05cc170a2795eccec5f6ff837f4565f950670a9d83"}, 629 | {file = "safetensors-0.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:5f4f614b8e8161cd8a9ca19c765d176a82b122fa3d3387b77862145bfe9b4e93"}, 630 | {file = "safetensors-0.3.1.tar.gz", hash = "sha256:571da56ff8d0bec8ae54923b621cda98d36dcef10feb36fd492c4d0c2cd0e869"}, 631 | ] 632 | 633 | [package.extras] 634 | all = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (>=2.11.0)", "torch (>=1.10)"] 635 | dev = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (>=2.11.0)", "torch (>=1.10)"] 636 | jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)"] 637 | numpy = ["numpy (>=1.21.6)"] 638 | paddlepaddle = ["paddlepaddle (>=2.4.1)"] 639 | quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] 640 | tensorflow = ["tensorflow (>=2.11.0)"] 641 | testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "numpy (>=1.21.6)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)"] 642 | torch = ["torch (>=1.10)"] 643 | 644 | [[package]] 645 | name = "sympy" 646 | version = "1.12" 647 | description = "Computer algebra system (CAS) in Python" 648 | category = "main" 649 | optional = false 650 | python-versions = ">=3.8" 651 | files = [ 652 | {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, 653 | {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, 654 | ] 655 | 656 | [package.dependencies] 657 | mpmath = ">=0.19" 658 | 659 | [[package]] 660 | name = "tokenizers" 661 | version = "0.13.3" 662 | description = "Fast and Customizable Tokenizers" 663 | category = "main" 664 | optional = false 665 | python-versions = "*" 666 | files = [ 667 | {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"}, 668 | {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"}, 669 | {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"}, 670 | {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"}, 671 | {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"}, 672 | {file = 
"tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"}, 673 | {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = "sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"}, 674 | {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"}, 675 | {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"}, 676 | {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"}, 677 | {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"}, 678 | {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"}, 679 | {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"}, 680 | {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"}, 681 | {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"}, 682 | {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"}, 683 | {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"}, 684 | {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"}, 685 | {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"}, 686 | {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"}, 687 | {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"}, 688 | {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"}, 689 | {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"}, 690 | {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"}, 691 | {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"}, 692 | {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"}, 693 | {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"}, 694 | 
{file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"}, 695 | {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"}, 696 | {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"}, 697 | {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"}, 698 | {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"}, 699 | {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"}, 700 | {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"}, 701 | {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"}, 702 | {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"}, 703 | {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"}, 704 | {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"}, 705 | {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"}, 706 | {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"}, 707 | ] 708 | 709 | [package.extras] 710 | dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] 711 | docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] 712 | testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] 713 | 714 | [[package]] 715 | name = "torch" 716 | version = "2.0.1" 717 | description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" 718 | category = "main" 719 | optional = false 720 | python-versions = ">=3.8.0" 721 | files = [ 722 | {file = "torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8ced00b3ba471856b993822508f77c98f48a458623596a4c43136158781e306a"}, 723 | {file = "torch-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:359bfaad94d1cda02ab775dc1cc386d585712329bb47b8741607ef6ef4950747"}, 724 | {file = "torch-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7c84e44d9002182edd859f3400deaa7410f5ec948a519cc7ef512c2f9b34d2c4"}, 725 | {file = "torch-2.0.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:567f84d657edc5582d716900543e6e62353dbe275e61cdc36eda4929e46df9e7"}, 726 | {file = "torch-2.0.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:787b5a78aa7917465e9b96399b883920c88a08f4eb63b5a5d2d1a16e27d2f89b"}, 727 | {file = "torch-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e617b1d0abaf6ced02dbb9486803abfef0d581609b09641b34fa315c9c40766d"}, 728 | {file = "torch-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = 
"sha256:b6019b1de4978e96daa21d6a3ebb41e88a0b474898fe251fd96189587408873e"}, 729 | {file = "torch-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:dbd68cbd1cd9da32fe5d294dd3411509b3d841baecb780b38b3b7b06c7754434"}, 730 | {file = "torch-2.0.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:ef654427d91600129864644e35deea761fb1fe131710180b952a6f2e2207075e"}, 731 | {file = "torch-2.0.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:25aa43ca80dcdf32f13da04c503ec7afdf8e77e3a0183dd85cd3e53b2842e527"}, 732 | {file = "torch-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ef3ea3d25441d3957348f7e99c7824d33798258a2bf5f0f0277cbcadad2e20d"}, 733 | {file = "torch-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0882243755ff28895e8e6dc6bc26ebcf5aa0911ed81b2a12f241fc4b09075b13"}, 734 | {file = "torch-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:f66aa6b9580a22b04d0af54fcd042f52406a8479e2b6a550e3d9f95963e168c8"}, 735 | {file = "torch-2.0.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:1adb60d369f2650cac8e9a95b1d5758e25d526a34808f7448d0bd599e4ae9072"}, 736 | {file = "torch-2.0.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:1bcffc16b89e296826b33b98db5166f990e3b72654a2b90673e817b16c50e32b"}, 737 | {file = "torch-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e10e1597f2175365285db1b24019eb6f04d53dcd626c735fc502f1e8b6be9875"}, 738 | {file = "torch-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:423e0ae257b756bb45a4b49072046772d1ad0c592265c5080070e0767da4e490"}, 739 | {file = "torch-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8742bdc62946c93f75ff92da00e3803216c6cce9b132fbca69664ca38cfb3e18"}, 740 | {file = "torch-2.0.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:c62df99352bd6ee5a5a8d1832452110435d178b5164de450831a3a8cc14dc680"}, 741 | {file = "torch-2.0.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:671a2565e3f63b8fe8e42ae3e36ad249fe5e567435ea27b94edaa672a7d0c416"}, 742 | ] 743 | 744 | [package.dependencies] 745 | filelock = "*" 746 | jinja2 = "*" 747 | networkx = "*" 748 | sympy = "*" 749 | typing-extensions = "*" 750 | 751 | [package.extras] 752 | opt-einsum = ["opt-einsum (>=3.3)"] 753 | 754 | [[package]] 755 | name = "tqdm" 756 | version = "4.65.0" 757 | description = "Fast, Extensible Progress Meter" 758 | category = "main" 759 | optional = false 760 | python-versions = ">=3.7" 761 | files = [ 762 | {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, 763 | {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, 764 | ] 765 | 766 | [package.dependencies] 767 | colorama = {version = "*", markers = "platform_system == \"Windows\""} 768 | 769 | [package.extras] 770 | dev = ["py-make (>=0.1.0)", "twine", "wheel"] 771 | notebook = ["ipywidgets (>=6)"] 772 | slack = ["slack-sdk"] 773 | telegram = ["requests"] 774 | 775 | [[package]] 776 | name = "transformers" 777 | version = "4.30.2" 778 | description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" 779 | category = "main" 780 | optional = false 781 | python-versions = ">=3.7.0" 782 | files = [ 783 | {file = "transformers-4.30.2-py3-none-any.whl", hash = "sha256:c332e3a3097f9ed89ce556b403251235931c00237b8bc2d7adaa19d226c13f1d"}, 784 | {file = "transformers-4.30.2.tar.gz", hash = "sha256:f4a8aac4e1baffab4033f4a345b0d7dc7957d12a4f1ba969afea08205a513045"}, 785 | ] 786 | 787 | [package.dependencies] 788 | filelock = "*" 789 | huggingface-hub = ">=0.14.1,<1.0" 
790 | numpy = ">=1.17" 791 | packaging = ">=20.0" 792 | pyyaml = ">=5.1" 793 | regex = "!=2019.12.17" 794 | requests = "*" 795 | safetensors = ">=0.3.1" 796 | tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.14" 797 | tqdm = ">=4.27" 798 | 799 | [package.extras] 800 | accelerate = ["accelerate (>=0.20.2)"] 801 | agents = ["Pillow", "accelerate (>=0.20.2)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.9,!=1.12.0)"] 802 | all = ["Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.3)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] 803 | audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] 804 | codecarbon = ["codecarbon (==1.2.0)"] 805 | deepspeed = ["accelerate (>=0.20.2)", "deepspeed (>=0.8.3)"] 806 | deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.2)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.8.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf (<=3.20.3)", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] 807 | dev = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.6.9)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] 808 | dev-tensorflow = ["GitPython (<3.1.19)", "Pillow", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", 
"pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"] 809 | dev-torch = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.20.2)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] 810 | docs = ["Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "hf-doc-builder", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.3)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] 811 | docs-specific = ["hf-doc-builder"] 812 | fairscale = ["fairscale (>0.3)"] 813 | flax = ["flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "optax (>=0.0.8,<=0.1.4)"] 814 | flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] 815 | ftfy = ["ftfy"] 816 | integrations = ["optuna", "ray[tune]", "sigopt"] 817 | ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] 818 | modelcreation = ["cookiecutter (==1.7.3)"] 819 | natten = ["natten (>=0.14.6)"] 820 | onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] 821 | onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] 822 | optuna = ["optuna"] 823 | quality = ["GitPython (<3.1.19)", "black (>=23.1,<24.0)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (>=0.0.241,<=0.0.259)", "urllib3 (<2.0.0)"] 824 | ray = ["ray[tune]"] 825 | retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] 826 | sagemaker = ["sagemaker (>=2.31.0)"] 827 | sentencepiece = ["protobuf (<=3.20.3)", "sentencepiece (>=0.1.91,!=0.1.92)"] 828 | serving = ["fastapi", "pydantic", "starlette", "uvicorn"] 829 | sigopt = ["sigopt"] 830 | sklearn = ["scikit-learn"] 831 | speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] 832 | testing = ["GitPython (<3.1.19)", 
"beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf (<=3.20.3)", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "timeout-decorator"] 833 | tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx"] 834 | tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx"] 835 | tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] 836 | timm = ["timm"] 837 | tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"] 838 | torch = ["accelerate (>=0.20.2)", "torch (>=1.9,!=1.12.0)"] 839 | torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] 840 | torch-vision = ["Pillow", "torchvision"] 841 | torchhub = ["filelock", "huggingface-hub (>=0.14.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.3)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "tqdm (>=4.27)"] 842 | video = ["av (==9.2.0)", "decord (==0.6.0)"] 843 | vision = ["Pillow"] 844 | 845 | [[package]] 846 | name = "typing-extensions" 847 | version = "4.7.1" 848 | description = "Backported and Experimental Type Hints for Python 3.7+" 849 | category = "main" 850 | optional = false 851 | python-versions = ">=3.7" 852 | files = [ 853 | {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, 854 | {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, 855 | ] 856 | 857 | [[package]] 858 | name = "urllib3" 859 | version = "2.0.3" 860 | description = "HTTP library with thread-safe connection pooling, file post, and more." 861 | category = "main" 862 | optional = false 863 | python-versions = ">=3.7" 864 | files = [ 865 | {file = "urllib3-2.0.3-py3-none-any.whl", hash = "sha256:48e7fafa40319d358848e1bc6809b208340fafe2096f1725d05d67443d0483d1"}, 866 | {file = "urllib3-2.0.3.tar.gz", hash = "sha256:bee28b5e56addb8226c96f7f13ac28cb4c301dd5ea8a6ca179c0b9835e032825"}, 867 | ] 868 | 869 | [package.extras] 870 | brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] 871 | secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] 872 | socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] 873 | zstd = ["zstandard (>=0.18.0)"] 874 | 875 | [extras] 876 | llama-cpp = ["llama-cpp-python"] 877 | 878 | [metadata] 879 | lock-version = "2.0" 880 | python-versions = "^3.11" 881 | content-hash = "dfee3d24560b70fe883b5226ffa68f428cc26500427e051ea99d21ca67cf0e8f" 882 | --------------------------------------------------------------------------------