├── llama_cpp ├── __init__.py ├── server │ ├── requirements.txt │ └── __main__.py ├── llama_types.py ├── llama_cpp.py └── llama.py ├── lib └── llama.so ├── .gitmodules ├── examples ├── high_level_api │ ├── high_level_api_embedding.py │ ├── high_level_api_inference.py │ ├── high_level_api_streaming.py │ ├── langchain_custom_llm.py │ └── fastapi_server.py ├── low_level_api │ ├── quantize.py │ ├── low_level_api_llama_cpp.py │ ├── common.py │ └── low_level_api_chat_cpp.py └── notebooks │ └── Clients.ipynb ├── mkdocs.yml ├── Dockerfile ├── CMakeLists.txt ├── pyproject.toml ├── .github └── workflows │ ├── publish-to-test.yaml │ ├── publish.yaml │ ├── build-and-release-docker.yml │ ├── build-and-release.yaml │ └── test.yaml ├── LICENSE.md ├── setup.py ├── tests └── test_llama.py ├── .gitignore ├── docs └── index.md ├── README.md └── poetry.lock /llama_cpp/__init__.py: -------------------------------------------------------------------------------- 1 | from .llama_cpp import * 2 | from .llama import * 3 | -------------------------------------------------------------------------------- /llama_cpp/server/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | pydantic 3 | uvicorn 4 | sse-starlette -------------------------------------------------------------------------------- /lib/llama.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLukas22/llama-cpp-python/main/lib/llama.so -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "vendor/llama.cpp"] 2 | path = vendor/llama.cpp 3 | url = git@github.com:ggerganov/llama.cpp.git 4 | -------------------------------------------------------------------------------- /examples/high_level_api/high_level_api_embedding.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from llama_cpp import Llama 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-model.bin") 7 | args = parser.parse_args() 8 | 9 | llm = Llama(model_path=args.model, embedding=True) 10 | 11 | print(llm.create_embedding("Hello world!")) 12 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: llama-cpp-python 2 | repo_url: https://github.com/abetlen/llama-cpp-python 3 | 4 | theme: 5 | name: "material" 6 | 7 | plugins: 8 | - mkdocstrings 9 | - search 10 | 11 | watch: 12 | - llama_cpp 13 | 14 | markdown_extensions: 15 | - pymdownx.highlight: 16 | anchor_linenums: true 17 | line_spans: __span 18 | pygments_lang_class: true 19 | - pymdownx.inlinehilite 20 | - pymdownx.snippets 21 | - pymdownx.superfences -------------------------------------------------------------------------------- /examples/high_level_api/high_level_api_inference.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | 4 | from llama_cpp import Llama 5 | 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin") 8 | args = parser.parse_args() 9 | 10 | llm = Llama(model_path=args.model) 11 | 12 | output = llm( 13 | "Question: What are the names of the planets in the 
solar system? Answer: ", 14 | max_tokens=48, 15 | stop=["Q:", "\n"], 16 | echo=True, 17 | ) 18 | 19 | print(json.dumps(output, indent=2)) 20 | -------------------------------------------------------------------------------- /examples/high_level_api/high_level_api_streaming.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | 4 | from llama_cpp import Llama 5 | 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin") 8 | args = parser.parse_args() 9 | 10 | llm = Llama(model_path=args.model) 11 | 12 | stream = llm( 13 | "Question: What are the names of the planets in the solar system? Answer: ", 14 | max_tokens=48, 15 | stop=["Q:", "\n"], 16 | stream=True, 17 | ) 18 | 19 | for output in stream: 20 | print(json.dumps(output, indent=2)) 21 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10 2 | 3 | # Install dependencies 4 | 5 | #Copy the compiled llama.so file to the container 6 | COPY ./lib /lib/ 7 | 8 | #Set the environment variable for the llama.so file 9 | ENV LLAMA_CPP_LIB /lib/llama.so 10 | 11 | # Copy the python code to the container 12 | RUN mkdir -p /app 13 | COPY ./llama_cpp /app 14 | WORKDIR /app 15 | 16 | #Add the current directory to the PYTHONPATH 17 | ENV PYTHONPATH "${PYTHONPATH}:/app" 18 | 19 | # Install Requirements 20 | RUN --mount=type=cache,target=/root/.cache/pip pip3 install -r ./server/requirements.txt 21 | 22 | #Set default environment variables 23 | ENV HOST 0.0.0.0 24 | ENV PORT 8000 25 | 26 | #Expose the port 27 | EXPOSE ${PORT} 28 | 29 | #Run the server 30 | CMD ["python3","-m","server"] 31 | 32 | 33 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.4...3.22) 2 | 3 | project(llama_cpp) 4 | 5 | if (UNIX) 6 | add_custom_command( 7 | OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/libllama.so 8 | COMMAND make libllama.so 9 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp 10 | ) 11 | add_custom_target( 12 | run ALL 13 | DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/libllama.so 14 | ) 15 | install( 16 | FILES ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/libllama.so 17 | DESTINATION llama_cpp 18 | ) 19 | else() 20 | set(BUILD_SHARED_LIBS "On") 21 | add_subdirectory(vendor/llama.cpp) 22 | install( 23 | TARGETS llama 24 | LIBRARY DESTINATION llama_cpp 25 | RUNTIME DESTINATION llama_cpp 26 | ) 27 | endif(UNIX) 28 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "llama_cpp_python" 3 | version = "0.1.34" 4 | description = "Python bindings for the llama.cpp library" 5 | authors = ["Andrei Betlen "] 6 | license = "MIT" 7 | readme = "README.md" 8 | homepage = "https://github.com/abetlen/llama-cpp-python" 9 | repository = "https://github.com/abetlen/llama-cpp-python" 10 | packages = [{include = "llama_cpp"}] 11 | include = [ 12 | "LICENSE.md", 13 | ] 14 | 15 | [tool.poetry.dependencies] 16 | python = "^3.8.1" 17 | typing-extensions = "^4.5.0" 18 | 19 | 20 | [tool.poetry.group.dev.dependencies] 21 | black = "^23.1.0" 22 | twine = "^4.0.2" 23 | 
mkdocs = "^1.4.2"
24 | mkdocstrings = {extras = ["python"], version = "^0.20.0"}
25 | mkdocs-material = "^9.1.4"
26 | pytest = "^7.2.2"
27 | 
28 | [build-system]
29 | requires = [
30 |     "setuptools>=42",
31 |     "scikit-build>=0.13",
32 |     "cmake>=3.18",
33 |     "ninja",
34 | ]
35 | build-backend = "setuptools.build_meta"
36 | 
--------------------------------------------------------------------------------
/.github/workflows/publish-to-test.yaml:
--------------------------------------------------------------------------------
1 | # Based on: https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
2 | 
3 | name: Publish to TestPyPI
4 | 
5 | on: workflow_dispatch
6 | 
7 | jobs:
8 |   build-n-publish:
9 |     name: Build and publish
10 |     runs-on: ubuntu-latest
11 | 
12 |     steps:
13 |       - uses: actions/checkout@v3
14 |         with:
15 |           submodules: "true"
16 |       - name: Set up Python
17 |         uses: actions/setup-python@v4
18 |         with:
19 |           python-version: "3.8"
20 |       - name: Install dependencies
21 |         run: |
22 |           python -m pip install --upgrade pip pytest cmake scikit-build setuptools
23 |       - name: Build source distribution
24 |         run: |
25 |           python setup.py sdist
26 |       - name: Publish to Test PyPI
27 |         uses: pypa/gh-action-pypi-publish@release/v1
28 |         with:
29 |           password: ${{ secrets.TEST_PYPI_API_TOKEN }}
30 |           repository-url: https://test.pypi.org/legacy/
--------------------------------------------------------------------------------
/examples/low_level_api/quantize.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import llama_cpp
4 | 
5 | 
6 | def main(args):
7 |     if not os.path.exists(args.fname_inp):
8 |         raise RuntimeError(f"Input file does not exist ({args.fname_inp})")
9 |     if os.path.exists(args.fname_out):
10 |         raise RuntimeError(f"Output file already exists ({args.fname_out})")
11 |     fname_inp = args.fname_inp.encode("utf-8")
12 |     fname_out = args.fname_out.encode("utf-8")
13 |     itype = args.type
14 |     return_code = llama_cpp.llama_model_quantize(fname_inp, fname_out, itype)
15 |     if return_code != 0:
16 |         raise RuntimeError("Failed to quantize model")
17 | 
18 | 
19 | if __name__ == "__main__":
20 |     parser = argparse.ArgumentParser()
21 |     parser.add_argument("fname_inp", type=str, help="Path to input model")
22 |     parser.add_argument("fname_out", type=str, help="Path to output model")
23 |     parser.add_argument("type", type=int, help="Type of quantization (2: q4_0, 3: q4_1)")
24 |     args = parser.parse_args()
25 |     main(args)
26 | 
--------------------------------------------------------------------------------
/.github/workflows/publish.yaml:
--------------------------------------------------------------------------------
1 | name: Publish to PyPI
2 | 
3 | # Based on: https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
4 | 
5 | on: workflow_dispatch
6 | 
7 | jobs:
8 |   build-n-publish:
9 |     name: Build and publish
10 |     runs-on: ubuntu-latest
11 | 
12 |     steps:
13 |       - uses: actions/checkout@v3
14 |         with:
15 |           submodules: "true"
16 |       - name: Set up Python
17 |         uses: actions/setup-python@v4
18 |         with:
19 |           python-version: "3.8"
20 |       - name: Install dependencies
21 |         run: |
22 |           python -m pip install --upgrade pip pytest cmake scikit-build setuptools
23 |       - name: Build source distribution
24 |         run: |
25 |           python setup.py sdist
26 |       - name: Publish distribution to PyPI
27 |         # TODO: move to tag based releases
28 |         # if: startsWith(github.ref, 'refs/tags')
29 
| uses: pypa/gh-action-pypi-publish@release/v1 30 | with: 31 | password: ${{ secrets.PYPI_API_TOKEN }} -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Andrei Betlen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /.github/workflows/build-and-release-docker.yml: -------------------------------------------------------------------------------- 1 | name: Build Release 2 | 3 | on: push 4 | 5 | jobs: 6 | build_docker: 7 | name: Build docker images 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - name: Checkout 12 | uses: actions/checkout@v3 13 | with: 14 | submodules: "true" 15 | 16 | - name: Setup cmake 17 | uses: jwlawson/actions-setup-cmake@v1.13 18 | with: 19 | cmake-version: '3.22.x' 20 | 21 | - name: Build libllama.so 22 | working-directory: ./vendor/llama.cpp 23 | run: | 24 | mkdir build 25 | cd build 26 | cmake .. -D LLAMA_STANDALONE=OFF -D LLAMA_AVX512=OFF -D BUILD_SHARED_LIBS=ON -D LLAMA_BUILD_TESTS=OFF -D LLAMA_BUILD_EXAMPLES=OFF 27 | cmake --build . 
--config Release 28 | 29 | - name: Copy libllama.so to lib folder and rename 30 | run: | 31 | mkdir lib 32 | cp ./vendor/llama.cpp/build/libllama.so ./lib/llama.so 33 | 34 | - uses: actions/upload-artifact@v3 35 | with: 36 | name: libllama 37 | path: ./lib/ 38 | 39 | 40 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from skbuild import setup 2 | 3 | from pathlib import Path 4 | 5 | this_directory = Path(__file__).parent 6 | long_description = (this_directory / "README.md").read_text(encoding="utf-8") 7 | 8 | setup( 9 | name="llama_cpp_python", 10 | description="A Python wrapper for llama.cpp", 11 | long_description=long_description, 12 | long_description_content_type="text/markdown", 13 | version="0.1.34", 14 | author="Andrei Betlen", 15 | author_email="abetlen@gmail.com", 16 | license="MIT", 17 | package_dir={"llama_cpp": "llama_cpp", "llama_cpp.server": "llama_cpp/server"}, 18 | packages=["llama_cpp", "llama_cpp.server"], 19 | install_requires=[ 20 | "typing-extensions>=4.5.0", 21 | ], 22 | extras_require={ 23 | "server": ["uvicorn>=0.21.1", "fastapi>=0.95.0", "sse-starlette>=1.3.3"], 24 | }, 25 | python_requires=">=3.7", 26 | classifiers=[ 27 | "Programming Language :: Python :: 3", 28 | "Programming Language :: Python :: 3.7", 29 | "Programming Language :: Python :: 3.8", 30 | "Programming Language :: Python :: 3.9", 31 | "Programming Language :: Python :: 3.10", 32 | "Programming Language :: Python :: 3.11", 33 | ], 34 | ) 35 | -------------------------------------------------------------------------------- /examples/high_level_api/langchain_custom_llm.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from llama_cpp import Llama 4 | 5 | from langchain.llms.base import LLM 6 | from typing import Optional, List, Mapping, Any 7 | 8 | 9 | class LlamaLLM(LLM): 10 | model_path: str 11 | llm: Llama 12 | 13 | @property 14 | def _llm_type(self) -> str: 15 | return "llama-cpp-python" 16 | 17 | def __init__(self, model_path: str, **kwargs: Any): 18 | model_path = model_path 19 | llm = Llama(model_path=model_path) 20 | super().__init__(model_path=model_path, llm=llm, **kwargs) 21 | 22 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 23 | response = self.llm(prompt, stop=stop or []) 24 | return response["choices"][0]["text"] 25 | 26 | @property 27 | def _identifying_params(self) -> Mapping[str, Any]: 28 | return {"model_path": self.model_path} 29 | 30 | 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin") 33 | args = parser.parse_args() 34 | 35 | # Load the model 36 | llm = LlamaLLM(model_path=args.model) 37 | 38 | # Basic Q&A 39 | answer = llm( 40 | "Question: What is the capital of France? Answer: ", stop=["Question:", "\n"] 41 | ) 42 | print(f"Answer: {answer.strip()}") 43 | 44 | # Using in a chain 45 | from langchain.prompts import PromptTemplate 46 | from langchain.chains import LLMChain 47 | 48 | prompt = PromptTemplate( 49 | input_variables=["product"], 50 | template="\n\n### Instruction:\nWrite a good name for a company that makes {product}\n\n### Response:\n", 51 | ) 52 | chain = LLMChain(llm=llm, prompt=prompt) 53 | 54 | # Run the chain only specifying the input variable. 
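# chain.run fills the {product} slot in the prompt template above and returns the model's completion as a plain string.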
55 | print(chain.run("colorful socks")) 56 | -------------------------------------------------------------------------------- /.github/workflows/build-and-release.yaml: -------------------------------------------------------------------------------- 1 | name: Build Release 2 | 3 | on: workflow_dispatch 4 | 5 | permissions: 6 | contents: write 7 | 8 | jobs: 9 | build_wheels: 10 | name: Build wheels on ${{ matrix.os }} 11 | runs-on: ${{ matrix.os }} 12 | strategy: 13 | matrix: 14 | os: [ubuntu-latest, windows-latest, macOS-latest] 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | with: 19 | submodules: "true" 20 | 21 | # Used to host cibuildwheel 22 | - uses: actions/setup-python@v3 23 | 24 | - name: Install cibuildwheel 25 | run: python -m pip install cibuildwheel==2.12.1 26 | 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip pytest cmake scikit-build setuptools 30 | 31 | - name: Build wheels 32 | run: python -m cibuildwheel --output-dir wheelhouse 33 | 34 | - uses: actions/upload-artifact@v3 35 | with: 36 | path: ./wheelhouse/*.whl 37 | 38 | build_sdist: 39 | name: Build source distribution 40 | runs-on: ubuntu-latest 41 | 42 | steps: 43 | - uses: actions/checkout@v3 44 | with: 45 | submodules: "true" 46 | - uses: actions/setup-python@v3 47 | - name: Install dependencies 48 | run: | 49 | python -m pip install --upgrade pip pytest cmake scikit-build setuptools 50 | - name: Build source distribution 51 | run: | 52 | python setup.py sdist 53 | - uses: actions/upload-artifact@v3 54 | with: 55 | path: ./dist/*.tar.gz 56 | 57 | release: 58 | name: Release 59 | needs: [build_wheels, build_sdist] 60 | runs-on: ubuntu-latest 61 | 62 | steps: 63 | - uses: actions/download-artifact@v3 64 | with: 65 | name: artifact 66 | path: dist 67 | - uses: softprops/action-gh-release@v1 68 | with: 69 | files: dist/* 70 | env: 71 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | build-linux: 13 | 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] 18 | 19 | steps: 20 | - uses: actions/checkout@v3 21 | with: 22 | submodules: "true" 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v4 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip pytest cmake scikit-build setuptools 30 | pip install . -v 31 | - name: Test with pytest 32 | run: | 33 | pytest 34 | 35 | build-windows: 36 | 37 | runs-on: windows-latest 38 | strategy: 39 | matrix: 40 | python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] 41 | 42 | steps: 43 | - uses: actions/checkout@v3 44 | with: 45 | submodules: "true" 46 | - name: Set up Python ${{ matrix.python-version }} 47 | uses: actions/setup-python@v4 48 | with: 49 | python-version: ${{ matrix.python-version }} 50 | - name: Install dependencies 51 | run: | 52 | python -m pip install --upgrade pip pytest cmake scikit-build setuptools 53 | pip install . 
-v 54 | - name: Test with pytest 55 | run: | 56 | pytest 57 | 58 | build-macos: 59 | 60 | runs-on: macos-latest 61 | strategy: 62 | matrix: 63 | python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] 64 | 65 | steps: 66 | - uses: actions/checkout@v3 67 | with: 68 | submodules: "true" 69 | - name: Set up Python ${{ matrix.python-version }} 70 | uses: actions/setup-python@v4 71 | with: 72 | python-version: ${{ matrix.python-version }} 73 | - name: Install dependencies 74 | run: | 75 | python -m pip install --upgrade pip pytest cmake scikit-build setuptools 76 | pip install . -v 77 | - name: Test with pytest 78 | run: | 79 | pytest -------------------------------------------------------------------------------- /llama_cpp/llama_types.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, Dict, Union 2 | from typing_extensions import TypedDict, NotRequired, Literal 3 | 4 | 5 | class EmbeddingUsage(TypedDict): 6 | prompt_tokens: int 7 | total_tokens: int 8 | 9 | 10 | class EmbeddingData(TypedDict): 11 | index: int 12 | object: str 13 | embedding: List[float] 14 | 15 | 16 | class Embedding(TypedDict): 17 | object: Literal["list"] 18 | model: str 19 | data: List[EmbeddingData] 20 | usage: EmbeddingUsage 21 | 22 | 23 | class CompletionLogprobs(TypedDict): 24 | text_offset: List[int] 25 | token_logprobs: List[float] 26 | tokens: List[str] 27 | top_logprobs: List[Dict[str, float]] 28 | 29 | 30 | class CompletionChoice(TypedDict): 31 | text: str 32 | index: int 33 | logprobs: Optional[CompletionLogprobs] 34 | finish_reason: Optional[str] 35 | 36 | 37 | class CompletionUsage(TypedDict): 38 | prompt_tokens: int 39 | completion_tokens: int 40 | total_tokens: int 41 | 42 | 43 | class CompletionChunk(TypedDict): 44 | id: str 45 | object: Literal["text_completion"] 46 | created: int 47 | model: str 48 | choices: List[CompletionChoice] 49 | 50 | 51 | class Completion(TypedDict): 52 | id: str 53 | object: Literal["text_completion"] 54 | created: int 55 | model: str 56 | choices: List[CompletionChoice] 57 | usage: CompletionUsage 58 | 59 | 60 | class ChatCompletionMessage(TypedDict): 61 | role: Union[Literal["assistant"], Literal["user"], Literal["system"]] 62 | content: str 63 | user: NotRequired[str] 64 | 65 | 66 | class ChatCompletionChoice(TypedDict): 67 | index: int 68 | message: ChatCompletionMessage 69 | finish_reason: Optional[str] 70 | 71 | 72 | class ChatCompletion(TypedDict): 73 | id: str 74 | object: Literal["chat.completion"] 75 | created: int 76 | model: str 77 | choices: List[ChatCompletionChoice] 78 | usage: CompletionUsage 79 | 80 | 81 | class ChatCompletionChunkDelta(TypedDict): 82 | role: NotRequired[Literal["assistant"]] 83 | content: NotRequired[str] 84 | 85 | 86 | class ChatCompletionChunkChoice(TypedDict): 87 | index: int 88 | delta: ChatCompletionChunkDelta 89 | finish_reason: Optional[str] 90 | 91 | 92 | class ChatCompletionChunk(TypedDict): 93 | id: str 94 | model: str 95 | object: Literal["chat.completion.chunk"] 96 | created: int 97 | choices: List[ChatCompletionChunkChoice] 98 | -------------------------------------------------------------------------------- /examples/low_level_api/low_level_api_llama_cpp.py: -------------------------------------------------------------------------------- 1 | import llama_cpp 2 | 3 | import multiprocessing 4 | 5 | import llama_cpp 6 | 7 | N_THREADS = multiprocessing.cpu_count() 8 | 9 | prompt = b"\n\n### Instruction:\nWhat is the capital of France?\n\n### Response:\n" 10 | 11 | 
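# Set up the llama context: fetch the library's default context parameters and load the model; the path below is this example's placeholder and may need adjusting.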
lparams = llama_cpp.llama_context_default_params() 12 | ctx = llama_cpp.llama_init_from_file(b"../models/7B/ggml-model.bin", lparams) 13 | 14 | # determine the required inference memory per token: 15 | tmp = [0, 1, 2, 3] 16 | llama_cpp.llama_eval(ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, N_THREADS) 17 | 18 | n_past = 0 19 | 20 | prompt = b" " + prompt 21 | 22 | embd_inp = (llama_cpp.llama_token * (len(prompt) + 1))() 23 | n_of_tok = llama_cpp.llama_tokenize(ctx, prompt, embd_inp, len(embd_inp), True) 24 | embd_inp = embd_inp[:n_of_tok] 25 | 26 | n_ctx = llama_cpp.llama_n_ctx(ctx) 27 | 28 | n_predict = 20 29 | n_predict = min(n_predict, n_ctx - len(embd_inp)) 30 | 31 | input_consumed = 0 32 | input_noecho = False 33 | 34 | remaining_tokens = n_predict 35 | 36 | embd = [] 37 | last_n_size = 64 38 | last_n_tokens_data = [0] * last_n_size 39 | n_batch = 24 40 | 41 | while remaining_tokens > 0: 42 | if len(embd) > 0: 43 | llama_cpp.llama_eval( 44 | ctx, (llama_cpp.c_int * len(embd))(*embd), len(embd), n_past, N_THREADS 45 | ) 46 | 47 | n_past += len(embd) 48 | embd = [] 49 | if len(embd_inp) <= input_consumed: 50 | id = llama_cpp.llama_sample_top_p_top_k( 51 | ctx, 52 | (llama_cpp.c_int * len(last_n_tokens_data))(*last_n_tokens_data), 53 | len(last_n_tokens_data), 54 | 40, 55 | 0.8, 56 | 0.2, 57 | 1.0 / 0.85, 58 | ) 59 | last_n_tokens_data = last_n_tokens_data[1:] + [id] 60 | embd.append(id) 61 | input_noecho = False 62 | remaining_tokens -= 1 63 | else: 64 | while len(embd_inp) > input_consumed: 65 | embd.append(embd_inp[input_consumed]) 66 | last_n_tokens_data = last_n_tokens_data[1:] + [embd_inp[input_consumed]] 67 | input_consumed += 1 68 | if len(embd) >= n_batch: 69 | break 70 | if not input_noecho: 71 | for id in embd: 72 | print( 73 | llama_cpp.llama_token_to_str(ctx, id).decode("utf-8"), 74 | end="", 75 | flush=True, 76 | ) 77 | 78 | if len(embd) > 0 and embd[-1] == llama_cpp.llama_token_eos(): 79 | break 80 | 81 | print() 82 | 83 | llama_cpp.llama_print_timings(ctx) 84 | 85 | llama_cpp.llama_free(ctx) 86 | -------------------------------------------------------------------------------- /examples/notebooks/Clients.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "data": { 10 | "text/plain": [ 11 | " JSON: {\n", 12 | " \"choices\": [\n", 13 | " {\n", 14 | " \"finish_reason\": \"length\",\n", 15 | " \"index\": 0,\n", 16 | " \"logprobs\": null,\n", 17 | " \"text\": \" over the lazy dog.\"\n", 18 | " }\n", 19 | " ],\n", 20 | " \"created\": 1680960690,\n", 21 | " \"id\": \"cmpl-ad3ba53d-407c-466b-bd5f-97cb8987af83\",\n", 22 | " \"model\": \"models/ggml-alpaca.bin\",\n", 23 | " \"object\": \"text_completion\",\n", 24 | " \"usage\": {\n", 25 | " \"completion_tokens\": 5,\n", 26 | " \"prompt_tokens\": 8,\n", 27 | " \"total_tokens\": 13\n", 28 | " }\n", 29 | "}" 30 | ] 31 | }, 32 | "execution_count": 1, 33 | "metadata": {}, 34 | "output_type": "execute_result" 35 | } 36 | ], 37 | "source": [ 38 | "import openai\n", 39 | "\n", 40 | "openai.api_key = \"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # can be anything\n", 41 | "openai.api_base = \"http://100.64.159.73:8000/v1\"\n", 42 | "\n", 43 | "openai.Completion.create(\n", 44 | " model=\"text-davinci-003\", # currently can be anything\n", 45 | " prompt=\"The quick brown fox jumps\",\n", 46 | " max_tokens=5,\n", 47 | ")" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | 
"execution_count": 2, 53 | "metadata": {}, 54 | "outputs": [ 55 | { 56 | "data": { 57 | "text/plain": [ 58 | "' over the lazy dog'" 59 | ] 60 | }, 61 | "execution_count": 2, 62 | "metadata": {}, 63 | "output_type": "execute_result" 64 | } 65 | ], 66 | "source": [ 67 | "import os\n", 68 | "\n", 69 | "os.environ[\"OPENAI_API_KEY\"] = \"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # can be anything\n", 70 | "os.environ[\"OPENAI_API_BASE\"] = \"http://100.64.159.73:8000/v1\"\n", 71 | "\n", 72 | "from langchain.llms import OpenAI\n", 73 | "\n", 74 | "llms = OpenAI()\n", 75 | "llms(\n", 76 | " prompt=\"The quick brown fox jumps\",\n", 77 | " stop=[\".\", \"\\n\"],\n", 78 | ")" 79 | ] 80 | } 81 | ], 82 | "metadata": { 83 | "kernelspec": { 84 | "display_name": ".venv", 85 | "language": "python", 86 | "name": "python3" 87 | }, 88 | "language_info": { 89 | "codemirror_mode": { 90 | "name": "ipython", 91 | "version": 3 92 | }, 93 | "file_extension": ".py", 94 | "mimetype": "text/x-python", 95 | "name": "python", 96 | "nbconvert_exporter": "python", 97 | "pygments_lexer": "ipython3", 98 | "version": "3.8.10" 99 | }, 100 | "orig_nbformat": 4 101 | }, 102 | "nbformat": 4, 103 | "nbformat_minor": 2 104 | } 105 | -------------------------------------------------------------------------------- /tests/test_llama.py: -------------------------------------------------------------------------------- 1 | import llama_cpp 2 | 3 | MODEL = "./vendor/llama.cpp/models/ggml-vocab.bin" 4 | 5 | 6 | def test_llama(): 7 | llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) 8 | 9 | assert llama 10 | assert llama.ctx is not None 11 | 12 | text = b"Hello World" 13 | 14 | assert llama.detokenize(llama.tokenize(text)) == text 15 | 16 | 17 | def test_llama_patch(monkeypatch): 18 | llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) 19 | 20 | ## Set up mock function 21 | def mock_eval(*args, **kwargs): 22 | return 0 23 | 24 | monkeypatch.setattr("llama_cpp.llama_cpp.llama_eval", mock_eval) 25 | 26 | output_text = " jumps over the lazy dog." 
27 | output_tokens = llama.tokenize(output_text.encode("utf-8")) 28 | token_eos = llama.token_eos() 29 | n = 0 30 | 31 | def mock_sample(*args, **kwargs): 32 | nonlocal n 33 | if n < len(output_tokens): 34 | n += 1 35 | return output_tokens[n - 1] 36 | else: 37 | return token_eos 38 | 39 | monkeypatch.setattr("llama_cpp.llama_cpp.llama_sample_top_p_top_k", mock_sample) 40 | 41 | text = "The quick brown fox" 42 | 43 | ## Test basic completion until eos 44 | n = 0 # reset 45 | completion = llama.create_completion(text, max_tokens=20) 46 | assert completion["choices"][0]["text"] == output_text 47 | assert completion["choices"][0]["finish_reason"] == "stop" 48 | 49 | ## Test streaming completion until eos 50 | n = 0 # reset 51 | chunks = llama.create_completion(text, max_tokens=20, stream=True) 52 | assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == output_text 53 | assert completion["choices"][0]["finish_reason"] == "stop" 54 | 55 | ## Test basic completion until stop sequence 56 | n = 0 # reset 57 | completion = llama.create_completion(text, max_tokens=20, stop=["lazy"]) 58 | assert completion["choices"][0]["text"] == " jumps over the " 59 | assert completion["choices"][0]["finish_reason"] == "stop" 60 | 61 | ## Test streaming completion until stop sequence 62 | n = 0 # reset 63 | chunks = llama.create_completion(text, max_tokens=20, stream=True, stop=["lazy"]) 64 | assert ( 65 | "".join(chunk["choices"][0]["text"] for chunk in chunks) == " jumps over the " 66 | ) 67 | assert completion["choices"][0]["finish_reason"] == "stop" 68 | 69 | ## Test basic completion until length 70 | n = 0 # reset 71 | completion = llama.create_completion(text, max_tokens=2) 72 | assert completion["choices"][0]["text"] == " j" 73 | assert completion["choices"][0]["finish_reason"] == "length" 74 | 75 | ## Test streaming completion until length 76 | n = 0 # reset 77 | chunks = llama.create_completion(text, max_tokens=2, stream=True) 78 | assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == " j" 79 | assert completion["choices"][0]["finish_reason"] == "length" 80 | 81 | 82 | def test_llama_pickle(): 83 | import pickle 84 | import tempfile 85 | fp = tempfile.TemporaryFile() 86 | llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) 87 | pickle.dump(llama, fp) 88 | fp.seek(0) 89 | llama = pickle.load(fp) 90 | 91 | assert llama 92 | assert llama.ctx is not None 93 | 94 | text = b"Hello World" 95 | 96 | assert llama.detokenize(llama.tokenize(text)) == text -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _skbuild/ 2 | 3 | .envrc 4 | 5 | models/ 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # poetry 100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 101 | # This is especially recommended for binary packages to ensure reproducibility, and is more 102 | # commonly ignored for libraries. 103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 104 | #poetry.lock 105 | 106 | # pdm 107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 108 | #pdm.lock 109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 110 | # in version control. 111 | # https://pdm.fming.dev/#use-with-ide 112 | .pdm.toml 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
162 | .idea/ 163 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | ## 🦙 Python Bindings for `llama.cpp` 4 | 5 | [![Documentation](https://img.shields.io/badge/docs-passing-green.svg)](https://abetlen.github.io/llama-cpp-python) 6 | [![Tests](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml/badge.svg?branch=main)](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml) 7 | [![PyPI](https://img.shields.io/pypi/v/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 8 | [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 9 | [![PyPI - License](https://img.shields.io/pypi/l/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 10 | [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 11 | 12 | Simple Python bindings for **@ggerganov's** [`llama.cpp`](https://github.com/ggerganov/llama.cpp) library. 13 | This package provides: 14 | 15 | - Low-level access to C API via `ctypes` interface. 16 | - High-level Python API for text completion 17 | - OpenAI-like API 18 | - LangChain compatibility 19 | 20 | ## Installation 21 | 22 | Install from PyPI: 23 | 24 | ```bash 25 | pip install llama-cpp-python 26 | ``` 27 | 28 | ## High-level API 29 | 30 | ```python 31 | >>> from llama_cpp import Llama 32 | >>> llm = Llama(model_path="./models/7B/ggml-model.bin") 33 | >>> output = llm("Q: Name the planets in the solar system? A: ", max_tokens=32, stop=["Q:", "\n"], echo=True) 34 | >>> print(output) 35 | { 36 | "id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", 37 | "object": "text_completion", 38 | "created": 1679561337, 39 | "model": "./models/7B/ggml-model.bin", 40 | "choices": [ 41 | { 42 | "text": "Q: Name the planets in the solar system? A: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune and Pluto.", 43 | "index": 0, 44 | "logprobs": None, 45 | "finish_reason": "stop" 46 | } 47 | ], 48 | "usage": { 49 | "prompt_tokens": 14, 50 | "completion_tokens": 28, 51 | "total_tokens": 42 52 | } 53 | } 54 | ``` 55 | 56 | ## Web Server 57 | 58 | `llama-cpp-python` offers a web server which aims to act as a drop-in replacement for the OpenAI API. 59 | This allows you to use llama.cpp compatible models with any OpenAI compatible client (language libraries, services, etc). 60 | 61 | To install the server package and get started: 62 | 63 | ```bash 64 | pip install llama-cpp-python[server] 65 | export MODEL=./models/7B/ggml-model.bin 66 | python3 -m llama_cpp.server 67 | ``` 68 | 69 | Navigate to [http://localhost:8000/docs](http://localhost:8000/docs) to see the OpenAPI documentation. 70 | 71 | ## Low-level API 72 | 73 | The low-level API is a direct `ctypes` binding to the C API provided by `llama.cpp`. 74 | The entire API can be found in [llama_cpp/llama_cpp.py](https://github.com/abetlen/llama-cpp-python/blob/master/llama_cpp/llama_cpp.py) and should mirror [llama.h](https://github.com/ggerganov/llama.cpp/blob/master/llama.h). 75 | 76 | 77 | ## Development 78 | 79 | This package is under active development and I welcome any contributions. 
80 | 81 | To get started, clone the repository and install the package in development mode: 82 | 83 | ```bash 84 | git clone git@github.com:abetlen/llama-cpp-python.git 85 | git submodule update --init --recursive 86 | # Will need to be re-run any time vendor/llama.cpp is updated 87 | python3 setup.py develop 88 | ``` 89 | 90 | ## API Reference 91 | 92 | ::: llama_cpp.Llama 93 | options: 94 | members: 95 | - __init__ 96 | - tokenize 97 | - detokenize 98 | - reset 99 | - eval 100 | - sample 101 | - generate 102 | - create_embedding 103 | - embed 104 | - create_completion 105 | - __call__ 106 | - create_chat_completion 107 | - set_cache 108 | - token_bos 109 | - token_eos 110 | show_root_heading: true 111 | 112 | ::: llama_cpp.LlamaCache 113 | 114 | ::: llama_cpp.llama_cpp 115 | options: 116 | show_if_no_docstring: true 117 | 118 | ## License 119 | 120 | This project is licensed under the terms of the MIT license. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🦙 Python Bindings for `llama.cpp` 2 | 3 | [![Documentation](https://img.shields.io/badge/docs-passing-green.svg)](https://abetlen.github.io/llama-cpp-python) 4 | [![Tests](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml/badge.svg?branch=main)](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml) 5 | [![PyPI](https://img.shields.io/pypi/v/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 6 | [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 7 | [![PyPI - License](https://img.shields.io/pypi/l/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 8 | [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) 9 | 10 | Simple Python bindings for **@ggerganov's** [`llama.cpp`](https://github.com/ggerganov/llama.cpp) library. 11 | This package provides: 12 | 13 | - Low-level access to C API via `ctypes` interface. 14 | - High-level Python API for text completion 15 | - OpenAI-like API 16 | - LangChain compatibility 17 | 18 | ## Installation 19 | 20 | Install from PyPI: 21 | 22 | ```bash 23 | pip install llama-cpp-python 24 | ``` 25 | 26 | ## High-level API 27 | 28 | ```python 29 | >>> from llama_cpp import Llama 30 | >>> llm = Llama(model_path="./models/7B/ggml-model.bin") 31 | >>> output = llm("Q: Name the planets in the solar system? A: ", max_tokens=32, stop=["Q:", "\n"], echo=True) 32 | >>> print(output) 33 | { 34 | "id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", 35 | "object": "text_completion", 36 | "created": 1679561337, 37 | "model": "./models/7B/ggml-model.bin", 38 | "choices": [ 39 | { 40 | "text": "Q: Name the planets in the solar system? A: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune and Pluto.", 41 | "index": 0, 42 | "logprobs": None, 43 | "finish_reason": "stop" 44 | } 45 | ], 46 | "usage": { 47 | "prompt_tokens": 14, 48 | "completion_tokens": 28, 49 | "total_tokens": 42 50 | } 51 | } 52 | ``` 53 | 54 | ## Web Server 55 | 56 | `llama-cpp-python` offers a web server which aims to act as a drop-in replacement for the OpenAI API. 57 | This allows you to use llama.cpp compatible models with any OpenAI compatible client (language libraries, services, etc). 
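For example, once the server described below is installed and running, the stock `openai` Python client can be pointed at it by overriding the API base URL. A minimal sketch (mirroring `examples/notebooks/Clients.ipynb`), assuming the default host and port and the 0.x `openai` package:

```python
import openai

openai.api_key = "sk-xxx"  # placeholder; the local server does not check the key
openai.api_base = "http://localhost:8000/v1"

completion = openai.Completion.create(
    model="text-davinci-003",  # currently ignored by the server
    prompt="Q: Name the planets in the solar system? A: ",
    max_tokens=32,
    stop=["Q:", "\n"],
)
print(completion["choices"][0]["text"])
```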
58 | 59 | To install the server package and get started: 60 | 61 | ```bash 62 | pip install llama-cpp-python[server] 63 | export MODEL=./models/7B/ggml-model.bin 64 | python3 -m llama_cpp.server 65 | ``` 66 | 67 | Navigate to [http://localhost:8000/docs](http://localhost:8000/docs) to see the OpenAPI documentation. 68 | 69 | ## Low-level API 70 | 71 | The low-level API is a direct `ctypes` binding to the C API provided by `llama.cpp`. 72 | The entire API can be found in [llama_cpp/llama_cpp.py](https://github.com/abetlen/llama-cpp-python/blob/master/llama_cpp/llama_cpp.py) and should mirror [llama.h](https://github.com/ggerganov/llama.cpp/blob/master/llama.h). 73 | 74 | 75 | # Documentation 76 | 77 | Documentation is available at [https://abetlen.github.io/llama-cpp-python](https://abetlen.github.io/llama-cpp-python). 78 | If you find any issues with the documentation, please open an issue or submit a PR. 79 | 80 | # Development 81 | 82 | This package is under active development and I welcome any contributions. 83 | 84 | To get started, clone the repository and install the package in development mode: 85 | 86 | ```bash 87 | git clone git@github.com:abetlen/llama-cpp-python.git 88 | git submodule update --init --recursive 89 | # Will need to be re-run any time vendor/llama.cpp is updated 90 | python3 setup.py develop 91 | ``` 92 | 93 | # How does this compare to other Python bindings of `llama.cpp`? 94 | 95 | I originally wrote this package for my own use with two goals in mind: 96 | 97 | - Provide a simple process to install `llama.cpp` and access the full C API in `llama.h` from Python 98 | - Provide a high-level Python API that can be used as a drop-in replacement for the OpenAI API so existing apps can be easily ported to use `llama.cpp` 99 | 100 | Any contributions and changes to this package will be made with these goals in mind. 101 | 102 | # License 103 | 104 | This project is licensed under the terms of the MIT license. 105 | -------------------------------------------------------------------------------- /examples/low_level_api/common.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | 4 | from dataclasses import dataclass, field 5 | from typing import List, Optional 6 | 7 | # Based on https://github.com/ggerganov/llama.cpp/blob/master/examples/common.cpp 8 | 9 | 10 | @dataclass 11 | class GptParams: 12 | seed: int = -1 13 | n_threads: int = min(4, os.cpu_count() or 1) 14 | n_predict: int = 128 15 | repeat_last_n: int = 64 16 | n_parts: int = -1 17 | n_ctx: int = 512 18 | n_batch: int = 8 19 | n_keep: int = 0 20 | 21 | top_k: int = 40 22 | top_p: float = 0.95 23 | temp: float = 0.80 24 | repeat_penalty: float = 1.10 25 | 26 | model: str = "./models/llama-7B/ggml-model.bin" 27 | prompt: str = "" 28 | input_prefix: str = " " 29 | 30 | antiprompt: List[str] = field(default_factory=list) 31 | 32 | memory_f16: bool = True 33 | random_prompt: bool = False 34 | use_color: bool = False 35 | interactive: bool = False 36 | 37 | embedding: bool = False 38 | interactive_start: bool = False 39 | 40 | instruct: bool = False 41 | ignore_eos: bool = False 42 | perplexity: bool = False 43 | use_mmap: bool = True 44 | use_mlock: bool = False 45 | mem_test: bool = False 46 | verbose_prompt: bool = False 47 | 48 | file: str = None 49 | 50 | # If chat ended prematurely, append this to the conversation to fix it. 51 | # Set to "\nUser:" etc. 
52 |     # This is an alternative to input_prefix, which always adds it, so it can end up duplicating "User:".
53 |     fix_prefix: str = " "
54 |     output_postfix: str = ""
55 |     input_echo: bool = True
56 | 
57 |     # Default instructions for Alpaca
58 |     # switch to "Human" and "Assistant" for Vicuna.
59 |     # TODO: TBD how they are gonna handle this upstream
60 |     instruct_inp_prefix: str="\n\n### Instruction:\n\n"
61 |     instruct_inp_suffix: str="\n\n### Response:\n\n"
62 | 
63 | 
64 | def gpt_params_parse(argv = None, params: Optional[GptParams] = None):
65 |     if params is None:
66 |         params = GptParams()
67 | 
68 |     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
69 |     parser.add_argument("-s", "--seed", type=int, default=-1, help="RNG seed (use random seed for <= 0)",dest="seed")
70 |     parser.add_argument("-t", "--threads", type=int, default=min(4, os.cpu_count() or 1), help="number of threads to use during computation",dest="n_threads")
71 |     parser.add_argument("-p", "--prompt", type=str, default="", help="initial prompt",dest="prompt")
72 |     parser.add_argument("-f", "--file", type=str, default=None, help="file containing initial prompt to load",dest="file")
73 |     parser.add_argument("-c", "--ctx_size", type=int, default=512, help="size of the prompt context",dest="n_ctx")
74 |     parser.add_argument("--memory_f32", action="store_false", help="use f32 instead of f16 for memory key+value",dest="memory_f16")
75 |     parser.add_argument("--top_p", type=float, default=0.95, help="top-p sampling",dest="top_p")
76 |     parser.add_argument("--top_k", type=int, default=40, help="top-k sampling",dest="top_k")
77 |     parser.add_argument("--temp", type=float, default=0.80, help="temperature",dest="temp")
78 |     parser.add_argument("--n_predict", type=int, default=128, help="number of tokens to predict",dest="n_predict")
79 |     parser.add_argument("--repeat_last_n", type=int, default=64, help="last n tokens to consider for the repeat penalty",dest="repeat_last_n")
80 |     parser.add_argument("--repeat_penalty", type=float, default=1.10, help="penalize repeat sequence of tokens",dest="repeat_penalty")
81 |     parser.add_argument("-b", "--batch_size", type=int, default=8, help="batch size for prompt processing",dest="n_batch")
82 |     parser.add_argument("--keep", type=int, default=0, help="number of tokens to keep from the initial prompt",dest="n_keep")
83 |     parser.add_argument("-m", "--model", type=str, default="./models/llama-7B/ggml-model.bin", help="model path",dest="model")
84 |     parser.add_argument(
85 |         "-i", "--interactive", action="store_true", help="run in interactive mode", dest="interactive"
86 |     )
87 |     parser.add_argument("--embedding", action="store_true", help="run in embedding mode", dest="embedding")
88 |     parser.add_argument(
89 |         "--interactive-start",
90 |         action="store_true",
91 |         help="run in interactive mode",
92 |         dest="interactive"
93 |     )
94 |     parser.add_argument(
95 |         "--interactive-first",
96 |         action="store_true",
97 |         help="run in interactive mode and wait for input right away",
98 |         dest="interactive_start"
99 |     )
100 |     parser.add_argument(
101 |         "-ins",
102 |         "--instruct",
103 |         action="store_true",
104 |         help="run in instruction mode (use with Alpaca or Vicuna models)",
105 |         dest="instruct"
106 |     )
107 |     parser.add_argument(
108 |         "--color",
109 |         action="store_true",
110 |         help="colorise output to distinguish prompt and user input from generations",
111 |         dest="use_color"
112 |     )
113 |     parser.add_argument("--mlock", action="store_true",help="force system to keep model in RAM rather than swapping or compressing",dest="use_mlock")
114 |     parser.add_argument("--no-mmap", action="store_false",help="do not memory-map model (slower load but may reduce pageouts if not using mlock)",dest="use_mmap")
115 |     parser.add_argument("--mtest", action="store_true",help="compute maximum memory usage",dest="mem_test")
116 |     parser.add_argument("--verbose-prompt", action="store_true",help="print prompt before generation",dest="verbose_prompt")
117 |     parser.add_argument(
118 |         "-r",
119 |         "--reverse-prompt",
120 |         type=str,
121 |         action='append',
122 |         help="poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).",
123 |         dest="antiprompt"
124 |     )
125 |     parser.add_argument("--perplexity", action="store_true", help="compute perplexity over the prompt", dest="perplexity")
126 |     parser.add_argument("--ignore-eos", action="store_true", help="ignore end of stream token and continue generating", dest="ignore_eos")
127 |     parser.add_argument("--n_parts", type=int, default=-1, help="number of model parts", dest="n_parts")
128 |     parser.add_argument("--random-prompt", action="store_true", help="start with a randomized prompt.", dest="random_prompt")
129 |     parser.add_argument("--in-prefix", type=str, default="", help="string to prefix user inputs with", dest="input_prefix")
130 |     parser.add_argument("--fix-prefix", type=str, default="", help="append to input when n_predict tokens have been generated", dest="fix_prefix")
131 |     parser.add_argument("--out-postfix", type=str, default="", help="append to the output", dest="output_postfix")
132 |     parser.add_argument("--input-noecho", action="store_false", help="don't output the input", dest="input_echo")
133 |     args = parser.parse_args(argv)
134 |     return args
135 | 
136 | def gpt_random_prompt(rng):
137 |     return [
138 |         "So",
139 |         "Once upon a time",
140 |         "When",
141 |         "The",
142 |         "After",
143 |         "If",
144 |         "import",
145 |         "He",
146 |         "She",
147 |         "They",
148 |     ][rng % 10]
149 | 
150 | if __name__ == "__main__":
151 |     print(GptParams(**vars(gpt_params_parse())))
152 | 
--------------------------------------------------------------------------------
/examples/high_level_api/fastapi_server.py:
--------------------------------------------------------------------------------
1 | """Example FastAPI server for llama.cpp.
2 | 
3 | To run this example:
4 | 
5 | ```bash
6 | pip install fastapi uvicorn sse-starlette
7 | export MODEL=../models/7B/ggml-model.bin
8 | uvicorn fastapi_server_chat:app --reload
9 | ```
10 | 
11 | Then visit http://localhost:8000/docs to see the interactive API docs.
12 | 
13 | """
14 | import os
15 | import json
16 | from typing import List, Optional, Literal, Union, Iterator, Dict
17 | from typing_extensions import TypedDict
18 | 
19 | import llama_cpp
20 | 
21 | from fastapi import FastAPI
22 | from fastapi.middleware.cors import CORSMiddleware
23 | from pydantic import BaseModel, BaseSettings, Field, create_model_from_typeddict
24 | from sse_starlette.sse import EventSourceResponse
25 | 
26 | 
27 | class Settings(BaseSettings):
28 |     model: str
29 |     n_ctx: int = 2048
30 |     n_batch: int = 8
31 |     n_threads: int = int((os.cpu_count() or 2) / 2) or 1
32 |     f16_kv: bool = True
33 |     use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out...
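# embedding=True enables the /v1/embeddings route defined below; last_n_tokens_size is forwarded to Llama and bounds the token window used for the repeat penalty.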
34 | embedding: bool = True 35 | last_n_tokens_size: int = 64 36 | 37 | 38 | app = FastAPI( 39 | title="🦙 llama.cpp Python API", 40 | version="0.0.1", 41 | ) 42 | app.add_middleware( 43 | CORSMiddleware, 44 | allow_origins=["*"], 45 | allow_credentials=True, 46 | allow_methods=["*"], 47 | allow_headers=["*"], 48 | ) 49 | settings = Settings() 50 | llama = llama_cpp.Llama( 51 | settings.model, 52 | f16_kv=settings.f16_kv, 53 | use_mlock=settings.use_mlock, 54 | embedding=settings.embedding, 55 | n_threads=settings.n_threads, 56 | n_batch=settings.n_batch, 57 | n_ctx=settings.n_ctx, 58 | last_n_tokens_size=settings.last_n_tokens_size, 59 | ) 60 | 61 | 62 | class CreateCompletionRequest(BaseModel): 63 | prompt: str 64 | suffix: Optional[str] = Field(None) 65 | max_tokens: int = 16 66 | temperature: float = 0.8 67 | top_p: float = 0.95 68 | echo: bool = False 69 | stop: List[str] = [] 70 | stream: bool = False 71 | 72 | # ignored or currently unsupported 73 | model: Optional[str] = Field(None) 74 | n: Optional[int] = 1 75 | logprobs: Optional[int] = Field(None) 76 | presence_penalty: Optional[float] = 0 77 | frequency_penalty: Optional[float] = 0 78 | best_of: Optional[int] = 1 79 | logit_bias: Optional[Dict[str, float]] = Field(None) 80 | user: Optional[str] = Field(None) 81 | 82 | # llama.cpp specific parameters 83 | top_k: int = 40 84 | repeat_penalty: float = 1.1 85 | 86 | class Config: 87 | schema_extra = { 88 | "example": { 89 | "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", 90 | "stop": ["\n", "###"], 91 | } 92 | } 93 | 94 | 95 | CreateCompletionResponse = create_model_from_typeddict(llama_cpp.Completion) 96 | 97 | 98 | @app.post( 99 | "/v1/completions", 100 | response_model=CreateCompletionResponse, 101 | ) 102 | def create_completion(request: CreateCompletionRequest): 103 | if request.stream: 104 | chunks: Iterator[llama_cpp.CompletionChunk] = llama(**request.dict()) # type: ignore 105 | return EventSourceResponse(dict(data=json.dumps(chunk)) for chunk in chunks) 106 | return llama( 107 | **request.dict( 108 | exclude={ 109 | "model", 110 | "n", 111 | "logprobs", 112 | "frequency_penalty", 113 | "presence_penalty", 114 | "best_of", 115 | "logit_bias", 116 | "user", 117 | } 118 | ) 119 | ) 120 | 121 | 122 | class CreateEmbeddingRequest(BaseModel): 123 | model: Optional[str] 124 | input: str 125 | user: Optional[str] 126 | 127 | class Config: 128 | schema_extra = { 129 | "example": { 130 | "input": "The food was delicious and the waiter...", 131 | } 132 | } 133 | 134 | 135 | CreateEmbeddingResponse = create_model_from_typeddict(llama_cpp.Embedding) 136 | 137 | 138 | @app.post( 139 | "/v1/embeddings", 140 | response_model=CreateEmbeddingResponse, 141 | ) 142 | def create_embedding(request: CreateEmbeddingRequest): 143 | return llama.create_embedding(**request.dict(exclude={"model", "user"})) 144 | 145 | 146 | class ChatCompletionRequestMessage(BaseModel): 147 | role: Union[Literal["system"], Literal["user"], Literal["assistant"]] 148 | content: str 149 | user: Optional[str] = None 150 | 151 | 152 | class CreateChatCompletionRequest(BaseModel): 153 | model: Optional[str] 154 | messages: List[ChatCompletionRequestMessage] 155 | temperature: float = 0.8 156 | top_p: float = 0.95 157 | stream: bool = False 158 | stop: List[str] = [] 159 | max_tokens: int = 128 160 | 161 | # ignored or currently unsupported 162 | model: Optional[str] = Field(None) 163 | n: Optional[int] = 1 164 | presence_penalty: Optional[float] = 0 165 | frequency_penalty: 
Optional[float] = 0 166 | logit_bias: Optional[Dict[str, float]] = Field(None) 167 | user: Optional[str] = Field(None) 168 | 169 | # llama.cpp specific parameters 170 | repeat_penalty: float = 1.1 171 | 172 | class Config: 173 | schema_extra = { 174 | "example": { 175 | "messages": [ 176 | ChatCompletionRequestMessage( 177 | role="system", content="You are a helpful assistant." 178 | ), 179 | ChatCompletionRequestMessage( 180 | role="user", content="What is the capital of France?" 181 | ), 182 | ] 183 | } 184 | } 185 | 186 | 187 | CreateChatCompletionResponse = create_model_from_typeddict(llama_cpp.ChatCompletion) 188 | 189 | 190 | @app.post( 191 | "/v1/chat/completions", 192 | response_model=CreateChatCompletionResponse, 193 | ) 194 | async def create_chat_completion( 195 | request: CreateChatCompletionRequest, 196 | ) -> Union[llama_cpp.ChatCompletion, EventSourceResponse]: 197 | completion_or_chunks = llama.create_chat_completion( 198 | **request.dict( 199 | exclude={ 200 | "model", 201 | "n", 202 | "presence_penalty", 203 | "frequency_penalty", 204 | "logit_bias", 205 | "user", 206 | } 207 | ), 208 | ) 209 | 210 | if request.stream: 211 | 212 | async def server_sent_events( 213 | chat_chunks: Iterator[llama_cpp.ChatCompletionChunk], 214 | ): 215 | for chat_chunk in chat_chunks: 216 | yield dict(data=json.dumps(chat_chunk)) 217 | yield dict(data="[DONE]") 218 | 219 | chunks: Iterator[llama_cpp.ChatCompletionChunk] = completion_or_chunks # type: ignore 220 | 221 | return EventSourceResponse( 222 | server_sent_events(chunks), 223 | ) 224 | completion: llama_cpp.ChatCompletion = completion_or_chunks # type: ignore 225 | return completion 226 | 227 | 228 | class ModelData(TypedDict): 229 | id: str 230 | object: Literal["model"] 231 | owned_by: str 232 | permissions: List[str] 233 | 234 | 235 | class ModelList(TypedDict): 236 | object: Literal["list"] 237 | data: List[ModelData] 238 | 239 | 240 | GetModelResponse = create_model_from_typeddict(ModelList) 241 | 242 | 243 | @app.get("/v1/models", response_model=GetModelResponse) 244 | def get_models() -> ModelList: 245 | return { 246 | "object": "list", 247 | "data": [ 248 | { 249 | "id": llama.model_path, 250 | "object": "model", 251 | "owned_by": "me", 252 | "permissions": [], 253 | } 254 | ], 255 | } 256 | 257 | 258 | if __name__ == "__main__": 259 | import os 260 | import uvicorn 261 | 262 | uvicorn.run(app, host=os.getenv("HOST", "localhost"), port=os.getenv("PORT", 8000)) 263 | -------------------------------------------------------------------------------- /llama_cpp/server/__main__.py: -------------------------------------------------------------------------------- 1 | """Example FastAPI server for llama.cpp. 2 | 3 | To run this example: 4 | 5 | ```bash 6 | pip install fastapi uvicorn sse-starlette 7 | export MODEL=../models/7B/... 8 | uvicorn fastapi_server_chat:app --reload 9 | ``` 10 | 11 | Then visit http://localhost:8000/docs to see the interactive API docs. 
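Configuration is read from the environment through pydantic's BaseSettings (see the Settings class below), so the other fields can be set the same way, e.g. `export N_CTX=4096` or `export USE_MLOCK=true`.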
12 | 13 | """ 14 | import os 15 | import json 16 | from threading import Lock 17 | from typing import List, Optional, Literal, Union, Iterator, Dict 18 | from typing_extensions import TypedDict 19 | 20 | import llama_cpp 21 | 22 | from fastapi import Depends, FastAPI 23 | from fastapi.middleware.cors import CORSMiddleware 24 | from pydantic import BaseModel, BaseSettings, Field, create_model_from_typeddict 25 | from sse_starlette.sse import EventSourceResponse 26 | 27 | 28 | class Settings(BaseSettings): 29 | model: str 30 | n_ctx: int = 2048 31 | n_batch: int = 8 32 | n_threads: int = ((os.cpu_count() or 2) // 2) or 1 33 | f16_kv: bool = True 34 | use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out... 35 | embedding: bool = True 36 | last_n_tokens_size: int = 64 37 | logits_all: bool = False 38 | cache: bool = False # WARNING: This is an experimental feature 39 | 40 | 41 | app = FastAPI( 42 | title="🦙 llama.cpp Python API", 43 | version="0.0.1", 44 | ) 45 | app.add_middleware( 46 | CORSMiddleware, 47 | allow_origins=["*"], 48 | allow_credentials=True, 49 | allow_methods=["*"], 50 | allow_headers=["*"], 51 | ) 52 | settings = Settings() 53 | llama = llama_cpp.Llama( 54 | settings.model, 55 | f16_kv=settings.f16_kv, 56 | use_mlock=settings.use_mlock, 57 | embedding=settings.embedding, 58 | logits_all=settings.logits_all, 59 | n_threads=settings.n_threads, 60 | n_batch=settings.n_batch, 61 | n_ctx=settings.n_ctx, 62 | last_n_tokens_size=settings.last_n_tokens_size, 63 | ) 64 | if settings.cache: 65 | cache = llama_cpp.LlamaCache() 66 | llama.set_cache(cache) 67 | llama_lock = Lock() 68 | 69 | 70 | def get_llama(): 71 | with llama_lock: 72 | yield llama 73 | 74 | 75 | class CreateCompletionRequest(BaseModel): 76 | prompt: Union[str, List[str]] 77 | suffix: Optional[str] = Field(None) 78 | max_tokens: int = 16 79 | temperature: float = 0.8 80 | top_p: float = 0.95 81 | echo: bool = False 82 | stop: Optional[List[str]] = [] 83 | stream: bool = False 84 | 85 | # ignored or currently unsupported 86 | model: Optional[str] = Field(None) 87 | n: Optional[int] = 1 88 | logprobs: Optional[int] = Field(None) 89 | presence_penalty: Optional[float] = 0 90 | frequency_penalty: Optional[float] = 0 91 | best_of: Optional[int] = 1 92 | logit_bias: Optional[Dict[str, float]] = Field(None) 93 | user: Optional[str] = Field(None) 94 | 95 | # llama.cpp specific parameters 96 | top_k: int = 40 97 | repeat_penalty: float = 1.1 98 | 99 | class Config: 100 | schema_extra = { 101 | "example": { 102 | "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", 103 | "stop": ["\n", "###"], 104 | } 105 | } 106 | 107 | 108 | CreateCompletionResponse = create_model_from_typeddict(llama_cpp.Completion) 109 | 110 | 111 | @app.post( 112 | "/v1/completions", 113 | response_model=CreateCompletionResponse, 114 | ) 115 | def create_completion( 116 | request: CreateCompletionRequest, llama: llama_cpp.Llama = Depends(get_llama) 117 | ): 118 | if isinstance(request.prompt, list): 119 | request.prompt = "".join(request.prompt) 120 | 121 | completion_or_chunks = llama( 122 | **request.dict( 123 | exclude={ 124 | "model", 125 | "n", 126 | "frequency_penalty", 127 | "presence_penalty", 128 | "best_of", 129 | "logit_bias", 130 | "user", 131 | } 132 | ) 133 | ) 134 | if request.stream: 135 | chunks: Iterator[llama_cpp.CompletionChunk] = completion_or_chunks # type: ignore 136 | return 
EventSourceResponse(dict(data=json.dumps(chunk)) for chunk in chunks) 137 | completion: llama_cpp.Completion = completion_or_chunks # type: ignore 138 | return completion 139 | 140 | 141 | class CreateEmbeddingRequest(BaseModel): 142 | model: Optional[str] 143 | input: str 144 | user: Optional[str] 145 | 146 | class Config: 147 | schema_extra = { 148 | "example": { 149 | "input": "The food was delicious and the waiter...", 150 | } 151 | } 152 | 153 | 154 | CreateEmbeddingResponse = create_model_from_typeddict(llama_cpp.Embedding) 155 | 156 | 157 | @app.post( 158 | "/v1/embeddings", 159 | response_model=CreateEmbeddingResponse, 160 | ) 161 | def create_embedding( 162 | request: CreateEmbeddingRequest, llama: llama_cpp.Llama = Depends(get_llama) 163 | ): 164 | return llama.create_embedding(**request.dict(exclude={"model", "user"})) 165 | 166 | 167 | class ChatCompletionRequestMessage(BaseModel): 168 | role: Union[Literal["system"], Literal["user"], Literal["assistant"]] 169 | content: str 170 | user: Optional[str] = None 171 | 172 | 173 | class CreateChatCompletionRequest(BaseModel): 174 | model: Optional[str] 175 | messages: List[ChatCompletionRequestMessage] 176 | temperature: float = 0.8 177 | top_p: float = 0.95 178 | stream: bool = False 179 | stop: Optional[List[str]] = [] 180 | max_tokens: int = 128 181 | 182 | # ignored or currently unsupported 183 | model: Optional[str] = Field(None) 184 | n: Optional[int] = 1 185 | presence_penalty: Optional[float] = 0 186 | frequency_penalty: Optional[float] = 0 187 | logit_bias: Optional[Dict[str, float]] = Field(None) 188 | user: Optional[str] = Field(None) 189 | 190 | # llama.cpp specific parameters 191 | repeat_penalty: float = 1.1 192 | 193 | class Config: 194 | schema_extra = { 195 | "example": { 196 | "messages": [ 197 | ChatCompletionRequestMessage( 198 | role="system", content="You are a helpful assistant." 199 | ), 200 | ChatCompletionRequestMessage( 201 | role="user", content="What is the capital of France?" 
202 | ), 203 | ] 204 | } 205 | } 206 | 207 | 208 | CreateChatCompletionResponse = create_model_from_typeddict(llama_cpp.ChatCompletion) 209 | 210 | 211 | @app.post( 212 | "/v1/chat/completions", 213 | response_model=CreateChatCompletionResponse, 214 | ) 215 | def create_chat_completion( 216 | request: CreateChatCompletionRequest, 217 | llama: llama_cpp.Llama = Depends(get_llama), 218 | ) -> Union[llama_cpp.ChatCompletion, EventSourceResponse]: 219 | completion_or_chunks = llama.create_chat_completion( 220 | **request.dict( 221 | exclude={ 222 | "model", 223 | "n", 224 | "presence_penalty", 225 | "frequency_penalty", 226 | "logit_bias", 227 | "user", 228 | } 229 | ), 230 | ) 231 | 232 | if request.stream: 233 | 234 | async def server_sent_events( 235 | chat_chunks: Iterator[llama_cpp.ChatCompletionChunk], 236 | ): 237 | for chat_chunk in chat_chunks: 238 | yield dict(data=json.dumps(chat_chunk)) 239 | yield dict(data="[DONE]") 240 | 241 | chunks: Iterator[llama_cpp.ChatCompletionChunk] = completion_or_chunks # type: ignore 242 | 243 | return EventSourceResponse( 244 | server_sent_events(chunks), 245 | ) 246 | completion: llama_cpp.ChatCompletion = completion_or_chunks # type: ignore 247 | return completion 248 | 249 | 250 | class ModelData(TypedDict): 251 | id: str 252 | object: Literal["model"] 253 | owned_by: str 254 | permissions: List[str] 255 | 256 | 257 | class ModelList(TypedDict): 258 | object: Literal["list"] 259 | data: List[ModelData] 260 | 261 | 262 | GetModelResponse = create_model_from_typeddict(ModelList) 263 | 264 | 265 | @app.get("/v1/models", response_model=GetModelResponse) 266 | def get_models() -> ModelList: 267 | return { 268 | "object": "list", 269 | "data": [ 270 | { 271 | "id": llama.model_path, 272 | "object": "model", 273 | "owned_by": "me", 274 | "permissions": [], 275 | } 276 | ], 277 | } 278 | 279 | 280 | if __name__ == "__main__": 281 | import os 282 | import uvicorn 283 | 284 | uvicorn.run( 285 | app, host=os.getenv("HOST", "localhost"), port=int(os.getenv("PORT", 8000)) 286 | ) 287 | -------------------------------------------------------------------------------- /llama_cpp/llama_cpp.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import ctypes 4 | from ctypes import ( 5 | c_int, 6 | c_float, 7 | c_char_p, 8 | c_void_p, 9 | c_bool, 10 | POINTER, 11 | Structure, 12 | Array, 13 | c_uint8, 14 | c_size_t, 15 | ) 16 | import pathlib 17 | 18 | 19 | # Load the library 20 | def _load_shared_library(lib_base_name): 21 | # Determine the file extension based on the platform 22 | if sys.platform.startswith("linux"): 23 | lib_ext = ".so" 24 | elif sys.platform == "darwin": 25 | lib_ext = ".so" 26 | elif sys.platform == "win32": 27 | lib_ext = ".dll" 28 | else: 29 | raise RuntimeError("Unsupported platform") 30 | 31 | # Construct the paths to the possible shared library names 32 | _base_path = pathlib.Path(__file__).parent.resolve() 33 | # Searching for the library in the current directory under the name "libllama" (default name 34 | # for llamacpp) and "llama" (default name for this repo) 35 | _lib_paths = [ 36 | _base_path / f"lib{lib_base_name}{lib_ext}", 37 | _base_path / f"{lib_base_name}{lib_ext}", 38 | ] 39 | 40 | if "LLAMA_CPP_LIB" in os.environ: 41 | lib_base_name = os.environ["LLAMA_CPP_LIB"] 42 | _lib = pathlib.Path(lib_base_name) 43 | _base_path = _lib.parent.resolve() 44 | _lib_paths = [_lib.resolve()] 45 | 46 | # Add the library directory to the DLL search path on Windows (if needed) 47 | 
if sys.platform == "win32" and sys.version_info >= (3, 8): 48 | os.add_dll_directory(str(_base_path)) 49 | 50 | # Try to load the shared library, handling potential errors 51 | for _lib_path in _lib_paths: 52 | if _lib_path.exists(): 53 | try: 54 | return ctypes.CDLL(str(_lib_path)) 55 | except Exception as e: 56 | raise RuntimeError(f"Failed to load shared library '{_lib_path}': {e}") 57 | 58 | raise FileNotFoundError( 59 | f"Shared library with base name '{lib_base_name}' not found" 60 | ) 61 | 62 | 63 | # Specify the base name of the shared library to load 64 | _lib_base_name = "llama" 65 | 66 | # Load the library 67 | _lib = _load_shared_library(_lib_base_name) 68 | 69 | # C types 70 | llama_context_p = c_void_p 71 | 72 | 73 | llama_token = c_int 74 | llama_token_p = POINTER(llama_token) 75 | 76 | 77 | class llama_token_data(Structure): 78 | _fields_ = [ 79 | ("id", llama_token), # token id 80 | ("p", c_float), # probability of the token 81 | ("plog", c_float), # log probability of the token 82 | ] 83 | 84 | 85 | llama_token_data_p = POINTER(llama_token_data) 86 | 87 | llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p) 88 | 89 | 90 | class llama_context_params(Structure): 91 | _fields_ = [ 92 | ("n_ctx", c_int), # text context 93 | ("n_parts", c_int), # -1 for default 94 | ("seed", c_int), # RNG seed, 0 for random 95 | ("f16_kv", c_bool), # use fp16 for KV cache 96 | ( 97 | "logits_all", 98 | c_bool, 99 | ), # the llama_eval() call computes all logits, not just the last one 100 | ("vocab_only", c_bool), # only load the vocabulary, no weights 101 | ("use_mmap", c_bool), # use mmap if possible 102 | ("use_mlock", c_bool), # force system to keep model in RAM 103 | ("embedding", c_bool), # embedding mode only 104 | # called with a progress value between 0 and 1, pass NULL to disable 105 | ("progress_callback", llama_progress_callback), 106 | # context pointer passed to the progress callback 107 | ("progress_callback_user_data", c_void_p), 108 | ] 109 | 110 | 111 | llama_context_params_p = POINTER(llama_context_params) 112 | 113 | LLAMA_FTYPE_ALL_F32 = ctypes.c_int(0) 114 | LLAMA_FTYPE_MOSTLY_F16 = ctypes.c_int(1) # except 1d tensors 115 | LLAMA_FTYPE_MOSTLY_Q4_0 = ctypes.c_int(2) # except 1d tensors 116 | LLAMA_FTYPE_MOSTLY_Q4_1 = ctypes.c_int(3) # except 1d tensors 117 | LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = ctypes.c_int(4) # tok_embeddings.weight and output.weight are F16 118 | 119 | # Functions 120 | 121 | 122 | def llama_context_default_params() -> llama_context_params: 123 | return _lib.llama_context_default_params() 124 | 125 | 126 | _lib.llama_context_default_params.argtypes = [] 127 | _lib.llama_context_default_params.restype = llama_context_params 128 | 129 | 130 | def llama_mmap_supported() -> c_bool: 131 | return _lib.llama_mmap_supported() 132 | 133 | 134 | _lib.llama_mmap_supported.argtypes = [] 135 | _lib.llama_mmap_supported.restype = c_bool 136 | 137 | 138 | def llama_mlock_supported() -> c_bool: 139 | return _lib.llama_mlock_supported() 140 | 141 | 142 | _lib.llama_mlock_supported.argtypes = [] 143 | _lib.llama_mlock_supported.restype = c_bool 144 | 145 | 146 | # Various functions for loading a ggml llama model. 147 | # Allocate (almost) all memory needed for the model. 
148 | # Return NULL on failure 149 | def llama_init_from_file( 150 | path_model: bytes, params: llama_context_params 151 | ) -> llama_context_p: 152 | return _lib.llama_init_from_file(path_model, params) 153 | 154 | 155 | _lib.llama_init_from_file.argtypes = [c_char_p, llama_context_params] 156 | _lib.llama_init_from_file.restype = llama_context_p 157 | 158 | 159 | # Frees all allocated memory 160 | def llama_free(ctx: llama_context_p): 161 | _lib.llama_free(ctx) 162 | 163 | 164 | _lib.llama_free.argtypes = [llama_context_p] 165 | _lib.llama_free.restype = None 166 | 167 | 168 | # TODO: not great API - very likely to change 169 | # Returns 0 on success 170 | def llama_model_quantize(fname_inp: bytes, fname_out: bytes, itype: c_int) -> c_int: 171 | return _lib.llama_model_quantize(fname_inp, fname_out, itype) 172 | 173 | 174 | _lib.llama_model_quantize.argtypes = [c_char_p, c_char_p, c_int] 175 | _lib.llama_model_quantize.restype = c_int 176 | 177 | 178 | # Returns the KV cache that will contain the context for the 179 | # ongoing prediction with the model. 180 | def llama_get_kv_cache(ctx: llama_context_p): 181 | return _lib.llama_get_kv_cache(ctx) 182 | 183 | 184 | _lib.llama_get_kv_cache.argtypes = [llama_context_p] 185 | _lib.llama_get_kv_cache.restype = POINTER(c_uint8) 186 | 187 | 188 | # Returns the size of the KV cache 189 | def llama_get_kv_cache_size(ctx: llama_context_p) -> c_size_t: 190 | return _lib.llama_get_kv_cache_size(ctx) 191 | 192 | 193 | _lib.llama_get_kv_cache_size.argtypes = [llama_context_p] 194 | _lib.llama_get_kv_cache_size.restype = c_size_t 195 | 196 | 197 | # Returns the number of tokens in the KV cache 198 | def llama_get_kv_cache_token_count(ctx: llama_context_p) -> c_int: 199 | return _lib.llama_get_kv_cache_token_count(ctx) 200 | 201 | 202 | _lib.llama_get_kv_cache_token_count.argtypes = [llama_context_p] 203 | _lib.llama_get_kv_cache_token_count.restype = c_int 204 | 205 | 206 | # Sets the KV cache containing the current context for the model 207 | def llama_set_kv_cache( 208 | ctx: llama_context_p, kv_cache, n_size: c_size_t, n_token_count: c_int 209 | ): 210 | return _lib.llama_set_kv_cache(ctx, kv_cache, n_size, n_token_count) 211 | 212 | 213 | _lib.llama_set_kv_cache.argtypes = [llama_context_p, POINTER(c_uint8), c_size_t, c_int] 214 | _lib.llama_set_kv_cache.restype = None 215 | 216 | 217 | # Run the llama inference to obtain the logits and probabilities for the next token. 218 | # tokens + n_tokens is the provided batch of new tokens to process 219 | # n_past is the number of tokens to use from previous eval calls 220 | # Returns 0 on success 221 | def llama_eval( 222 | ctx: llama_context_p, 223 | tokens, # type: Array[llama_token] 224 | n_tokens: c_int, 225 | n_past: c_int, 226 | n_threads: c_int, 227 | ) -> c_int: 228 | return _lib.llama_eval(ctx, tokens, n_tokens, n_past, n_threads) 229 | 230 | 231 | _lib.llama_eval.argtypes = [llama_context_p, llama_token_p, c_int, c_int, c_int] 232 | _lib.llama_eval.restype = c_int 233 | 234 | 235 | # Convert the provided text into tokens. 236 | # The tokens pointer must be large enough to hold the resulting tokens. 
237 | # Returns the number of tokens on success, no more than n_max_tokens 238 | # Returns a negative number on failure - the number of tokens that would have been returned 239 | # TODO: not sure if correct 240 | def llama_tokenize( 241 | ctx: llama_context_p, 242 | text: bytes, 243 | tokens, # type: Array[llama_token] 244 | n_max_tokens: c_int, 245 | add_bos: c_bool, 246 | ) -> c_int: 247 | return _lib.llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos) 248 | 249 | 250 | _lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, c_bool] 251 | _lib.llama_tokenize.restype = c_int 252 | 253 | 254 | def llama_n_vocab(ctx: llama_context_p) -> c_int: 255 | return _lib.llama_n_vocab(ctx) 256 | 257 | 258 | _lib.llama_n_vocab.argtypes = [llama_context_p] 259 | _lib.llama_n_vocab.restype = c_int 260 | 261 | 262 | def llama_n_ctx(ctx: llama_context_p) -> c_int: 263 | return _lib.llama_n_ctx(ctx) 264 | 265 | 266 | _lib.llama_n_ctx.argtypes = [llama_context_p] 267 | _lib.llama_n_ctx.restype = c_int 268 | 269 | 270 | def llama_n_embd(ctx: llama_context_p) -> c_int: 271 | return _lib.llama_n_embd(ctx) 272 | 273 | 274 | _lib.llama_n_embd.argtypes = [llama_context_p] 275 | _lib.llama_n_embd.restype = c_int 276 | 277 | 278 | # Token logits obtained from the last call to llama_eval() 279 | # The logits for the last token are stored in the last row 280 | # Can be mutated in order to change the probabilities of the next token 281 | # Rows: n_tokens 282 | # Cols: n_vocab 283 | def llama_get_logits(ctx: llama_context_p): 284 | return _lib.llama_get_logits(ctx) 285 | 286 | 287 | _lib.llama_get_logits.argtypes = [llama_context_p] 288 | _lib.llama_get_logits.restype = POINTER(c_float) 289 | 290 | 291 | # Get the embeddings for the input 292 | # shape: [n_embd] (1-dimensional) 293 | def llama_get_embeddings(ctx: llama_context_p): 294 | return _lib.llama_get_embeddings(ctx) 295 | 296 | 297 | _lib.llama_get_embeddings.argtypes = [llama_context_p] 298 | _lib.llama_get_embeddings.restype = POINTER(c_float) 299 | 300 | 301 | # Token Id -> String. Uses the vocabulary in the provided context 302 | def llama_token_to_str(ctx: llama_context_p, token: llama_token) -> bytes: 303 | return _lib.llama_token_to_str(ctx, token) 304 | 305 | 306 | _lib.llama_token_to_str.argtypes = [llama_context_p, llama_token] 307 | _lib.llama_token_to_str.restype = c_char_p 308 | 309 | # Special tokens 310 | 311 | 312 | def llama_token_bos() -> llama_token: 313 | return _lib.llama_token_bos() 314 | 315 | 316 | _lib.llama_token_bos.argtypes = [] 317 | _lib.llama_token_bos.restype = llama_token 318 | 319 | 320 | def llama_token_eos() -> llama_token: 321 | return _lib.llama_token_eos() 322 | 323 | 324 | _lib.llama_token_eos.argtypes = [] 325 | _lib.llama_token_eos.restype = llama_token 326 | 327 | 328 | # TODO: improve the last_n_tokens interface ? 
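# Illustrative call pattern (a sketch based on how this repo uses the binding, not upstream documentation):
# callers pass the most recent tokens so the repeat penalty has a window to work against, e.g.
#   _arr = (llama_token * len(last_n))(*last_n)
#   tok = llama_sample_top_p_top_k(ctx, _arr, len(last_n), 40, 0.95, 0.8, 1.1)
# where 40 / 0.95 / 0.8 / 1.1 are the top_k / top_p / temp / repeat_penalty defaults used elsewhere in this repo.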
329 | def llama_sample_top_p_top_k( 330 | ctx: llama_context_p, 331 | last_n_tokens_data, # type: Array[llama_token] 332 | last_n_tokens_size: c_int, 333 | top_k: c_int, 334 | top_p: c_float, 335 | temp: c_float, 336 | repeat_penalty: c_float, 337 | ) -> llama_token: 338 | return _lib.llama_sample_top_p_top_k( 339 | ctx, last_n_tokens_data, last_n_tokens_size, top_k, top_p, temp, repeat_penalty 340 | ) 341 | 342 | 343 | _lib.llama_sample_top_p_top_k.argtypes = [ 344 | llama_context_p, 345 | llama_token_p, 346 | c_int, 347 | c_int, 348 | c_float, 349 | c_float, 350 | c_float, 351 | ] 352 | _lib.llama_sample_top_p_top_k.restype = llama_token 353 | 354 | 355 | # Performance information 356 | 357 | 358 | def llama_print_timings(ctx: llama_context_p): 359 | _lib.llama_print_timings(ctx) 360 | 361 | 362 | _lib.llama_print_timings.argtypes = [llama_context_p] 363 | _lib.llama_print_timings.restype = None 364 | 365 | 366 | def llama_reset_timings(ctx: llama_context_p): 367 | _lib.llama_reset_timings(ctx) 368 | 369 | 370 | _lib.llama_reset_timings.argtypes = [llama_context_p] 371 | _lib.llama_reset_timings.restype = None 372 | 373 | 374 | # Print system information 375 | def llama_print_system_info() -> bytes: 376 | return _lib.llama_print_system_info() 377 | 378 | 379 | _lib.llama_print_system_info.argtypes = [] 380 | _lib.llama_print_system_info.restype = c_char_p 381 | -------------------------------------------------------------------------------- /examples/low_level_api/low_level_api_chat_cpp.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is an example implementation of main.cpp from llama.cpp 3 | Quirks: 4 | * Its not exactly alike since this port is designed around programmatic I/O 5 | * Input is always echoed if on, so it should be turned off when using "input()" 6 | * The first antiprompt should be the userprompt like "\nUser:", 7 | because its added when n_predict is reached (aka generation ended prematurely) 8 | * n_predict can be set to -1 for unlimited length responses (or just a really high value) 9 | * Instruction mode adds its own antiprompt. 10 | You should also still be feeding the model with a "primer" prompt that 11 | shows it the expected format. 
12 | """ 13 | import sys 14 | from time import time 15 | from os import cpu_count 16 | 17 | import llama_cpp 18 | from common import GptParams, gpt_params_parse, gpt_random_prompt 19 | 20 | ANSI_COLOR_RESET = "\x1b[0m" 21 | ANSI_COLOR_YELLOW = "\x1b[33m" 22 | ANSI_BOLD = "\x1b[1m" 23 | ANSI_COLOR_GREEN = "\x1b[32m" 24 | 25 | CONSOLE_COLOR_DEFAULT = ANSI_COLOR_RESET 26 | CONSOLE_COLOR_PROMPT = ANSI_COLOR_YELLOW 27 | CONSOLE_COLOR_USER_INPUT = ANSI_BOLD + ANSI_COLOR_GREEN 28 | 29 | # Iterative search 30 | # Actively searches and prevents a pattern from being returned 31 | class IterSearch: 32 | def __init__(self, pattern): 33 | self.pattern = list(pattern) 34 | self.buffer = [] 35 | 36 | def __call__(self, char): 37 | self.buffer += [char] 38 | 39 | if (self.pattern[:len(self.buffer)] == self.buffer): 40 | if (len(self.buffer) >= len(self.pattern)): 41 | self.buffer.clear() 42 | return [] 43 | 44 | _tmp = self.buffer[:] 45 | self.buffer.clear() 46 | return _tmp 47 | 48 | # A LLaMA interactive session 49 | class LLaMAInteract: 50 | def __init__(self, params: GptParams) -> None: 51 | # input args 52 | self.params = params 53 | 54 | if (self.params.perplexity): 55 | raise NotImplementedError("""************ 56 | please use the 'perplexity' tool for perplexity calculations 57 | ************""") 58 | 59 | if (self.params.embedding): 60 | raise NotImplementedError("""************ 61 | please use the 'embedding' tool for embedding calculations 62 | ************""") 63 | 64 | if (self.params.n_ctx > 2048): 65 | print(f"""warning: model does not support \ 66 | context sizes greater than 2048 tokens ({self.params.n_ctx} \ 67 | specified) expect poor results""", file=sys.stderr) 68 | 69 | if (self.params.seed <= 0): 70 | self.params.seed = int(time()) 71 | 72 | print(f"seed = {self.params.seed}", file=sys.stderr) 73 | 74 | if (self.params.random_prompt): 75 | self.params.prompt = gpt_random_prompt(self.params.seed) 76 | 77 | # runtime args 78 | self.input_consumed = 0 79 | self.n_past = 0 80 | self.first_antiprompt = [] 81 | self.remaining_tokens = self.params.n_predict 82 | self.output_echo = self.params.input_echo 83 | 84 | # model load 85 | self.lparams = llama_cpp.llama_context_default_params() 86 | self.lparams.n_ctx = self.params.n_ctx 87 | self.lparams.n_parts = self.params.n_parts 88 | self.lparams.seed = self.params.seed 89 | self.lparams.memory_f16 = self.params.memory_f16 90 | self.lparams.use_mlock = self.params.use_mlock 91 | self.lparams.use_mmap = self.params.use_mmap 92 | 93 | self.ctx = llama_cpp.llama_init_from_file(self.params.model.encode("utf8"), self.lparams) 94 | if (not self.ctx): 95 | raise RuntimeError(f"error: failed to load model '{self.params.model}'") 96 | 97 | print(file=sys.stderr) 98 | print(f"system_info: n_threads = {self.params.n_threads} / {cpu_count()} \ 99 | | {llama_cpp.llama_print_system_info().decode('utf8')}", file=sys.stderr) 100 | 101 | # determine the required inference memory per token: 102 | if (self.params.mem_test): 103 | tmp = [0, 1, 2, 3] 104 | llama_cpp.llama_eval(self.ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, self.n_threads) 105 | llama_cpp.llama_print_timings(self.ctx) 106 | self.exit() 107 | return 108 | 109 | # create internal context 110 | self.n_ctx = llama_cpp.llama_n_ctx(self.ctx) 111 | 112 | # Add a space in front of the first character to match OG llama tokenizer behavior 113 | self.params.prompt = " " + self.params.prompt 114 | 115 | # Load prompt file 116 | if (self.params.file): 117 | with open(self.params.file) as f: 118 | 
self.params.prompt = f.read() 119 | 120 | # tokenize the prompt 121 | self.embd = [] 122 | self.embd_inp = self._tokenize(self.params.prompt) 123 | 124 | if (len(self.embd_inp) > self.params.n_ctx - 4): 125 | raise RuntimeError(f"error: prompt is too long ({len(self.embd_inp)} tokens, max {self.params.n_ctx - 4})") 126 | 127 | # number of tokens to keep when resetting context 128 | if (self.params.n_keep < 0 or self.params.n_keep > len(self.embd_inp) or self.params.instruct): 129 | self.params.n_keep = len(self.embd_inp) 130 | 131 | self.inp_prefix = self._tokenize(self.params.instruct_inp_prefix) 132 | self.inp_suffix = self._tokenize(self.params.instruct_inp_suffix, False) 133 | 134 | # in instruct mode, we inject a prefix and a suffix to each input by the user 135 | if (self.params.instruct): 136 | self.params.interactive_start = True 137 | _ptn = self._tokenize(self.params.instruct_inp_prefix.strip(), False) 138 | self.first_antiprompt.append(_ptn) 139 | self.antiecho = IterSearch(_ptn) 140 | 141 | # enable interactive mode if reverse prompt or interactive start is specified 142 | if (len(self.params.antiprompt) != 0 or self.params.interactive_start): 143 | self.params.interactive = True 144 | 145 | # determine newline token 146 | self.llama_token_newline = self._tokenize("\n", False) 147 | 148 | if (self.params.verbose_prompt): 149 | print(f""" 150 | prompt: '{self.params.prompt}' 151 | number of tokens in prompt = {len(self.embd_inp)}""", file=sys.stderr) 152 | 153 | for i in range(len(self.embd_inp)): 154 | print(f"{self.embd_inp[i]} -> '{llama_cpp.llama_token_to_str(self.ctx, self.embd_inp[i])}'", file=sys.stderr) 155 | 156 | if (self.params.n_keep > 0): 157 | print("static prompt based on n_keep: '") 158 | for i in range(self.params.n_keep): 159 | print(llama_cpp.llama_token_to_str(self.ctx, self.embd_inp[i]), file=sys.stderr) 160 | print("'", file=sys.stderr) 161 | print(file=sys.stderr) 162 | 163 | if (self.params.interactive): 164 | print("interactive mode on.", file=sys.stderr) 165 | 166 | if (len(self.params.antiprompt) > 0): 167 | for antiprompt in self.params.antiprompt: 168 | print(f"Reverse prompt: '{antiprompt}'", file=sys.stderr) 169 | 170 | if len(self.params.input_prefix) > 0: 171 | print(f"Input prefix: '{self.params.input_prefix}'", file=sys.stderr) 172 | 173 | print(f"""sampling: temp = {self.params.temp},\ 174 | top_k = {self.params.top_k},\ 175 | top_p = {self.params.top_p},\ 176 | repeat_last_n = {self.params.repeat_last_n},\ 177 | repeat_penalty = {self.params.repeat_penalty} 178 | 179 | generate: n_ctx = {self.n_ctx}, \ 180 | n_batch = {self.params.n_batch}, \ 181 | n_predict = {self.params.n_predict}, \ 182 | n_keep = {self.params.n_keep} 183 | """, file=sys.stderr) 184 | 185 | # determine antiprompt tokens 186 | for i in self.params.antiprompt: 187 | self.first_antiprompt.append(self._tokenize(i, False)) 188 | 189 | self.last_n_tokens = [0]*self.n_ctx #TODO: deque doesnt support slices 190 | 191 | if (params.interactive): 192 | print("""== Running in interactive mode. == 193 | - Press Ctrl+C to interject at any time. 194 | - Press Return to return control to LLaMa. 195 | - If you want to submit another line, end your input in '\\'. 
196 | 197 | """, file=sys.stderr) 198 | self.set_color(CONSOLE_COLOR_PROMPT) 199 | 200 | # tokenize a prompt 201 | def _tokenize(self, prompt, bos=True): 202 | _arr = (llama_cpp.llama_token * (len(prompt) + 1))() 203 | _n = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8"), _arr, len(_arr), bos) 204 | return _arr[:_n] 205 | 206 | def use_antiprompt(self): 207 | return len(self.first_antiprompt) > 0 208 | 209 | def set_color(self, c): 210 | if (self.params.use_color): 211 | print(c, end="") 212 | 213 | # generate tokens 214 | def generate(self): 215 | while self.remaining_tokens > 0 or self.params.interactive: 216 | # predict 217 | if len(self.embd) > 0: 218 | # infinite text generation via context swapping 219 | # if we run out of context: 220 | # - take the n_keep first tokens from the original prompt (via n_past) 221 | # - take half of the last (n_ctx - n_keep) tokens and recompute the logits in a batch 222 | if (self.n_past + len(self.embd) > self.n_ctx): 223 | n_left = self.n_past - self.params.n_keep 224 | self.n_past = self.params.n_keep 225 | 226 | # insert n_left/2 tokens at the start of embd from last_n_tokens 227 | _insert = self.last_n_tokens[ 228 | self.n_ctx - int(n_left/2) - len(self.embd):-len(self.embd) 229 | ] 230 | self.embd = _insert + self.embd 231 | 232 | if (llama_cpp.llama_eval( 233 | self.ctx, (llama_cpp.llama_token * len(self.embd))(*self.embd), len(self.embd), self.n_past, self.params.n_threads 234 | ) != 0): 235 | raise Exception("Failed to llama_eval!") 236 | 237 | self.n_past += len(self.embd) 238 | self.embd = [] 239 | if len(self.embd_inp) <= self.input_consumed: 240 | # out of user input, sample next token 241 | 242 | if (self.params.ignore_eos): 243 | logits = llama_cpp.llama_get_logits(self.ctx) 244 | logits[llama_cpp.llama_token_eos()] = llama_cpp.c_float(0) 245 | 246 | _arr = self.last_n_tokens[-min(self.params.repeat_last_n, self.n_past):] 247 | id = llama_cpp.llama_sample_top_p_top_k( 248 | self.ctx, 249 | (llama_cpp.llama_token * len(_arr))(*_arr), 250 | len(_arr), 251 | self.params.top_k, 252 | self.params.top_p, 253 | self.params.temp, 254 | self.params.repeat_penalty, 255 | ) 256 | self.last_n_tokens.pop(0) 257 | self.last_n_tokens.append(id) 258 | 259 | # replace end of text token with newline token when in interactive mode 260 | if (id == llama_cpp.llama_token_eos() and self.params.interactive and not self.params.instruct): 261 | id = self.llama_token_newline[0] 262 | if (self.use_antiprompt()): 263 | # tokenize and inject first reverse prompt 264 | self.embd_inp += self.first_antiprompt[0] 265 | 266 | # add it to the context 267 | self.embd.append(id) 268 | 269 | # echo this to console 270 | self.output_echo = True 271 | 272 | # decrement remaining sampling budget 273 | self.remaining_tokens -= 1 274 | else: 275 | # output to console if input echo is on 276 | self.output_echo = self.params.input_echo 277 | 278 | # some user input remains from prompt or interaction, forward it to processing 279 | while len(self.embd_inp) > self.input_consumed: 280 | self.embd.append(self.embd_inp[self.input_consumed]) 281 | self.last_n_tokens.pop(0) 282 | self.last_n_tokens.append(self.embd_inp[self.input_consumed]) 283 | self.input_consumed += 1 284 | if len(self.embd) >= self.params.n_batch: 285 | break 286 | 287 | # display tokens 288 | if self.output_echo: 289 | for id in self.embd: 290 | if self.params.instruct: 291 | for r in self.antiecho(id): 292 | yield r 293 | else: 294 | yield id 295 | 296 | # reset color to default if we there is no pending 
user input 297 | if (self.params.input_echo and len(self.embd_inp) == self.input_consumed): 298 | self.set_color(CONSOLE_COLOR_DEFAULT) 299 | 300 | if (self.params.interactive and len(self.embd_inp) <= self.input_consumed): 301 | # if antiprompt is present, stop 302 | if (self.use_antiprompt()): 303 | if True in [ 304 | i == self.last_n_tokens[-len(i):] 305 | for i in self.first_antiprompt 306 | ]: 307 | break 308 | 309 | # if we are using instruction mode, and we have processed the initial prompt 310 | if (self.params.interactive_start): 311 | break 312 | 313 | # end of text token 314 | if len(self.embd) > 0 and self.embd[-1] == llama_cpp.llama_token_eos(): 315 | if (not self.params.instruct): 316 | for i in " [end of text]\n": 317 | yield i 318 | break 319 | 320 | # respect n_predict even if antiprompt is present 321 | if (self.params.interactive and self.remaining_tokens <= 0 and self.params.n_predict != -1): 322 | # If we arent in instruction mode, fix the current generation by appending the antiprompt. 323 | # Makes it so if chat ends prematurely you dont append the AI's text etc. 324 | if not self.params.instruct: 325 | self.embd_inp += self.first_antiprompt[0] 326 | self.n_remain = self.params.n_predict 327 | break 328 | 329 | self.params.interactive_start = False 330 | 331 | def __enter__(self): 332 | return self 333 | 334 | def __exit__(self, type, value, tb): 335 | self.exit() 336 | 337 | def exit(self): 338 | llama_cpp.llama_free(self.ctx) 339 | self.set_color(CONSOLE_COLOR_DEFAULT) 340 | 341 | # return past text 342 | def past(self): 343 | for id in self.last_n_tokens[-self.n_past:]: 344 | yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") 345 | 346 | # write input 347 | def input(self, prompt: str): 348 | if (self.params.instruct and self.last_n_tokens[-len(self.inp_prefix):] != self.inp_prefix): 349 | self.embd_inp += self.inp_prefix 350 | self.embd_inp += self._tokenize(prompt) 351 | if (self.params.instruct): 352 | self.embd_inp += self.inp_suffix 353 | 354 | # write output 355 | def output(self): 356 | self.remaining_tokens = self.params.n_predict 357 | for id in self.generate(): 358 | yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") 359 | 360 | # read user input 361 | def read_input(self): 362 | out = "" 363 | while (t := input()).endswith("\\"): 364 | out += t[:-1] + "\n" 365 | return out + t + "\n" 366 | 367 | # interactive mode 368 | def interact(self): 369 | for i in self.output(): 370 | print(i,end="",flush=True) 371 | self.params.input_echo = False 372 | 373 | while self.params.interactive: 374 | self.set_color(CONSOLE_COLOR_USER_INPUT) 375 | if (self.params.instruct): 376 | print('\n> ', end="") 377 | self.input(self.read_input()) 378 | else: 379 | print(self.params.input_prefix, end="") 380 | self.input(f"{self.params.input_prefix}{self.read_input()}{self.params.output_postfix}") 381 | print(self.params.output_postfix,end="") 382 | self.set_color(CONSOLE_COLOR_DEFAULT) 383 | 384 | try: 385 | for i in self.output(): 386 | print(i,end="",flush=True) 387 | except KeyboardInterrupt: 388 | self.set_color(CONSOLE_COLOR_DEFAULT) 389 | if not self.params.instruct: 390 | print(self.params.fix_prefix,end="") 391 | self.input(self.params.fix_prefix) 392 | 393 | if __name__ == "__main__": 394 | from datetime import datetime 395 | 396 | USER_NAME="User" 397 | AI_NAME="ChatLLaMa" 398 | 399 | time_now = datetime.now() 400 | prompt = f"""Text transcript of a never ending dialog, where {USER_NAME} interacts with an AI assistant named {AI_NAME}. 
401 | {AI_NAME} is helpful, kind, honest, friendly, good at writing and never fails to answer {USER_NAME}’s requests immediately and with details and precision. 402 | There are no annotations like (30 seconds passed...) or (to himself), just what {USER_NAME} and {AI_NAME} say aloud to each other. 403 | The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. 404 | The transcript only includes text, it does not include markup like HTML and Markdown. 405 | 406 | {USER_NAME}: Hello, {AI_NAME}! 407 | {AI_NAME}: Hello {USER_NAME}! How may I help you today? 408 | {USER_NAME}: What time is it? 409 | {AI_NAME}: It is {time_now.strftime("%H:%M")}. 410 | {USER_NAME}: What year is it? 411 | {AI_NAME}: We are in {time_now.strftime("%Y")}. 412 | {USER_NAME}: What is a cat? 413 | {AI_NAME}: A cat is a domestic species of small carnivorous mammal. It is the only domesticated species in the family Felidae. 414 | {USER_NAME}: Name a color. 415 | {AI_NAME}: Blue 416 | {USER_NAME}:""" 417 | args = gpt_params_parse() 418 | params = GptParams(**vars(args)) 419 | 420 | with LLaMAInteract(params) as m: 421 | m.interact() 422 | -------------------------------------------------------------------------------- /llama_cpp/llama.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import uuid 4 | import time 5 | import math 6 | import multiprocessing 7 | from typing import List, Optional, Union, Generator, Sequence, Iterator 8 | from collections import deque 9 | 10 | from . import llama_cpp 11 | from .llama_types import * 12 | 13 | 14 | class LlamaCache: 15 | """Cache for a llama.cpp model. 16 | 17 | NOTE: This implementation currently only tells the Llama class to avoid reprocessing bytes and continue from the last 18 | completion. It does not actually cache the results.""" 19 | 20 | pass 21 | 22 | 23 | class Llama: 24 | """High-level Python wrapper for a llama.cpp model.""" 25 | 26 | def __init__( 27 | self, 28 | model_path: str, 29 | # NOTE: These parameters are likely to change in the future. 30 | n_ctx: int = 512, 31 | n_parts: int = -1, 32 | seed: int = 1337, 33 | f16_kv: bool = True, 34 | logits_all: bool = False, 35 | vocab_only: bool = False, 36 | use_mmap: bool = True, 37 | use_mlock: bool = False, 38 | embedding: bool = False, 39 | n_threads: Optional[int] = None, 40 | n_batch: int = 8, 41 | last_n_tokens_size: int = 64, 42 | verbose: bool = True, 43 | ): 44 | """Load a llama.cpp model from `model_path`. 45 | 46 | Args: 47 | model_path: Path to the model. 48 | n_ctx: Maximum context size. 49 | n_parts: Number of parts to split the model into. If -1, the number of parts is automatically determined. 50 | seed: Random seed. 0 for random. 51 | f16_kv: Use half-precision for key/value cache. 52 | logits_all: Return logits for all tokens, not just the last token. 53 | vocab_only: Only load the vocabulary no weights. 54 | use_mmap: Use mmap if possible. 55 | use_mlock: Force the system to keep the model in RAM. 56 | embedding: Embedding mode only. 57 | n_threads: Number of threads to use. If None, the number of threads is automatically determined. 58 | n_batch: Maximum number of prompt tokens to batch together when calling llama_eval. 59 | last_n_tokens_size: Maximum number of tokens to keep in the last_n_tokens deque. 60 | verbose: Print verbose output to stderr. 61 | 62 | Raises: 63 | ValueError: If the model path does not exist. 64 | 65 | Returns: 66 | A Llama instance. 
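        Example (the model path and prompt are illustrative; point it at any local ggml model file):

            >>> llm = Llama(model_path="./models/7B/ggml-model.bin")
            >>> llm("Question: What are the planets in the solar system? Answer: ", max_tokens=48, stop=["Q:"])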
67 | """ 68 | self.verbose = verbose 69 | self.model_path = model_path 70 | 71 | self.params = llama_cpp.llama_context_default_params() 72 | self.params.n_ctx = n_ctx 73 | self.params.n_parts = n_parts 74 | self.params.seed = seed 75 | self.params.f16_kv = f16_kv 76 | self.params.logits_all = logits_all 77 | self.params.vocab_only = vocab_only 78 | self.params.use_mmap = use_mmap 79 | self.params.use_mlock = use_mlock 80 | self.params.embedding = embedding 81 | 82 | self.last_n_tokens_size = last_n_tokens_size 83 | self.last_n_tokens_data = deque( 84 | [llama_cpp.llama_token(0)] * self.last_n_tokens_size, 85 | maxlen=self.last_n_tokens_size, 86 | ) 87 | self.tokens_consumed = 0 88 | self.tokens: List[llama_cpp.llama_token] = [] 89 | self.n_batch = min(n_ctx, n_batch) 90 | self.n_tokens = 0 91 | self.n_past = 0 92 | self.all_logits: List[List[float]] = [] # TODO: Use an array instead of a list. 93 | 94 | ### HACK: This is a hack to work around the fact that the llama.cpp API does not yet support 95 | ### saving and restoring state, this allows us to continue a completion if the last 96 | ### completion_bytes is a prefix to the prompt passed in. However this is actually incorrect 97 | ### because it does not take into account stop tokens which have been processed by the model. 98 | self._completion_bytes: List[bytes] = [] 99 | self._cache: Optional[LlamaCache] = None 100 | ### 101 | 102 | self.n_threads = n_threads or max(multiprocessing.cpu_count() // 2, 1) 103 | 104 | if not os.path.exists(model_path): 105 | raise ValueError(f"Model path does not exist: {model_path}") 106 | 107 | self.ctx = llama_cpp.llama_init_from_file( 108 | self.model_path.encode("utf-8"), self.params 109 | ) 110 | 111 | if self.verbose: 112 | print(llama_cpp.llama_print_system_info().decode("utf-8"), file=sys.stderr) 113 | 114 | def tokenize(self, text: bytes) -> List[llama_cpp.llama_token]: 115 | """Tokenize a string. 116 | 117 | Args: 118 | text: The utf-8 encoded string to tokenize. 119 | 120 | Raises: 121 | RuntimeError: If the tokenization failed. 122 | 123 | Returns: 124 | A list of tokens. 125 | """ 126 | assert self.ctx is not None 127 | n_ctx = llama_cpp.llama_n_ctx(self.ctx) 128 | tokens = (llama_cpp.llama_token * int(n_ctx))() 129 | n_tokens = llama_cpp.llama_tokenize( 130 | self.ctx, 131 | text, 132 | tokens, 133 | n_ctx, 134 | llama_cpp.c_bool(True), 135 | ) 136 | if int(n_tokens) < 0: 137 | raise RuntimeError(f'Failed to tokenize: text="{text}" n_tokens={n_tokens}') 138 | return list(tokens[:n_tokens]) 139 | 140 | def detokenize(self, tokens: List[llama_cpp.llama_token]) -> bytes: 141 | """Detokenize a list of tokens. 142 | 143 | Args: 144 | tokens: The list of tokens to detokenize. 145 | 146 | Returns: 147 | The detokenized string. 148 | """ 149 | assert self.ctx is not None 150 | output = b"" 151 | for token in tokens: 152 | output += llama_cpp.llama_token_to_str(self.ctx, token) 153 | return output 154 | 155 | def set_cache(self, cache: Optional[LlamaCache]): 156 | """Set the cache. 157 | 158 | Args: 159 | cache: The cache to set. 160 | """ 161 | self._cache = cache 162 | 163 | def reset(self): 164 | """Reset the model state.""" 165 | self.last_n_tokens_data.extend( 166 | [llama_cpp.llama_token(0)] * self.last_n_tokens_size 167 | ) 168 | self.tokens_consumed = 0 169 | self.tokens.clear() 170 | self.n_tokens = 0 171 | self.n_past = 0 172 | self.all_logits.clear() 173 | 174 | def eval(self, tokens: Sequence[llama_cpp.llama_token]): 175 | """Evaluate a list of tokens. 
176 | 177 | Args: 178 | tokens: The list of tokens to evaluate. 179 | """ 180 | assert self.ctx is not None 181 | n_ctx = int(llama_cpp.llama_n_ctx(self.ctx)) 182 | for i in range(0, len(tokens), self.n_batch): 183 | batch = tokens[i : min(len(tokens), i + self.n_batch)] 184 | self.n_past = min(n_ctx - len(batch), self.tokens_consumed) 185 | self.n_tokens = len(batch) 186 | return_code = llama_cpp.llama_eval( 187 | ctx=self.ctx, 188 | tokens=(llama_cpp.llama_token * len(batch))(*batch), 189 | n_tokens=llama_cpp.c_int(self.n_tokens), 190 | n_past=llama_cpp.c_int(self.n_past), 191 | n_threads=llama_cpp.c_int(self.n_threads), 192 | ) 193 | if int(return_code) != 0: 194 | raise RuntimeError(f"llama_eval returned {return_code}") 195 | self.tokens.extend(batch) 196 | self.last_n_tokens_data.extend(batch) 197 | self.tokens_consumed += len(batch) 198 | if self.params.logits_all: 199 | self.all_logits.extend(self._logits()) 200 | 201 | def _logits(self) -> List[List[float]]: 202 | """Return the logits from the last call to llama_eval.""" 203 | assert self.ctx is not None 204 | n_vocab = llama_cpp.llama_n_vocab(self.ctx) 205 | cols = int(n_vocab) 206 | rows = self.n_tokens if self.params.logits_all else 1 207 | logits_view = llama_cpp.llama_get_logits(self.ctx) 208 | logits = [[logits_view[i * cols + j] for j in range(cols)] for i in range(rows)] 209 | return logits 210 | 211 | def sample( 212 | self, 213 | top_k: int, 214 | top_p: float, 215 | temp: float, 216 | repeat_penalty: float, 217 | ): 218 | """Sample a token from the model. 219 | 220 | Args: 221 | top_k: The top-k sampling parameter. 222 | top_p: The top-p sampling parameter. 223 | temp: The temperature parameter. 224 | repeat_penalty: The repeat penalty parameter. 225 | 226 | Returns: 227 | The sampled token. 228 | """ 229 | assert self.ctx is not None 230 | return llama_cpp.llama_sample_top_p_top_k( 231 | ctx=self.ctx, 232 | last_n_tokens_data=(llama_cpp.llama_token * self.last_n_tokens_size)( 233 | *self.last_n_tokens_data 234 | ), 235 | last_n_tokens_size=llama_cpp.c_int(self.last_n_tokens_size), 236 | top_k=llama_cpp.c_int(top_k), 237 | top_p=llama_cpp.c_float(top_p), 238 | temp=llama_cpp.c_float(temp), 239 | repeat_penalty=llama_cpp.c_float(repeat_penalty), 240 | ) 241 | 242 | def generate( 243 | self, 244 | tokens: Sequence[llama_cpp.llama_token], 245 | top_k: int, 246 | top_p: float, 247 | temp: float, 248 | repeat_penalty: float, 249 | reset: bool = True, 250 | ) -> Generator[ 251 | llama_cpp.llama_token, Optional[Sequence[llama_cpp.llama_token]], None 252 | ]: 253 | """Create a generator of tokens from a prompt. 254 | 255 | Examples: 256 | >>> llama = Llama("models/ggml-7b.bin") 257 | >>> tokens = llama.tokenize(b"Hello, world!") 258 | >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1): 259 | ... print(llama.detokenize([token])) 260 | 261 | Args: 262 | tokens: The prompt tokens. 263 | top_k: The top-k sampling parameter. 264 | top_p: The top-p sampling parameter. 265 | temp: The temperature parameter. 266 | repeat_penalty: The repeat penalty parameter. 267 | reset: Whether to reset the model state. 268 | 269 | Yields: 270 | The generated tokens. 
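        Note (inferred from the implementation below, not a documented guarantee): the generator also
        accepts tokens sent back into it, e.g. generator.send(extra_tokens); any tokens sent are
        appended after the sampled token and evaluated together on the next iteration, which lets a
        caller force-feed text mid-generation.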
271 | """ 272 | assert self.ctx is not None 273 | ### HACK 274 | if ( 275 | reset 276 | and self._cache 277 | and len(self.tokens) > 0 278 | and self.tokens == tokens[: len(self.tokens)] 279 | ): 280 | if self.verbose: 281 | print("generate cache hit", file=sys.stderr) 282 | reset = False 283 | tokens = tokens[len(self.tokens) :] 284 | ### 285 | if reset: 286 | self.reset() 287 | while True: 288 | self.eval(tokens) 289 | token = self.sample( 290 | top_k=top_k, 291 | top_p=top_p, 292 | temp=temp, 293 | repeat_penalty=repeat_penalty, 294 | ) 295 | tokens_or_none = yield token 296 | tokens = [token] 297 | if tokens_or_none is not None: 298 | tokens.extend(tokens_or_none) 299 | 300 | def create_embedding(self, input: str) -> Embedding: 301 | """Embed a string. 302 | 303 | Args: 304 | input: The utf-8 encoded string to embed. 305 | 306 | Returns: 307 | An embedding object. 308 | """ 309 | assert self.ctx is not None 310 | 311 | if self.params.embedding == False: 312 | raise RuntimeError( 313 | "Llama model must be created with embedding=True to call this method" 314 | ) 315 | 316 | if self.verbose: 317 | llama_cpp.llama_reset_timings(self.ctx) 318 | 319 | tokens = self.tokenize(input.encode("utf-8")) 320 | self.reset() 321 | self.eval(tokens) 322 | n_tokens = len(tokens) 323 | embedding = llama_cpp.llama_get_embeddings(self.ctx)[ 324 | : llama_cpp.llama_n_embd(self.ctx) 325 | ] 326 | 327 | if self.verbose: 328 | llama_cpp.llama_print_timings(self.ctx) 329 | 330 | return { 331 | "object": "list", 332 | "data": [ 333 | { 334 | "object": "embedding", 335 | "embedding": embedding, 336 | "index": 0, 337 | } 338 | ], 339 | "model": self.model_path, 340 | "usage": { 341 | "prompt_tokens": n_tokens, 342 | "total_tokens": n_tokens, 343 | }, 344 | } 345 | 346 | def embed(self, input: str) -> List[float]: 347 | """Embed a string. 348 | 349 | Args: 350 | input: The utf-8 encoded string to embed. 
351 | 352 | Returns: 353 | A list of embeddings 354 | """ 355 | return list(map(float, self.create_embedding(input)["data"][0]["embedding"])) 356 | 357 | def _create_completion( 358 | self, 359 | prompt: str, 360 | suffix: Optional[str] = None, 361 | max_tokens: int = 16, 362 | temperature: float = 0.8, 363 | top_p: float = 0.95, 364 | logprobs: Optional[int] = None, 365 | echo: bool = False, 366 | stop: Optional[List[str]] = [], 367 | repeat_penalty: float = 1.1, 368 | top_k: int = 40, 369 | stream: bool = False, 370 | ) -> Union[Iterator[Completion], Iterator[CompletionChunk]]: 371 | assert self.ctx is not None 372 | completion_id: str = f"cmpl-{str(uuid.uuid4())}" 373 | created: int = int(time.time()) 374 | completion_tokens: List[llama_cpp.llama_token] = [] 375 | # Add blank space to start of prompt to match OG llama tokenizer 376 | prompt_tokens: List[llama_cpp.llama_token] = self.tokenize( 377 | b" " + prompt.encode("utf-8") 378 | ) 379 | text: bytes = b"" 380 | returned_characters: int = 0 381 | stop = stop if stop is not None else [] 382 | 383 | if self.verbose: 384 | llama_cpp.llama_reset_timings(self.ctx) 385 | 386 | if len(prompt_tokens) + max_tokens > int(llama_cpp.llama_n_ctx(self.ctx)): 387 | raise ValueError( 388 | f"Requested tokens exceed context window of {llama_cpp.llama_n_ctx(self.ctx)}" 389 | ) 390 | 391 | if stop != []: 392 | stop_sequences = [s.encode("utf-8") for s in stop] 393 | else: 394 | stop_sequences = [] 395 | 396 | if logprobs is not None and self.params.logits_all is False: 397 | raise ValueError( 398 | "logprobs is not supported for models created with logits_all=False" 399 | ) 400 | 401 | ### HACK 402 | reset: bool = True 403 | _prompt: bytes = prompt.encode("utf-8") 404 | _completion: bytes = b"".join(self._completion_bytes) 405 | if len(_completion) and self._cache and _prompt.startswith(_completion): 406 | if self.verbose: 407 | print("completion cache hit", file=sys.stderr) 408 | reset = False 409 | _prompt = _prompt[len(_completion) :] 410 | prompt_tokens = self.tokenize(b" " + _prompt) 411 | self._completion_bytes.append(_prompt) 412 | else: 413 | self._completion_bytes = [prompt.encode("utf-8")] 414 | ### 415 | 416 | finish_reason = "length" 417 | for token in self.generate( 418 | prompt_tokens, 419 | top_k=top_k, 420 | top_p=top_p, 421 | temp=temperature, 422 | repeat_penalty=repeat_penalty, 423 | reset=reset, 424 | ): 425 | if token == llama_cpp.llama_token_eos(): 426 | text = self.detokenize(completion_tokens) 427 | finish_reason = "stop" 428 | break 429 | completion_tokens.append(token) 430 | 431 | all_text = self.detokenize(completion_tokens) 432 | any_stop = [s for s in stop_sequences if s in all_text] 433 | if len(any_stop) > 0: 434 | first_stop = any_stop[0] 435 | text = all_text[: all_text.index(first_stop)] 436 | finish_reason = "stop" 437 | break 438 | 439 | if stream: 440 | start = returned_characters 441 | longest = 0 442 | # We want to avoid yielding any characters from 443 | # the generated text if they are part of a stop 444 | # sequence. 
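            # Worked illustration of the hold-back rule (a gloss on the loop below, not upstream docs):
            # with stop_sequences == [b"###"] and all_text ending in b"##", the longest stop-sequence
            # prefix matching the tail has length 2, so those two bytes are withheld from this chunk
            # until later tokens show whether the full stop sequence actually appears.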
445 | for s in stop_sequences: 446 | for i in range(len(s), 0, -1): 447 | if all_text.endswith(s[:i]): 448 | if i > longest: 449 | longest = i 450 | break 451 | text = all_text[: len(all_text) - longest] 452 | returned_characters += len(text[start:]) 453 | ### HACK 454 | self._completion_bytes.append(text[start:]) 455 | ### 456 | yield { 457 | "id": completion_id, 458 | "object": "text_completion", 459 | "created": created, 460 | "model": self.model_path, 461 | "choices": [ 462 | { 463 | "text": text[start:].decode("utf-8"), 464 | "index": 0, 465 | "logprobs": None, 466 | "finish_reason": None, 467 | } 468 | ], 469 | } 470 | 471 | if len(completion_tokens) >= max_tokens: 472 | text = self.detokenize(completion_tokens) 473 | finish_reason = "length" 474 | break 475 | 476 | if stream: 477 | ### HACK 478 | self._completion_bytes.append(text[returned_characters:]) 479 | ### 480 | yield { 481 | "id": completion_id, 482 | "object": "text_completion", 483 | "created": created, 484 | "model": self.model_path, 485 | "choices": [ 486 | { 487 | "text": text[returned_characters:].decode("utf-8"), 488 | "index": 0, 489 | "logprobs": None, 490 | "finish_reason": finish_reason, 491 | } 492 | ], 493 | } 494 | return 495 | 496 | ### HACK 497 | self._completion_bytes.append(text) 498 | ### 499 | text_str = text.decode("utf-8") 500 | 501 | if echo: 502 | text_str = prompt + text_str 503 | 504 | if suffix is not None: 505 | text_str = text_str + suffix 506 | 507 | logprobs_or_none: Optional[CompletionLogprobs] = None 508 | if logprobs is not None: 509 | text_offset = 0 510 | text_offsets: List[int] = [] 511 | token_logprobs: List[float] = [] 512 | tokens: List[str] = [] 513 | top_logprobs: List[Dict[str, float]] = [] 514 | 515 | all_tokens = prompt_tokens + completion_tokens 516 | all_token_strs = [ 517 | self.detokenize([token]).decode("utf-8") for token in all_tokens 518 | ] 519 | all_logprobs = [ 520 | [Llama.logit_to_logprob(logit) for logit in row] 521 | for row in self.all_logits 522 | ] 523 | for token, token_str, logprobs_token in zip( 524 | all_tokens, all_token_strs, all_logprobs 525 | ): 526 | text_offsets.append(text_offset) 527 | text_offset += len(token_str) 528 | tokens.append(token_str) 529 | sorted_logprobs = list( 530 | sorted( 531 | zip(logprobs_token, range(len(logprobs_token))), reverse=True 532 | ) 533 | ) 534 | token_logprobs.append(sorted_logprobs[int(token)][0]) 535 | top_logprob = { 536 | self.detokenize([llama_cpp.llama_token(i)]).decode("utf-8"): logprob 537 | for logprob, i in sorted_logprobs[:logprobs] 538 | } 539 | top_logprob.update({token_str: sorted_logprobs[int(token)][0]}) 540 | top_logprobs.append(top_logprob) 541 | logprobs_or_none = { 542 | "tokens": tokens, 543 | "text_offset": text_offsets, 544 | "token_logprobs": token_logprobs, 545 | "top_logprobs": top_logprobs, 546 | } 547 | 548 | if self.verbose: 549 | llama_cpp.llama_print_timings(self.ctx) 550 | 551 | yield { 552 | "id": completion_id, 553 | "object": "text_completion", 554 | "created": created, 555 | "model": self.model_path, 556 | "choices": [ 557 | { 558 | "text": text_str, 559 | "index": 0, 560 | "logprobs": logprobs_or_none, 561 | "finish_reason": finish_reason, 562 | } 563 | ], 564 | "usage": { 565 | "prompt_tokens": len(prompt_tokens), 566 | "completion_tokens": len(completion_tokens), 567 | "total_tokens": len(prompt_tokens) + len(completion_tokens), 568 | }, 569 | } 570 | 571 | def create_completion( 572 | self, 573 | prompt: str, 574 | suffix: Optional[str] = None, 575 | max_tokens: int = 128, 576 | 
temperature: float = 0.8, 577 | top_p: float = 0.95, 578 | logprobs: Optional[int] = None, 579 | echo: bool = False, 580 | stop: Optional[List[str]] = [], 581 | repeat_penalty: float = 1.1, 582 | top_k: int = 40, 583 | stream: bool = False, 584 | ) -> Union[Completion, Iterator[CompletionChunk]]: 585 | """Generate text from a prompt. 586 | 587 | Args: 588 | prompt: The prompt to generate text from. 589 | suffix: A suffix to append to the generated text. If None, no suffix is appended. 590 | max_tokens: The maximum number of tokens to generate. 591 | temperature: The temperature to use for sampling. 592 | top_p: The top-p value to use for sampling. 593 | logprobs: The number of logprobs to return. If None, no logprobs are returned. 594 | echo: Whether to echo the prompt. 595 | stop: A list of strings to stop generation when encountered. 596 | repeat_penalty: The penalty to apply to repeated tokens. 597 | top_k: The top-k value to use for sampling. 598 | stream: Whether to stream the results. 599 | 600 | Raises: 601 | ValueError: If the requested tokens exceed the context window. 602 | RuntimeError: If the prompt fails to tokenize or the model fails to evaluate the prompt. 603 | 604 | Returns: 605 | Response object containing the generated text. 606 | """ 607 | completion_or_chunks = self._create_completion( 608 | prompt=prompt, 609 | suffix=suffix, 610 | max_tokens=max_tokens, 611 | temperature=temperature, 612 | top_p=top_p, 613 | logprobs=logprobs, 614 | echo=echo, 615 | stop=stop, 616 | repeat_penalty=repeat_penalty, 617 | top_k=top_k, 618 | stream=stream, 619 | ) 620 | if stream: 621 | chunks: Iterator[CompletionChunk] = completion_or_chunks 622 | return chunks 623 | completion: Completion = next(completion_or_chunks) # type: ignore 624 | return completion 625 | 626 | def __call__( 627 | self, 628 | prompt: str, 629 | suffix: Optional[str] = None, 630 | max_tokens: int = 128, 631 | temperature: float = 0.8, 632 | top_p: float = 0.95, 633 | logprobs: Optional[int] = None, 634 | echo: bool = False, 635 | stop: Optional[List[str]] = [], 636 | repeat_penalty: float = 1.1, 637 | top_k: int = 40, 638 | stream: bool = False, 639 | ) -> Union[Completion, Iterator[CompletionChunk]]: 640 | """Generate text from a prompt. 641 | 642 | Args: 643 | prompt: The prompt to generate text from. 644 | suffix: A suffix to append to the generated text. If None, no suffix is appended. 645 | max_tokens: The maximum number of tokens to generate. 646 | temperature: The temperature to use for sampling. 647 | top_p: The top-p value to use for sampling. 648 | logprobs: The number of logprobs to return. If None, no logprobs are returned. 649 | echo: Whether to echo the prompt. 650 | stop: A list of strings to stop generation when encountered. 651 | repeat_penalty: The penalty to apply to repeated tokens. 652 | top_k: The top-k value to use for sampling. 653 | stream: Whether to stream the results. 654 | 655 | Raises: 656 | ValueError: If the requested tokens exceed the context window. 657 | RuntimeError: If the prompt fails to tokenize or the model fails to evaluate the prompt. 658 | 659 | Returns: 660 | Response object containing the generated text. 
661 | """ 662 | return self.create_completion( 663 | prompt=prompt, 664 | suffix=suffix, 665 | max_tokens=max_tokens, 666 | temperature=temperature, 667 | top_p=top_p, 668 | logprobs=logprobs, 669 | echo=echo, 670 | stop=stop, 671 | repeat_penalty=repeat_penalty, 672 | top_k=top_k, 673 | stream=stream, 674 | ) 675 | 676 | def _convert_text_completion_to_chat( 677 | self, completion: Completion 678 | ) -> ChatCompletion: 679 | return { 680 | "id": "chat" + completion["id"], 681 | "object": "chat.completion", 682 | "created": completion["created"], 683 | "model": completion["model"], 684 | "choices": [ 685 | { 686 | "index": 0, 687 | "message": { 688 | "role": "assistant", 689 | "content": completion["choices"][0]["text"], 690 | }, 691 | "finish_reason": completion["choices"][0]["finish_reason"], 692 | } 693 | ], 694 | "usage": completion["usage"], 695 | } 696 | 697 | def _convert_text_completion_chunks_to_chat( 698 | self, 699 | chunks: Iterator[CompletionChunk], 700 | ) -> Iterator[ChatCompletionChunk]: 701 | for i, chunk in enumerate(chunks): 702 | if i == 0: 703 | yield { 704 | "id": "chat" + chunk["id"], 705 | "model": chunk["model"], 706 | "created": chunk["created"], 707 | "object": "chat.completion.chunk", 708 | "choices": [ 709 | { 710 | "index": 0, 711 | "delta": { 712 | "role": "assistant", 713 | }, 714 | "finish_reason": None, 715 | } 716 | ], 717 | } 718 | yield { 719 | "id": "chat" + chunk["id"], 720 | "model": chunk["model"], 721 | "created": chunk["created"], 722 | "object": "chat.completion.chunk", 723 | "choices": [ 724 | { 725 | "index": 0, 726 | "delta": { 727 | "content": chunk["choices"][0]["text"], 728 | }, 729 | "finish_reason": chunk["choices"][0]["finish_reason"], 730 | } 731 | ], 732 | } 733 | 734 | def create_chat_completion( 735 | self, 736 | messages: List[ChatCompletionMessage], 737 | temperature: float = 0.2, 738 | top_p: float = 0.95, 739 | top_k: int = 40, 740 | stream: bool = False, 741 | stop: Optional[List[str]] = [], 742 | max_tokens: int = 256, 743 | repeat_penalty: float = 1.1, 744 | ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]: 745 | """Generate a chat completion from a list of messages. 746 | 747 | Args: 748 | messages: A list of messages to generate a response for. 749 | temperature: The temperature to use for sampling. 750 | top_p: The top-p value to use for sampling. 751 | top_k: The top-k value to use for sampling. 752 | stream: Whether to stream the results. 753 | stop: A list of strings to stop generation when encountered. 754 | max_tokens: The maximum number of tokens to generate. 755 | repeat_penalty: The penalty to apply to repeated tokens. 756 | 757 | Returns: 758 | Generated chat completion or a stream of chat completion chunks. 
759 | """ 760 | stop = stop if stop is not None else [] 761 | chat_history = "".join( 762 | f'### {"Human" if message["role"] == "user" else "Assistant"}:{message["content"]}' 763 | for message in messages 764 | ) 765 | PROMPT = chat_history + "### Assistant:" 766 | PROMPT_STOP = ["### Assistant:", "### Human:"] 767 | completion_or_chunks = self( 768 | prompt=PROMPT, 769 | stop=PROMPT_STOP + stop, 770 | temperature=temperature, 771 | top_p=top_p, 772 | top_k=top_k, 773 | stream=stream, 774 | max_tokens=max_tokens, 775 | repeat_penalty=repeat_penalty, 776 | ) 777 | if stream: 778 | chunks: Iterator[CompletionChunk] = completion_or_chunks # type: ignore 779 | return self._convert_text_completion_chunks_to_chat(chunks) 780 | else: 781 | completion: Completion = completion_or_chunks # type: ignore 782 | return self._convert_text_completion_to_chat(completion) 783 | 784 | def __del__(self): 785 | if self.ctx is not None: 786 | llama_cpp.llama_free(self.ctx) 787 | self.ctx = None 788 | 789 | def __getstate__(self): 790 | return dict( 791 | verbose=self.verbose, 792 | model_path=self.model_path, 793 | n_ctx=self.params.n_ctx, 794 | n_parts=self.params.n_parts, 795 | seed=self.params.seed, 796 | f16_kv=self.params.f16_kv, 797 | logits_all=self.params.logits_all, 798 | vocab_only=self.params.vocab_only, 799 | use_mmap=self.params.use_mmap, 800 | use_mlock=self.params.use_mlock, 801 | embedding=self.params.embedding, 802 | last_n_tokens_size=self.last_n_tokens_size, 803 | n_batch=self.n_batch, 804 | n_threads=self.n_threads, 805 | ) 806 | 807 | def __setstate__(self, state): 808 | self.__init__( 809 | model_path=state["model_path"], 810 | n_ctx=state["n_ctx"], 811 | n_parts=state["n_parts"], 812 | seed=state["seed"], 813 | f16_kv=state["f16_kv"], 814 | logits_all=state["logits_all"], 815 | vocab_only=state["vocab_only"], 816 | use_mmap=state["use_mmap"], 817 | use_mlock=state["use_mlock"], 818 | embedding=state["embedding"], 819 | n_threads=state["n_threads"], 820 | n_batch=state["n_batch"], 821 | last_n_tokens_size=state["last_n_tokens_size"], 822 | verbose=state["verbose"], 823 | ) 824 | 825 | @staticmethod 826 | def token_eos() -> llama_cpp.llama_token: 827 | """Return the end-of-sequence token.""" 828 | return llama_cpp.llama_token_eos() 829 | 830 | @staticmethod 831 | def token_bos() -> llama_cpp.llama_token: 832 | """Return the beginning-of-sequence token.""" 833 | return llama_cpp.llama_token_bos() 834 | 835 | @staticmethod 836 | def logit_to_logprob(x: float) -> float: 837 | return math.log(1.0 + math.exp(x)) 838 | -------------------------------------------------------------------------------- /poetry.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Poetry 1.4.1 and should not be changed by hand. 
2 | 3 | [[package]] 4 | name = "attrs" 5 | version = "22.2.0" 6 | description = "Classes Without Boilerplate" 7 | category = "dev" 8 | optional = false 9 | python-versions = ">=3.6" 10 | files = [ 11 | {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, 12 | {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, 13 | ] 14 | 15 | [package.extras] 16 | cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] 17 | dev = ["attrs[docs,tests]"] 18 | docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] 19 | tests = ["attrs[tests-no-zope]", "zope.interface"] 20 | tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] 21 | 22 | [[package]] 23 | name = "black" 24 | version = "23.1.0" 25 | description = "The uncompromising code formatter." 26 | category = "dev" 27 | optional = false 28 | python-versions = ">=3.7" 29 | files = [ 30 | {file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"}, 31 | {file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"}, 32 | {file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"}, 33 | {file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"}, 34 | {file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"}, 35 | {file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"}, 36 | {file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"}, 37 | {file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"}, 38 | {file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"}, 39 | {file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"}, 40 | {file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"}, 41 | {file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"}, 42 | {file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"}, 43 | {file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"}, 44 | {file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"}, 45 | {file = 
"black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"}, 46 | {file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"}, 47 | {file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"}, 48 | {file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"}, 49 | {file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"}, 50 | {file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"}, 51 | {file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"}, 52 | {file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"}, 53 | {file = "black-23.1.0-py3-none-any.whl", hash = "sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"}, 54 | {file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"}, 55 | ] 56 | 57 | [package.dependencies] 58 | click = ">=8.0.0" 59 | mypy-extensions = ">=0.4.3" 60 | packaging = ">=22.0" 61 | pathspec = ">=0.9.0" 62 | platformdirs = ">=2" 63 | tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} 64 | typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} 65 | 66 | [package.extras] 67 | colorama = ["colorama (>=0.4.3)"] 68 | d = ["aiohttp (>=3.7.4)"] 69 | jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] 70 | uvloop = ["uvloop (>=0.15.2)"] 71 | 72 | [[package]] 73 | name = "bleach" 74 | version = "6.0.0" 75 | description = "An easy safelist-based HTML-sanitizing tool." 76 | category = "dev" 77 | optional = false 78 | python-versions = ">=3.7" 79 | files = [ 80 | {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, 81 | {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, 82 | ] 83 | 84 | [package.dependencies] 85 | six = ">=1.9.0" 86 | webencodings = "*" 87 | 88 | [package.extras] 89 | css = ["tinycss2 (>=1.1.0,<1.2)"] 90 | 91 | [[package]] 92 | name = "certifi" 93 | version = "2022.12.7" 94 | description = "Python package for providing Mozilla's CA Bundle." 95 | category = "dev" 96 | optional = false 97 | python-versions = ">=3.6" 98 | files = [ 99 | {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, 100 | {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, 101 | ] 102 | 103 | [[package]] 104 | name = "cffi" 105 | version = "1.15.1" 106 | description = "Foreign Function Interface for Python calling C code." 
107 | category = "dev" 108 | optional = false 109 | python-versions = "*" 110 | files = [ 111 | {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, 112 | {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, 113 | {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, 114 | {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, 115 | {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, 116 | {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, 117 | {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, 118 | {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, 119 | {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, 120 | {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, 121 | {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, 122 | {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, 123 | {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, 124 | {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, 125 | {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, 126 | {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, 127 | {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, 128 | {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, 129 | {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, 130 | {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, 131 | {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, 132 | {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, 133 | {file = 
"cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, 134 | {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, 135 | {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, 136 | {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, 137 | {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, 138 | {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, 139 | {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, 140 | {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, 141 | {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, 142 | {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, 143 | {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, 144 | {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, 145 | {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, 146 | {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, 147 | {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, 148 | {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, 149 | {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, 150 | {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, 151 | {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, 152 | {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, 153 | {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, 154 | {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, 155 | {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, 156 | {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, 157 | {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, 158 | {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, 159 | {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, 160 | {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, 161 | {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, 162 | {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, 163 | {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, 164 | {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, 165 | {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, 166 | {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, 167 | {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, 168 | {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, 169 | {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, 170 | {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, 171 | {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, 172 | {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, 173 | {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, 174 | {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, 175 | ] 176 | 177 | [package.dependencies] 178 | pycparser = "*" 179 | 180 | [[package]] 181 | name = "charset-normalizer" 182 | version = "3.1.0" 183 | description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
184 | category = "dev" 185 | optional = false 186 | python-versions = ">=3.7.0" 187 | files = [ 188 | {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, 189 | {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, 190 | {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, 191 | {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, 192 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, 193 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, 194 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, 195 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, 196 | {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, 197 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, 198 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, 199 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, 200 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, 201 | {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, 202 | {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, 203 | {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, 204 | {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, 205 | {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, 206 | {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, 207 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, 208 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, 209 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, 210 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, 211 | {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, 212 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, 213 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, 214 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, 215 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, 216 | {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, 217 | {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, 218 | {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, 219 | {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, 220 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, 221 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, 222 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, 223 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, 224 | {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, 225 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, 226 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, 227 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, 228 | {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, 229 | {file = 
"charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, 230 | {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, 231 | {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, 232 | {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, 233 | {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, 234 | {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, 235 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, 236 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, 237 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, 238 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, 239 | {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, 240 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, 241 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, 242 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, 243 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, 244 | {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, 245 | {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, 246 | {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, 247 | {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, 248 | {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, 249 | {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, 250 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, 251 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, 252 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, 253 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, 254 | {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, 255 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, 256 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, 257 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, 258 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, 259 | {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, 260 | {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, 261 | {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, 262 | {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, 263 | ] 264 | 265 | [[package]] 266 | name = "click" 267 | version = "8.1.3" 268 | description = "Composable command line interface toolkit" 269 | category = "dev" 270 | optional = false 271 | python-versions = ">=3.7" 272 | files = [ 273 | {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, 274 | {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, 275 | ] 276 | 277 | [package.dependencies] 278 | colorama = {version = "*", markers = "platform_system == \"Windows\""} 279 | 280 | [[package]] 281 | name = "colorama" 282 | version = "0.4.6" 283 | description = "Cross-platform colored terminal text." 284 | category = "dev" 285 | optional = false 286 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" 287 | files = [ 288 | {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, 289 | {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, 290 | ] 291 | 292 | [[package]] 293 | name = "cryptography" 294 | version = "39.0.2" 295 | description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
296 | category = "dev" 297 | optional = false 298 | python-versions = ">=3.6" 299 | files = [ 300 | {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:2725672bb53bb92dc7b4150d233cd4b8c59615cd8288d495eaa86db00d4e5c06"}, 301 | {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:23df8ca3f24699167daf3e23e51f7ba7334d504af63a94af468f468b975b7dd7"}, 302 | {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:eb40fe69cfc6f5cdab9a5ebd022131ba21453cf7b8a7fd3631f45bbf52bed612"}, 303 | {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc0521cce2c1d541634b19f3ac661d7a64f9555135e9d8af3980965be717fd4a"}, 304 | {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd394c7896ed7821a6d13b24657c6a34b6e2650bd84ae063cf11ccffa4f1a97"}, 305 | {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:e8a0772016feeb106efd28d4a328e77dc2edae84dfbac06061319fdb669ff828"}, 306 | {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8f35c17bd4faed2bc7797d2a66cbb4f986242ce2e30340ab832e5d99ae60e011"}, 307 | {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b49a88ff802e1993b7f749b1eeb31134f03c8d5c956e3c125c75558955cda536"}, 308 | {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c682e736513db7d04349b4f6693690170f95aac449c56f97415c6980edef5"}, 309 | {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d7d84a512a59f4412ca8549b01f94be4161c94efc598bf09d027d67826beddc0"}, 310 | {file = "cryptography-39.0.2-cp36-abi3-win32.whl", hash = "sha256:c43ac224aabcbf83a947eeb8b17eaf1547bce3767ee2d70093b461f31729a480"}, 311 | {file = "cryptography-39.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:788b3921d763ee35dfdb04248d0e3de11e3ca8eb22e2e48fef880c42e1f3c8f9"}, 312 | {file = "cryptography-39.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d15809e0dbdad486f4ad0979753518f47980020b7a34e9fc56e8be4f60702fac"}, 313 | {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:50cadb9b2f961757e712a9737ef33d89b8190c3ea34d0fb6675e00edbe35d074"}, 314 | {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:103e8f7155f3ce2ffa0049fe60169878d47a4364b277906386f8de21c9234aa1"}, 315 | {file = "cryptography-39.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6236a9610c912b129610eb1a274bdc1350b5df834d124fa84729ebeaf7da42c3"}, 316 | {file = "cryptography-39.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e944fe07b6f229f4c1a06a7ef906a19652bdd9fd54c761b0ff87e83ae7a30354"}, 317 | {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:35d658536b0a4117c885728d1a7032bdc9a5974722ae298d6c533755a6ee3915"}, 318 | {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:30b1d1bfd00f6fc80d11300a29f1d8ab2b8d9febb6ed4a38a76880ec564fae84"}, 319 | {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e029b844c21116564b8b61216befabca4b500e6816fa9f0ba49527653cae2108"}, 320 | {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fa507318e427169ade4e9eccef39e9011cdc19534f55ca2f36ec3f388c1f70f3"}, 321 | {file = 
"cryptography-39.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8bc0008ef798231fac03fe7d26e82d601d15bd16f3afaad1c6113771566570f3"}, 322 | {file = "cryptography-39.0.2.tar.gz", hash = "sha256:bc5b871e977c8ee5a1bbc42fa8d19bcc08baf0c51cbf1586b0e87a2694dde42f"}, 323 | ] 324 | 325 | [package.dependencies] 326 | cffi = ">=1.12" 327 | 328 | [package.extras] 329 | docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] 330 | docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] 331 | pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"] 332 | sdist = ["setuptools-rust (>=0.11.4)"] 333 | ssh = ["bcrypt (>=3.1.5)"] 334 | test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"] 335 | test-randomorder = ["pytest-randomly"] 336 | tox = ["tox"] 337 | 338 | [[package]] 339 | name = "docutils" 340 | version = "0.19" 341 | description = "Docutils -- Python Documentation Utilities" 342 | category = "dev" 343 | optional = false 344 | python-versions = ">=3.7" 345 | files = [ 346 | {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, 347 | {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, 348 | ] 349 | 350 | [[package]] 351 | name = "exceptiongroup" 352 | version = "1.1.1" 353 | description = "Backport of PEP 654 (exception groups)" 354 | category = "dev" 355 | optional = false 356 | python-versions = ">=3.7" 357 | files = [ 358 | {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, 359 | {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, 360 | ] 361 | 362 | [package.extras] 363 | test = ["pytest (>=6)"] 364 | 365 | [[package]] 366 | name = "ghp-import" 367 | version = "2.1.0" 368 | description = "Copy your docs directly to the gh-pages branch." 369 | category = "dev" 370 | optional = false 371 | python-versions = "*" 372 | files = [ 373 | {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, 374 | {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, 375 | ] 376 | 377 | [package.dependencies] 378 | python-dateutil = ">=2.8.1" 379 | 380 | [package.extras] 381 | dev = ["flake8", "markdown", "twine", "wheel"] 382 | 383 | [[package]] 384 | name = "griffe" 385 | version = "0.25.5" 386 | description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
387 | category = "dev" 388 | optional = false 389 | python-versions = ">=3.7" 390 | files = [ 391 | {file = "griffe-0.25.5-py3-none-any.whl", hash = "sha256:1fb9edff48e66d4873014a2ebf21aca5f271d0006a4c937826e3cf592ffb3706"}, 392 | {file = "griffe-0.25.5.tar.gz", hash = "sha256:11ea3403ef0560a1cbcf7f302eb5d21cf4c1d8ed3f8a16a75aa9f6f458caf3f1"}, 393 | ] 394 | 395 | [package.dependencies] 396 | colorama = ">=0.4" 397 | 398 | [package.extras] 399 | async = ["aiofiles (>=0.7,<1.0)"] 400 | 401 | [[package]] 402 | name = "idna" 403 | version = "3.4" 404 | description = "Internationalized Domain Names in Applications (IDNA)" 405 | category = "dev" 406 | optional = false 407 | python-versions = ">=3.5" 408 | files = [ 409 | {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, 410 | {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, 411 | ] 412 | 413 | [[package]] 414 | name = "importlib-metadata" 415 | version = "6.1.0" 416 | description = "Read metadata from Python packages" 417 | category = "dev" 418 | optional = false 419 | python-versions = ">=3.7" 420 | files = [ 421 | {file = "importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"}, 422 | {file = "importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"}, 423 | ] 424 | 425 | [package.dependencies] 426 | zipp = ">=0.5" 427 | 428 | [package.extras] 429 | docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] 430 | perf = ["ipython"] 431 | testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] 432 | 433 | [[package]] 434 | name = "importlib-resources" 435 | version = "5.12.0" 436 | description = "Read resources from Python packages" 437 | category = "dev" 438 | optional = false 439 | python-versions = ">=3.7" 440 | files = [ 441 | {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, 442 | {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, 443 | ] 444 | 445 | [package.dependencies] 446 | zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} 447 | 448 | [package.extras] 449 | docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] 450 | testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] 451 | 452 | [[package]] 453 | name = "iniconfig" 454 | version = "2.0.0" 455 | description = "brain-dead simple config-ini parsing" 456 | category = "dev" 457 | optional = false 458 | python-versions = ">=3.7" 459 | files = [ 460 | {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, 461 | {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, 462 | ] 463 | 464 | [[package]] 465 | name = "jaraco-classes" 466 | version = "3.2.3" 467 | 
description = "Utility functions for Python class constructs" 468 | category = "dev" 469 | optional = false 470 | python-versions = ">=3.7" 471 | files = [ 472 | {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"}, 473 | {file = "jaraco.classes-3.2.3.tar.gz", hash = "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"}, 474 | ] 475 | 476 | [package.dependencies] 477 | more-itertools = "*" 478 | 479 | [package.extras] 480 | docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] 481 | testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] 482 | 483 | [[package]] 484 | name = "jeepney" 485 | version = "0.8.0" 486 | description = "Low-level, pure Python DBus protocol wrapper." 487 | category = "dev" 488 | optional = false 489 | python-versions = ">=3.7" 490 | files = [ 491 | {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"}, 492 | {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"}, 493 | ] 494 | 495 | [package.extras] 496 | test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] 497 | trio = ["async_generator", "trio"] 498 | 499 | [[package]] 500 | name = "jinja2" 501 | version = "3.1.2" 502 | description = "A very fast and expressive template engine." 503 | category = "dev" 504 | optional = false 505 | python-versions = ">=3.7" 506 | files = [ 507 | {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, 508 | {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, 509 | ] 510 | 511 | [package.dependencies] 512 | MarkupSafe = ">=2.0" 513 | 514 | [package.extras] 515 | i18n = ["Babel (>=2.7)"] 516 | 517 | [[package]] 518 | name = "keyring" 519 | version = "23.13.1" 520 | description = "Store and access your passwords safely." 521 | category = "dev" 522 | optional = false 523 | python-versions = ">=3.7" 524 | files = [ 525 | {file = "keyring-23.13.1-py3-none-any.whl", hash = "sha256:771ed2a91909389ed6148631de678f82ddc73737d85a927f382a8a1b157898cd"}, 526 | {file = "keyring-23.13.1.tar.gz", hash = "sha256:ba2e15a9b35e21908d0aaf4e0a47acc52d6ae33444df0da2b49d41a46ef6d678"}, 527 | ] 528 | 529 | [package.dependencies] 530 | importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} 531 | importlib-resources = {version = "*", markers = "python_version < \"3.9\""} 532 | "jaraco.classes" = "*" 533 | jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} 534 | pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} 535 | SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} 536 | 537 | [package.extras] 538 | completion = ["shtab"] 539 | docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] 540 | testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] 541 | 542 | [[package]] 543 | name = "markdown" 544 | version = "3.3.7" 545 | description = "Python implementation of Markdown." 
546 | category = "dev" 547 | optional = false 548 | python-versions = ">=3.6" 549 | files = [ 550 | {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"}, 551 | {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"}, 552 | ] 553 | 554 | [package.dependencies] 555 | importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} 556 | 557 | [package.extras] 558 | testing = ["coverage", "pyyaml"] 559 | 560 | [[package]] 561 | name = "markdown-it-py" 562 | version = "2.2.0" 563 | description = "Python port of markdown-it. Markdown parsing, done right!" 564 | category = "dev" 565 | optional = false 566 | python-versions = ">=3.7" 567 | files = [ 568 | {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, 569 | {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, 570 | ] 571 | 572 | [package.dependencies] 573 | mdurl = ">=0.1,<1.0" 574 | 575 | [package.extras] 576 | benchmarking = ["psutil", "pytest", "pytest-benchmark"] 577 | code-style = ["pre-commit (>=3.0,<4.0)"] 578 | compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] 579 | linkify = ["linkify-it-py (>=1,<3)"] 580 | plugins = ["mdit-py-plugins"] 581 | profiling = ["gprof2dot"] 582 | rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] 583 | testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] 584 | 585 | [[package]] 586 | name = "markupsafe" 587 | version = "2.1.2" 588 | description = "Safely add untrusted strings to HTML/XML markup." 
589 | category = "dev" 590 | optional = false 591 | python-versions = ">=3.7" 592 | files = [ 593 | {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, 594 | {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, 595 | {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, 596 | {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, 597 | {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, 598 | {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, 599 | {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, 600 | {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, 601 | {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, 602 | {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, 603 | {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, 604 | {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, 605 | {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, 606 | {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, 607 | {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, 608 | {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, 609 | {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, 610 | {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, 611 | {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, 612 | {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, 613 | {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, 614 | {file = 
"MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, 615 | {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, 616 | {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, 617 | {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, 618 | {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, 619 | {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, 620 | {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, 621 | {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, 622 | {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, 623 | {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, 624 | {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, 625 | {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, 626 | {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, 627 | {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, 628 | {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, 629 | {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, 630 | {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, 631 | {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, 632 | {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, 633 | {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, 634 | {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, 635 | {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, 636 | {file = 
"MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, 637 | {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, 638 | {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, 639 | {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, 640 | {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, 641 | {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, 642 | {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, 643 | ] 644 | 645 | [[package]] 646 | name = "mdurl" 647 | version = "0.1.2" 648 | description = "Markdown URL utilities" 649 | category = "dev" 650 | optional = false 651 | python-versions = ">=3.7" 652 | files = [ 653 | {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, 654 | {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, 655 | ] 656 | 657 | [[package]] 658 | name = "mergedeep" 659 | version = "1.3.4" 660 | description = "A deep merge function for 🐍." 661 | category = "dev" 662 | optional = false 663 | python-versions = ">=3.6" 664 | files = [ 665 | {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, 666 | {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, 667 | ] 668 | 669 | [[package]] 670 | name = "mkdocs" 671 | version = "1.4.2" 672 | description = "Project documentation with Markdown." 673 | category = "dev" 674 | optional = false 675 | python-versions = ">=3.7" 676 | files = [ 677 | {file = "mkdocs-1.4.2-py3-none-any.whl", hash = "sha256:c8856a832c1e56702577023cd64cc5f84948280c1c0fcc6af4cd39006ea6aa8c"}, 678 | {file = "mkdocs-1.4.2.tar.gz", hash = "sha256:8947af423a6d0facf41ea1195b8e1e8c85ad94ac95ae307fe11232e0424b11c5"}, 679 | ] 680 | 681 | [package.dependencies] 682 | click = ">=7.0" 683 | colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} 684 | ghp-import = ">=1.0" 685 | importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} 686 | jinja2 = ">=2.11.1" 687 | markdown = ">=3.2.1,<3.4" 688 | mergedeep = ">=1.3.4" 689 | packaging = ">=20.5" 690 | pyyaml = ">=5.1" 691 | pyyaml-env-tag = ">=0.1" 692 | watchdog = ">=2.0" 693 | 694 | [package.extras] 695 | i18n = ["babel (>=2.9.0)"] 696 | min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] 697 | 698 | [[package]] 699 | name = "mkdocs-autorefs" 700 | version = "0.4.1" 701 | description = "Automatically link across pages in MkDocs." 
702 | category = "dev" 703 | optional = false 704 | python-versions = ">=3.7" 705 | files = [ 706 | {file = "mkdocs-autorefs-0.4.1.tar.gz", hash = "sha256:70748a7bd025f9ecd6d6feeba8ba63f8e891a1af55f48e366d6d6e78493aba84"}, 707 | {file = "mkdocs_autorefs-0.4.1-py3-none-any.whl", hash = "sha256:a2248a9501b29dc0cc8ba4c09f4f47ff121945f6ce33d760f145d6f89d313f5b"}, 708 | ] 709 | 710 | [package.dependencies] 711 | Markdown = ">=3.3" 712 | mkdocs = ">=1.1" 713 | 714 | [[package]] 715 | name = "mkdocs-material" 716 | version = "9.1.4" 717 | description = "Documentation that simply works" 718 | category = "dev" 719 | optional = false 720 | python-versions = ">=3.7" 721 | files = [ 722 | {file = "mkdocs_material-9.1.4-py3-none-any.whl", hash = "sha256:4c92dcf9365068259bef3eed8e0dd5410056b6f7187bdea2d52848c0f94cd94c"}, 723 | {file = "mkdocs_material-9.1.4.tar.gz", hash = "sha256:c3a8943e9e4a7d2624291da365bbccf0b9f88688aa6947a46260d8c165cd4389"}, 724 | ] 725 | 726 | [package.dependencies] 727 | colorama = ">=0.4" 728 | jinja2 = ">=3.0" 729 | markdown = ">=3.2" 730 | mkdocs = ">=1.4.2" 731 | mkdocs-material-extensions = ">=1.1" 732 | pygments = ">=2.14" 733 | pymdown-extensions = ">=9.9.1" 734 | regex = ">=2022.4.24" 735 | requests = ">=2.26" 736 | 737 | [[package]] 738 | name = "mkdocs-material-extensions" 739 | version = "1.1.1" 740 | description = "Extension pack for Python Markdown and MkDocs Material." 741 | category = "dev" 742 | optional = false 743 | python-versions = ">=3.7" 744 | files = [ 745 | {file = "mkdocs_material_extensions-1.1.1-py3-none-any.whl", hash = "sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"}, 746 | {file = "mkdocs_material_extensions-1.1.1.tar.gz", hash = "sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93"}, 747 | ] 748 | 749 | [[package]] 750 | name = "mkdocstrings" 751 | version = "0.20.0" 752 | description = "Automatic documentation from sources, for MkDocs." 753 | category = "dev" 754 | optional = false 755 | python-versions = ">=3.7" 756 | files = [ 757 | {file = "mkdocstrings-0.20.0-py3-none-any.whl", hash = "sha256:f17fc2c4f760ec302b069075ef9e31045aa6372ca91d2f35ded3adba8e25a472"}, 758 | {file = "mkdocstrings-0.20.0.tar.gz", hash = "sha256:c757f4f646d4f939491d6bc9256bfe33e36c5f8026392f49eaa351d241c838e5"}, 759 | ] 760 | 761 | [package.dependencies] 762 | Jinja2 = ">=2.11.1" 763 | Markdown = ">=3.3" 764 | MarkupSafe = ">=1.1" 765 | mkdocs = ">=1.2" 766 | mkdocs-autorefs = ">=0.3.1" 767 | mkdocstrings-python = {version = ">=0.5.2", optional = true, markers = "extra == \"python\""} 768 | pymdown-extensions = ">=6.3" 769 | 770 | [package.extras] 771 | crystal = ["mkdocstrings-crystal (>=0.3.4)"] 772 | python = ["mkdocstrings-python (>=0.5.2)"] 773 | python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] 774 | 775 | [[package]] 776 | name = "mkdocstrings-python" 777 | version = "0.8.3" 778 | description = "A Python handler for mkdocstrings." 
779 | category = "dev" 780 | optional = false 781 | python-versions = ">=3.7" 782 | files = [ 783 | {file = "mkdocstrings-python-0.8.3.tar.gz", hash = "sha256:9ae473f6dc599339b09eee17e4d2b05d6ac0ec29860f3fc9b7512d940fc61adf"}, 784 | {file = "mkdocstrings_python-0.8.3-py3-none-any.whl", hash = "sha256:4e6e1cd6f37a785de0946ced6eb846eb2f5d891ac1cc2c7b832943d3529087a7"}, 785 | ] 786 | 787 | [package.dependencies] 788 | griffe = ">=0.24" 789 | mkdocstrings = ">=0.19" 790 | 791 | [[package]] 792 | name = "more-itertools" 793 | version = "9.1.0" 794 | description = "More routines for operating on iterables, beyond itertools" 795 | category = "dev" 796 | optional = false 797 | python-versions = ">=3.7" 798 | files = [ 799 | {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"}, 800 | {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"}, 801 | ] 802 | 803 | [[package]] 804 | name = "mypy-extensions" 805 | version = "1.0.0" 806 | description = "Type system extensions for programs checked with the mypy type checker." 807 | category = "dev" 808 | optional = false 809 | python-versions = ">=3.5" 810 | files = [ 811 | {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, 812 | {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, 813 | ] 814 | 815 | [[package]] 816 | name = "packaging" 817 | version = "23.0" 818 | description = "Core utilities for Python packages" 819 | category = "dev" 820 | optional = false 821 | python-versions = ">=3.7" 822 | files = [ 823 | {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, 824 | {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, 825 | ] 826 | 827 | [[package]] 828 | name = "pathspec" 829 | version = "0.11.1" 830 | description = "Utility library for gitignore style pattern matching of file paths." 831 | category = "dev" 832 | optional = false 833 | python-versions = ">=3.7" 834 | files = [ 835 | {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, 836 | {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, 837 | ] 838 | 839 | [[package]] 840 | name = "pkginfo" 841 | version = "1.9.6" 842 | description = "Query metadata from sdists / bdists / installed packages." 843 | category = "dev" 844 | optional = false 845 | python-versions = ">=3.6" 846 | files = [ 847 | {file = "pkginfo-1.9.6-py3-none-any.whl", hash = "sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546"}, 848 | {file = "pkginfo-1.9.6.tar.gz", hash = "sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046"}, 849 | ] 850 | 851 | [package.extras] 852 | testing = ["pytest", "pytest-cov"] 853 | 854 | [[package]] 855 | name = "platformdirs" 856 | version = "3.1.1" 857 | description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
858 | category = "dev" 859 | optional = false 860 | python-versions = ">=3.7" 861 | files = [ 862 | {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, 863 | {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, 864 | ] 865 | 866 | [package.extras] 867 | docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] 868 | test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] 869 | 870 | [[package]] 871 | name = "pluggy" 872 | version = "1.0.0" 873 | description = "plugin and hook calling mechanisms for python" 874 | category = "dev" 875 | optional = false 876 | python-versions = ">=3.6" 877 | files = [ 878 | {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, 879 | {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, 880 | ] 881 | 882 | [package.extras] 883 | dev = ["pre-commit", "tox"] 884 | testing = ["pytest", "pytest-benchmark"] 885 | 886 | [[package]] 887 | name = "pycparser" 888 | version = "2.21" 889 | description = "C parser in Python" 890 | category = "dev" 891 | optional = false 892 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 893 | files = [ 894 | {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, 895 | {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, 896 | ] 897 | 898 | [[package]] 899 | name = "pygments" 900 | version = "2.14.0" 901 | description = "Pygments is a syntax highlighting package written in Python." 902 | category = "dev" 903 | optional = false 904 | python-versions = ">=3.6" 905 | files = [ 906 | {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"}, 907 | {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"}, 908 | ] 909 | 910 | [package.extras] 911 | plugins = ["importlib-metadata"] 912 | 913 | [[package]] 914 | name = "pymdown-extensions" 915 | version = "9.10" 916 | description = "Extension pack for Python Markdown." 
917 | category = "dev" 918 | optional = false 919 | python-versions = ">=3.7" 920 | files = [ 921 | {file = "pymdown_extensions-9.10-py3-none-any.whl", hash = "sha256:31eaa76ce6f96aabfcea98787c2fff2c5c0611b20a53a94213970cfbf05f02b8"}, 922 | {file = "pymdown_extensions-9.10.tar.gz", hash = "sha256:562c38eee4ce3f101ce631b804bfc2177a8a76c7e4dc908871fb6741a90257a7"}, 923 | ] 924 | 925 | [package.dependencies] 926 | markdown = ">=3.2" 927 | pyyaml = "*" 928 | 929 | [[package]] 930 | name = "pytest" 931 | version = "7.2.2" 932 | description = "pytest: simple powerful testing with Python" 933 | category = "dev" 934 | optional = false 935 | python-versions = ">=3.7" 936 | files = [ 937 | {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, 938 | {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, 939 | ] 940 | 941 | [package.dependencies] 942 | attrs = ">=19.2.0" 943 | colorama = {version = "*", markers = "sys_platform == \"win32\""} 944 | exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} 945 | iniconfig = "*" 946 | packaging = "*" 947 | pluggy = ">=0.12,<2.0" 948 | tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} 949 | 950 | [package.extras] 951 | testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] 952 | 953 | [[package]] 954 | name = "python-dateutil" 955 | version = "2.8.2" 956 | description = "Extensions to the standard Python datetime module" 957 | category = "dev" 958 | optional = false 959 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" 960 | files = [ 961 | {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, 962 | {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, 963 | ] 964 | 965 | [package.dependencies] 966 | six = ">=1.5" 967 | 968 | [[package]] 969 | name = "pywin32-ctypes" 970 | version = "0.2.0" 971 | description = "" 972 | category = "dev" 973 | optional = false 974 | python-versions = "*" 975 | files = [ 976 | {file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"}, 977 | {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"}, 978 | ] 979 | 980 | [[package]] 981 | name = "pyyaml" 982 | version = "6.0" 983 | description = "YAML parser and emitter for Python" 984 | category = "dev" 985 | optional = false 986 | python-versions = ">=3.6" 987 | files = [ 988 | {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, 989 | {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, 990 | {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, 991 | {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, 992 | {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, 993 | {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, 994 | {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, 995 | {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, 996 | {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, 997 | {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, 998 | {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, 999 | {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, 1000 | {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, 1001 | {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, 1002 | {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, 1003 | {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, 1004 | {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, 1005 | {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, 1006 | {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, 1007 | {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, 1008 | {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, 1009 | {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, 1010 | {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, 1011 | {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, 1012 | {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, 1013 | {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, 1014 | {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, 1015 | {file = 
"PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, 1016 | {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, 1017 | {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, 1018 | {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, 1019 | {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, 1020 | {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, 1021 | {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, 1022 | {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, 1023 | {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, 1024 | {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, 1025 | {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, 1026 | {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, 1027 | {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, 1028 | ] 1029 | 1030 | [[package]] 1031 | name = "pyyaml-env-tag" 1032 | version = "0.1" 1033 | description = "A custom YAML tag for referencing environment variables in YAML files. " 1034 | category = "dev" 1035 | optional = false 1036 | python-versions = ">=3.6" 1037 | files = [ 1038 | {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, 1039 | {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, 1040 | ] 1041 | 1042 | [package.dependencies] 1043 | pyyaml = "*" 1044 | 1045 | [[package]] 1046 | name = "readme-renderer" 1047 | version = "37.3" 1048 | description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse" 1049 | category = "dev" 1050 | optional = false 1051 | python-versions = ">=3.7" 1052 | files = [ 1053 | {file = "readme_renderer-37.3-py3-none-any.whl", hash = "sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343"}, 1054 | {file = "readme_renderer-37.3.tar.gz", hash = "sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273"}, 1055 | ] 1056 | 1057 | [package.dependencies] 1058 | bleach = ">=2.1.0" 1059 | docutils = ">=0.13.1" 1060 | Pygments = ">=2.5.1" 1061 | 1062 | [package.extras] 1063 | md = ["cmarkgfm (>=0.8.0)"] 1064 | 1065 | [[package]] 1066 | name = "regex" 1067 | version = "2023.3.23" 1068 | description = "Alternative regular expression module, to replace re." 
1069 | category = "dev" 1070 | optional = false 1071 | python-versions = ">=3.8" 1072 | files = [ 1073 | {file = "regex-2023.3.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:845a5e2d84389c4ddada1a9b95c055320070f18bb76512608374aca00d22eca8"}, 1074 | {file = "regex-2023.3.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87d9951f5a538dd1d016bdc0dcae59241d15fa94860964833a54d18197fcd134"}, 1075 | {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae17d3be44c0b3f782c28ae9edd8b47c1f1776d4cabe87edc0b98e1f12b021"}, 1076 | {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b8eb1e3bca6b48dc721818a60ae83b8264d4089a4a41d62be6d05316ec38e15"}, 1077 | {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df45fac182ebc3c494460c644e853515cc24f5ad9da05f8ffb91da891bfee879"}, 1078 | {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7006105b10b59971d3b248ad75acc3651c7e4cf54d81694df5a5130a3c3f7ea"}, 1079 | {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93f3f1aa608380fe294aa4cb82e2afda07a7598e828d0341e124b8fd9327c715"}, 1080 | {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787954f541ab95d8195d97b0b8cf1dc304424adb1e07365967e656b92b38a699"}, 1081 | {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:20abe0bdf03630fe92ccafc45a599bca8b3501f48d1de4f7d121153350a2f77d"}, 1082 | {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11d00c31aeab9a6e0503bc77e73ed9f4527b3984279d997eb145d7c7be6268fd"}, 1083 | {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d5bbe0e1511b844794a3be43d6c145001626ba9a6c1db8f84bdc724e91131d9d"}, 1084 | {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ea3c0cb56eadbf4ab2277e7a095676370b3e46dbfc74d5c383bd87b0d6317910"}, 1085 | {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d895b4c863059a4934d3e874b90998df774644a41b349ebb330f85f11b4ef2c0"}, 1086 | {file = "regex-2023.3.23-cp310-cp310-win32.whl", hash = "sha256:9d764514d19b4edcc75fd8cb1423448ef393e8b6cbd94f38cab983ab1b75855d"}, 1087 | {file = "regex-2023.3.23-cp310-cp310-win_amd64.whl", hash = "sha256:11d1f2b7a0696dc0310de0efb51b1f4d813ad4401fe368e83c0c62f344429f98"}, 1088 | {file = "regex-2023.3.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8a9c63cde0eaa345795c0fdeb19dc62d22e378c50b0bc67bf4667cd5b482d98b"}, 1089 | {file = "regex-2023.3.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dd7200b4c27b68cf9c9646da01647141c6db09f48cc5b51bc588deaf8e98a797"}, 1090 | {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22720024b90a6ba673a725dcc62e10fb1111b889305d7c6b887ac7466b74bedb"}, 1091 | {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b190a339090e6af25f4a5fd9e77591f6d911cc7b96ecbb2114890b061be0ac1"}, 1092 | {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e76b6fc0d8e9efa39100369a9b3379ce35e20f6c75365653cf58d282ad290f6f"}, 1093 | {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7868b8f218bf69a2a15402fde08b08712213a1f4b85a156d90473a6fb6b12b09"}, 1094 | {file = "regex-2023.3.23-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2472428efc4127374f494e570e36b30bb5e6b37d9a754f7667f7073e43b0abdd"}, 1095 | {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c37df2a060cb476d94c047b18572ee2b37c31f831df126c0da3cd9227b39253d"}, 1096 | {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4479f9e2abc03362df4045b1332d4a2b7885b245a30d4f4b051c4083b97d95d8"}, 1097 | {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e2396e0678167f2d0c197da942b0b3fb48fee2f0b5915a0feb84d11b6686afe6"}, 1098 | {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75f288c60232a5339e0ff2fa05779a5e9c74e9fc085c81e931d4a264501e745b"}, 1099 | {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c869260aa62cee21c5eb171a466c0572b5e809213612ef8d495268cd2e34f20d"}, 1100 | {file = "regex-2023.3.23-cp311-cp311-win32.whl", hash = "sha256:25f0532fd0c53e96bad84664171969de9673b4131f2297f1db850d3918d58858"}, 1101 | {file = "regex-2023.3.23-cp311-cp311-win_amd64.whl", hash = "sha256:5ccfafd98473e007cebf7da10c1411035b7844f0f204015efd050601906dbb53"}, 1102 | {file = "regex-2023.3.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6572ff287176c0fb96568adb292674b421fa762153ed074d94b1d939ed92c253"}, 1103 | {file = "regex-2023.3.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a610e0adfcb0fc84ea25f6ea685e39e74cbcd9245a72a9a7aab85ff755a5ed27"}, 1104 | {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086afe222d58b88b62847bdbd92079b4699350b4acab892f88a935db5707c790"}, 1105 | {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79e29fd62fa2f597a6754b247356bda14b866131a22444d67f907d6d341e10f3"}, 1106 | {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c07ce8e9eee878a48ebeb32ee661b49504b85e164b05bebf25420705709fdd31"}, 1107 | {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b036f401895e854de9fefe061518e78d506d8a919cc250dc3416bca03f6f9a"}, 1108 | {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78ac8dd8e18800bb1f97aad0d73f68916592dddf233b99d2b5cabc562088503a"}, 1109 | {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:539dd010dc35af935b32f248099e38447bbffc10b59c2b542bceead2bed5c325"}, 1110 | {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9bf4a5626f2a0ea006bf81e8963f498a57a47d58907eaa58f4b3e13be68759d8"}, 1111 | {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf86b4328c204c3f315074a61bc1c06f8a75a8e102359f18ce99fbcbbf1951f0"}, 1112 | {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:2848bf76673c83314068241c8d5b7fa9ad9bed866c979875a0e84039349e8fa7"}, 1113 | {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c125a02d22c555e68f7433bac8449992fa1cead525399f14e47c2d98f2f0e467"}, 1114 | {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cd1671e9d5ac05ce6aa86874dd8dfa048824d1dbe73060851b310c6c1a201a96"}, 1115 | {file = 
"regex-2023.3.23-cp38-cp38-win32.whl", hash = "sha256:fffe57312a358be6ec6baeb43d253c36e5790e436b7bf5b7a38df360363e88e9"}, 1116 | {file = "regex-2023.3.23-cp38-cp38-win_amd64.whl", hash = "sha256:dbb3f87e15d3dd76996d604af8678316ad2d7d20faa394e92d9394dfd621fd0c"}, 1117 | {file = "regex-2023.3.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c88e8c226473b5549fe9616980ea7ca09289246cfbdf469241edf4741a620004"}, 1118 | {file = "regex-2023.3.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6560776ec19c83f3645bbc5db64a7a5816c9d8fb7ed7201c5bcd269323d88072"}, 1119 | {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b1fc2632c01f42e06173d8dd9bb2e74ab9b0afa1d698058c867288d2c7a31f3"}, 1120 | {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdf7ad455f1916b8ea5cdbc482d379f6daf93f3867b4232d14699867a5a13af7"}, 1121 | {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5fc33b27b1d800fc5b78d7f7d0f287e35079ecabe68e83d46930cf45690e1c8c"}, 1122 | {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c49552dc938e3588f63f8a78c86f3c9c75301e813bca0bef13bdb4b87ccf364"}, 1123 | {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e152461e9a0aedec7d37fc66ec0fa635eca984777d3d3c3e36f53bf3d3ceb16e"}, 1124 | {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:db034255e72d2995cf581b14bb3fc9c00bdbe6822b49fcd4eef79e1d5f232618"}, 1125 | {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:55ae114da21b7a790b90255ea52d2aa3a0d121a646deb2d3c6a3194e722fc762"}, 1126 | {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ef3f528fe1cc3d139508fe1b22523745aa77b9d6cb5b0bf277f48788ee0b993f"}, 1127 | {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:a81c9ec59ca2303acd1ccd7b9ac409f1e478e40e96f8f79b943be476c5fdb8bb"}, 1128 | {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cde09c4fdd070772aa2596d97e942eb775a478b32459e042e1be71b739d08b77"}, 1129 | {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3cd9f5dd7b821f141d3a6ca0d5d9359b9221e4f051ca3139320adea9f1679691"}, 1130 | {file = "regex-2023.3.23-cp39-cp39-win32.whl", hash = "sha256:7304863f3a652dab5e68e6fb1725d05ebab36ec0390676d1736e0571ebb713ef"}, 1131 | {file = "regex-2023.3.23-cp39-cp39-win_amd64.whl", hash = "sha256:54c3fa855a3f7438149de3211738dd9b5f0c733f48b54ae05aa7fce83d48d858"}, 1132 | {file = "regex-2023.3.23.tar.gz", hash = "sha256:dc80df325b43ffea5cdea2e3eaa97a44f3dd298262b1c7fe9dbb2a9522b956a7"}, 1133 | ] 1134 | 1135 | [[package]] 1136 | name = "requests" 1137 | version = "2.28.2" 1138 | description = "Python HTTP for Humans." 
1139 | category = "dev" 1140 | optional = false 1141 | python-versions = ">=3.7, <4" 1142 | files = [ 1143 | {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, 1144 | {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, 1145 | ] 1146 | 1147 | [package.dependencies] 1148 | certifi = ">=2017.4.17" 1149 | charset-normalizer = ">=2,<4" 1150 | idna = ">=2.5,<4" 1151 | urllib3 = ">=1.21.1,<1.27" 1152 | 1153 | [package.extras] 1154 | socks = ["PySocks (>=1.5.6,!=1.5.7)"] 1155 | use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] 1156 | 1157 | [[package]] 1158 | name = "requests-toolbelt" 1159 | version = "0.10.1" 1160 | description = "A utility belt for advanced users of python-requests" 1161 | category = "dev" 1162 | optional = false 1163 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 1164 | files = [ 1165 | {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"}, 1166 | {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"}, 1167 | ] 1168 | 1169 | [package.dependencies] 1170 | requests = ">=2.0.1,<3.0.0" 1171 | 1172 | [[package]] 1173 | name = "rfc3986" 1174 | version = "2.0.0" 1175 | description = "Validating URI References per RFC 3986" 1176 | category = "dev" 1177 | optional = false 1178 | python-versions = ">=3.7" 1179 | files = [ 1180 | {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, 1181 | {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, 1182 | ] 1183 | 1184 | [package.extras] 1185 | idna2008 = ["idna"] 1186 | 1187 | [[package]] 1188 | name = "rich" 1189 | version = "13.3.2" 1190 | description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" 1191 | category = "dev" 1192 | optional = false 1193 | python-versions = ">=3.7.0" 1194 | files = [ 1195 | {file = "rich-13.3.2-py3-none-any.whl", hash = "sha256:a104f37270bf677148d8acb07d33be1569eeee87e2d1beb286a4e9113caf6f2f"}, 1196 | {file = "rich-13.3.2.tar.gz", hash = "sha256:91954fe80cfb7985727a467ca98a7618e5dd15178cc2da10f553b36a93859001"}, 1197 | ] 1198 | 1199 | [package.dependencies] 1200 | markdown-it-py = ">=2.2.0,<3.0.0" 1201 | pygments = ">=2.13.0,<3.0.0" 1202 | typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} 1203 | 1204 | [package.extras] 1205 | jupyter = ["ipywidgets (>=7.5.1,<9)"] 1206 | 1207 | [[package]] 1208 | name = "secretstorage" 1209 | version = "3.3.3" 1210 | description = "Python bindings to FreeDesktop.org Secret Service API" 1211 | category = "dev" 1212 | optional = false 1213 | python-versions = ">=3.6" 1214 | files = [ 1215 | {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"}, 1216 | {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"}, 1217 | ] 1218 | 1219 | [package.dependencies] 1220 | cryptography = ">=2.0" 1221 | jeepney = ">=0.6" 1222 | 1223 | [[package]] 1224 | name = "six" 1225 | version = "1.16.0" 1226 | description = "Python 2 and 3 compatibility utilities" 1227 | category = "dev" 1228 | optional = false 1229 | python-versions = ">=2.7, 
!=3.0.*, !=3.1.*, !=3.2.*" 1230 | files = [ 1231 | {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, 1232 | {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, 1233 | ] 1234 | 1235 | [[package]] 1236 | name = "tomli" 1237 | version = "2.0.1" 1238 | description = "A lil' TOML parser" 1239 | category = "dev" 1240 | optional = false 1241 | python-versions = ">=3.7" 1242 | files = [ 1243 | {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, 1244 | {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, 1245 | ] 1246 | 1247 | [[package]] 1248 | name = "twine" 1249 | version = "4.0.2" 1250 | description = "Collection of utilities for publishing packages on PyPI" 1251 | category = "dev" 1252 | optional = false 1253 | python-versions = ">=3.7" 1254 | files = [ 1255 | {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"}, 1256 | {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"}, 1257 | ] 1258 | 1259 | [package.dependencies] 1260 | importlib-metadata = ">=3.6" 1261 | keyring = ">=15.1" 1262 | pkginfo = ">=1.8.1" 1263 | readme-renderer = ">=35.0" 1264 | requests = ">=2.20" 1265 | requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0" 1266 | rfc3986 = ">=1.4.0" 1267 | rich = ">=12.0.0" 1268 | urllib3 = ">=1.26.0" 1269 | 1270 | [[package]] 1271 | name = "typing-extensions" 1272 | version = "4.5.0" 1273 | description = "Backported and Experimental Type Hints for Python 3.7+" 1274 | category = "main" 1275 | optional = false 1276 | python-versions = ">=3.7" 1277 | files = [ 1278 | {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, 1279 | {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, 1280 | ] 1281 | 1282 | [[package]] 1283 | name = "urllib3" 1284 | version = "1.26.15" 1285 | description = "HTTP library with thread-safe connection pooling, file post, and more." 
1286 | category = "dev" 1287 | optional = false 1288 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" 1289 | files = [ 1290 | {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, 1291 | {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, 1292 | ] 1293 | 1294 | [package.extras] 1295 | brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] 1296 | secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] 1297 | socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] 1298 | 1299 | [[package]] 1300 | name = "watchdog" 1301 | version = "3.0.0" 1302 | description = "Filesystem events monitoring" 1303 | category = "dev" 1304 | optional = false 1305 | python-versions = ">=3.7" 1306 | files = [ 1307 | {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, 1308 | {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, 1309 | {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, 1310 | {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, 1311 | {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, 1312 | {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, 1313 | {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, 1314 | {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, 1315 | {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, 1316 | {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, 1317 | {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, 1318 | {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, 1319 | {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, 1320 | {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, 1321 | {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, 1322 | {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, 1323 | {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, 1324 | {file = 
"watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, 1325 | {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, 1326 | {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, 1327 | {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, 1328 | {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, 1329 | {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, 1330 | {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, 1331 | {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, 1332 | {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, 1333 | {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, 1334 | ] 1335 | 1336 | [package.extras] 1337 | watchmedo = ["PyYAML (>=3.10)"] 1338 | 1339 | [[package]] 1340 | name = "webencodings" 1341 | version = "0.5.1" 1342 | description = "Character encoding aliases for legacy web content" 1343 | category = "dev" 1344 | optional = false 1345 | python-versions = "*" 1346 | files = [ 1347 | {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, 1348 | {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, 1349 | ] 1350 | 1351 | [[package]] 1352 | name = "zipp" 1353 | version = "3.15.0" 1354 | description = "Backport of pathlib-compatible object wrapper for zip files" 1355 | category = "dev" 1356 | optional = false 1357 | python-versions = ">=3.7" 1358 | files = [ 1359 | {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, 1360 | {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, 1361 | ] 1362 | 1363 | [package.extras] 1364 | docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] 1365 | testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] 1366 | 1367 | [metadata] 1368 | lock-version = "2.0" 1369 | python-versions = "^3.8.1" 1370 | content-hash = "cc9babcdfdc3679a4d84f68912408a005619a576947b059146ed1b428850ece9" 1371 | --------------------------------------------------------------------------------