├── .github ├── dependabot.yml └── workflows │ ├── publish.yaml │ └── test.yaml ├── .gitignore ├── LICENSE ├── README.md ├── SECURITY.md ├── examples ├── README.md ├── async-chat.py ├── async-generate.py ├── async-structured-outputs.py ├── async-tools.py ├── chat-stream.py ├── chat-with-history.py ├── chat.py ├── create.py ├── embed.py ├── fill-in-middle.py ├── generate-stream.py ├── generate.py ├── list.py ├── multimodal-chat.py ├── multimodal-generate.py ├── ps.py ├── pull.py ├── show.py ├── structured-outputs-image.py ├── structured-outputs.py ├── thinking-generate.py ├── thinking.py └── tools.py ├── ollama ├── __init__.py ├── _client.py ├── _types.py ├── _utils.py └── py.typed ├── pyproject.toml ├── requirements.txt ├── tests ├── test_client.py ├── test_type_serialization.py └── test_utils.py └── uv.lock /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: daily 7 | - package-ecosystem: pip 8 | directory: / 9 | schedule: 10 | interval: daily 11 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: publish 2 | 3 | on: 4 | release: 5 | types: 6 | - created 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | environment: release 12 | permissions: 13 | id-token: write 14 | contents: write 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: actions/setup-python@v5 18 | - uses: astral-sh/setup-uv@v5 19 | with: 20 | enable-cache: true 21 | - run: uv build 22 | - uses: pypa/gh-action-pypi-publish@release/v1 23 | - run: gh release upload $GITHUB_REF_NAME dist/* 24 | env: 25 | GH_TOKEN: ${{ github.token }} 26 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: 
-------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: astral-sh/setup-uv@v5 15 | with: 16 | enable-cache: true 17 | - run: uvx hatch test -acp 18 | if: ${{ always() }} 19 | lint: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: actions/checkout@v4 23 | - uses: actions/setup-python@v5 24 | - uses: astral-sh/setup-uv@v5 25 | with: 26 | enable-cache: true 27 | - name: check formatting 28 | run: uvx hatch fmt --check -f 29 | - name: check linting 30 | run: uvx hatch fmt --check -l --output-format=github 31 | - name: check uv.lock is up-to-date 32 | run: uv lock --check 33 | - name: check requirements.txt is up-to-date 34 | run: | 35 | uv export >requirements.txt 36 | git diff --exit-code requirements.txt 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Ollama 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ollama Python Library 2 | 3 | The Ollama Python library provides the easiest way to integrate Python 3.8+ projects with [Ollama](https://github.com/ollama/ollama). 4 | 5 | ## Prerequisites 6 | 7 | - [Ollama](https://ollama.com/download) should be installed and running 8 | - Pull a model to use with the library: `ollama pull ` e.g. `ollama pull llama3.2` 9 | - See [Ollama.com](https://ollama.com/search) for more information on the models available. 
10 | 11 | ## Install 12 | 13 | ```sh 14 | pip install ollama 15 | ``` 16 | 17 | ## Usage 18 | 19 | ```python 20 | from ollama import chat 21 | from ollama import ChatResponse 22 | 23 | response: ChatResponse = chat(model='llama3.2', messages=[ 24 | { 25 | 'role': 'user', 26 | 'content': 'Why is the sky blue?', 27 | }, 28 | ]) 29 | print(response['message']['content']) 30 | # or access fields directly from the response object 31 | print(response.message.content) 32 | ``` 33 | 34 | See [_types.py](ollama/_types.py) for more information on the response types. 35 | 36 | ## Streaming responses 37 | 38 | Response streaming can be enabled by setting `stream=True`. 39 | 40 | ```python 41 | from ollama import chat 42 | 43 | stream = chat( 44 | model='llama3.2', 45 | messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], 46 | stream=True, 47 | ) 48 | 49 | for chunk in stream: 50 | print(chunk['message']['content'], end='', flush=True) 51 | ``` 52 | 53 | ## Custom client 54 | A custom client can be created by instantiating `Client` or `AsyncClient` from `ollama`. 55 | 56 | All extra keyword arguments are passed into the [`httpx.Client`](https://www.python-httpx.org/api/#client). 57 | 58 | ```python 59 | from ollama import Client 60 | client = Client( 61 | host='http://localhost:11434', 62 | headers={'x-some-header': 'some-value'} 63 | ) 64 | response = client.chat(model='llama3.2', messages=[ 65 | { 66 | 'role': 'user', 67 | 'content': 'Why is the sky blue?', 68 | }, 69 | ]) 70 | ``` 71 | 72 | ## Async client 73 | 74 | The `AsyncClient` class is used to make asynchronous requests. It can be configured with the same fields as the `Client` class. 
75 | 76 | ```python 77 | import asyncio 78 | from ollama import AsyncClient 79 | 80 | async def chat(): 81 | message = {'role': 'user', 'content': 'Why is the sky blue?'} 82 | response = await AsyncClient().chat(model='llama3.2', messages=[message]) 83 | 84 | asyncio.run(chat()) 85 | ``` 86 | 87 | Setting `stream=True` modifies functions to return a Python asynchronous generator: 88 | 89 | ```python 90 | import asyncio 91 | from ollama import AsyncClient 92 | 93 | async def chat(): 94 | message = {'role': 'user', 'content': 'Why is the sky blue?'} 95 | async for part in await AsyncClient().chat(model='llama3.2', messages=[message], stream=True): 96 | print(part['message']['content'], end='', flush=True) 97 | 98 | asyncio.run(chat()) 99 | ``` 100 | 101 | ## API 102 | 103 | The Ollama Python library's API is designed around the [Ollama REST API](https://github.com/ollama/ollama/blob/main/docs/api.md) 104 | 105 | ### Chat 106 | 107 | ```python 108 | ollama.chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) 109 | ``` 110 | 111 | ### Generate 112 | 113 | ```python 114 | ollama.generate(model='llama3.2', prompt='Why is the sky blue?') 115 | ``` 116 | 117 | ### List 118 | 119 | ```python 120 | ollama.list() 121 | ``` 122 | 123 | ### Show 124 | 125 | ```python 126 | ollama.show('llama3.2') 127 | ``` 128 | 129 | ### Create 130 | 131 | ```python 132 | ollama.create(model='example', from_='llama3.2', system="You are Mario from Super Mario Bros.") 133 | ``` 134 | 135 | ### Copy 136 | 137 | ```python 138 | ollama.copy('llama3.2', 'user/llama3.2') 139 | ``` 140 | 141 | ### Delete 142 | 143 | ```python 144 | ollama.delete('llama3.2') 145 | ``` 146 | 147 | ### Pull 148 | 149 | ```python 150 | ollama.pull('llama3.2') 151 | ``` 152 | 153 | ### Push 154 | 155 | ```python 156 | ollama.push('user/llama3.2') 157 | ``` 158 | 159 | ### Embed 160 | 161 | ```python 162 | ollama.embed(model='llama3.2', input='The sky is blue because of rayleigh 
scattering') 163 | ``` 164 | 165 | ### Embed (batch) 166 | 167 | ```python 168 | ollama.embed(model='llama3.2', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll']) 169 | ``` 170 | 171 | ### Ps 172 | 173 | ```python 174 | ollama.ps() 175 | ``` 176 | 177 | 178 | ## Errors 179 | 180 | Errors are raised if requests return an error status or if an error is detected while streaming. 181 | 182 | ```python 183 | model = 'does-not-yet-exist' 184 | 185 | try: 186 | ollama.chat(model) 187 | except ollama.ResponseError as e: 188 | print('Error:', e.error) 189 | if e.status_code == 404: 190 | ollama.pull(model) 191 | ``` 192 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security 2 | 3 | The Ollama maintainer team takes security seriously and will actively work to resolve security issues. 4 | 5 | ## Reporting a vulnerability 6 | 7 | If you discover a security vulnerability, please do not open a public issue. Instead, please report it by emailing hello@ollama.com. We ask that you give us sufficient time to investigate and address the vulnerability before disclosing it publicly. 
8 | 9 | Please include the following details in your report: 10 | - A description of the vulnerability 11 | - Steps to reproduce the issue 12 | - Your assessment of the potential impact 13 | - Any possible mitigations 14 | 15 | ## Security best practices 16 | 17 | While the maintainer team does their best to secure Ollama, users are encouraged to implement their own security best practices, such as: 18 | 19 | - Regularly updating to the latest version of Ollama 20 | - Securing access to hosted instances of Ollama 21 | - Monitoring systems for unusual activity 22 | 23 | ## Contact 24 | 25 | For any other questions or concerns related to security, please contact us at hello@ollama.com 26 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Running Examples 2 | 3 | Run the examples in this directory with: 4 | ```sh 5 | # Run example 6 | python3 examples/.py 7 | ``` 8 | 9 | See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md) for full API documentation 10 | 11 | ### Chat - Chat with a model 12 | - [chat.py](chat.py) 13 | - [async-chat.py](async-chat.py) 14 | - [chat-stream.py](chat-stream.py) - Streamed outputs 15 | - [chat-with-history.py](chat-with-history.py) - Chat with model and maintain history of the conversation 16 | 17 | 18 | ### Generate - Generate text with a model 19 | - [generate.py](generate.py) 20 | - [async-generate.py](async-generate.py) 21 | - [generate-stream.py](generate-stream.py) - Streamed outputs 22 | - [fill-in-middle.py](fill-in-middle.py) - Given a prefix and suffix, fill in the middle 23 | 24 | 25 | ### Tools/Function Calling - Call a function with a model 26 | - [tools.py](tools.py) - Simple example of Tools/Function Calling 27 | - [async-tools.py](async-tools.py) 28 | 29 | 30 | ### Multimodal with Images - Chat with a multimodal (image chat) model 31 | - 
[multimodal-chat.py](multimodal-chat.py) 32 | - [multimodal-generate.py](multimodal-generate.py) 33 | 34 | 35 | ### Structured Outputs - Generate structured outputs with a model 36 | - [structured-outputs.py](structured-outputs.py) 37 | - [async-structured-outputs.py](async-structured-outputs.py) 38 | - [structured-outputs-image.py](structured-outputs-image.py) 39 | 40 | 41 | ### Ollama List - List all downloaded models and their properties 42 | - [list.py](list.py) 43 | 44 | 45 | ### Ollama Show - Display model properties and capabilities 46 | - [show.py](show.py) 47 | 48 | 49 | ### Ollama ps - Show model status with CPU/GPU usage 50 | - [ps.py](ps.py) 51 | 52 | 53 | ### Ollama Pull - Pull a model from Ollama 54 | Requirement: `pip install tqdm` 55 | - [pull.py](pull.py) 56 | 57 | 58 | ### Ollama Create - Create a model from a Modelfile 59 | - [create.py](create.py) 60 | 61 | 62 | ### Ollama Embed - Generate embeddings with a model 63 | - [embed.py](embed.py) 64 | 65 | 66 | ### Thinking - Enable thinking mode for a model 67 | - [thinking.py](thinking.py) 68 | 69 | ### Thinking (generate) - Enable thinking mode for a model 70 | - [thinking-generate.py](thinking-generate.py) 71 | -------------------------------------------------------------------------------- /examples/async-chat.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from ollama import AsyncClient 4 | 5 | 6 | async def main(): 7 | messages = [ 8 | { 9 | 'role': 'user', 10 | 'content': 'Why is the sky blue?', 11 | }, 12 | ] 13 | 14 | client = AsyncClient() 15 | response = await client.chat('llama3.2', messages=messages) 16 | print(response['message']['content']) 17 | 18 | 19 | if __name__ == '__main__': 20 | asyncio.run(main()) 21 | -------------------------------------------------------------------------------- /examples/async-generate.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 
| import ollama 4 | 5 | 6 | async def main(): 7 | client = ollama.AsyncClient() 8 | response = await client.generate('llama3.2', 'Why is the sky blue?') 9 | print(response['response']) 10 | 11 | 12 | if __name__ == '__main__': 13 | try: 14 | asyncio.run(main()) 15 | except KeyboardInterrupt: 16 | print('\nGoodbye!') 17 | -------------------------------------------------------------------------------- /examples/async-structured-outputs.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from pydantic import BaseModel 4 | 5 | from ollama import AsyncClient 6 | 7 | 8 | # Define the schema for the response 9 | class FriendInfo(BaseModel): 10 | name: str 11 | age: int 12 | is_available: bool 13 | 14 | 15 | class FriendList(BaseModel): 16 | friends: list[FriendInfo] 17 | 18 | 19 | async def main(): 20 | client = AsyncClient() 21 | response = await client.chat( 22 | model='llama3.1:8b', 23 | messages=[{'role': 'user', 'content': 'I have two friends. The first is Ollama 22 years old busy saving the world, and the second is Alonso 23 years old and wants to hang out. 
Return a list of friends in JSON format'}], 24 | format=FriendList.model_json_schema(), # Use Pydantic to generate the schema 25 | options={'temperature': 0}, # Make responses more deterministic 26 | ) 27 | 28 | # Use Pydantic to validate the response 29 | friends_response = FriendList.model_validate_json(response.message.content) 30 | print(friends_response) 31 | 32 | 33 | if __name__ == '__main__': 34 | asyncio.run(main()) 35 | -------------------------------------------------------------------------------- /examples/async-tools.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import ollama 4 | from ollama import ChatResponse 5 | 6 | 7 | def add_two_numbers(a: int, b: int) -> int: 8 | """ 9 | Add two numbers 10 | 11 | Args: 12 | a (int): The first number 13 | b (int): The second number 14 | 15 | Returns: 16 | int: The sum of the two numbers 17 | """ 18 | return a + b 19 | 20 | 21 | def subtract_two_numbers(a: int, b: int) -> int: 22 | """ 23 | Subtract two numbers 24 | """ 25 | return a - b 26 | 27 | 28 | # Tools can still be manually defined and passed into chat 29 | subtract_two_numbers_tool = { 30 | 'type': 'function', 31 | 'function': { 32 | 'name': 'subtract_two_numbers', 33 | 'description': 'Subtract two numbers', 34 | 'parameters': { 35 | 'type': 'object', 36 | 'required': ['a', 'b'], 37 | 'properties': { 38 | 'a': {'type': 'integer', 'description': 'The first number'}, 39 | 'b': {'type': 'integer', 'description': 'The second number'}, 40 | }, 41 | }, 42 | }, 43 | } 44 | 45 | messages = [{'role': 'user', 'content': 'What is three plus one?'}] 46 | print('Prompt:', messages[0]['content']) 47 | 48 | available_functions = { 49 | 'add_two_numbers': add_two_numbers, 50 | 'subtract_two_numbers': subtract_two_numbers, 51 | } 52 | 53 | 54 | async def main(): 55 | client = ollama.AsyncClient() 56 | 57 | response: ChatResponse = await client.chat( 58 | 'llama3.1', 59 | messages=messages, 60 | 
tools=[add_two_numbers, subtract_two_numbers_tool], 61 | ) 62 | 63 | if response.message.tool_calls: 64 | # There may be multiple tool calls in the response 65 | for tool in response.message.tool_calls: 66 | # Ensure the function is available, and then call it 67 | if function_to_call := available_functions.get(tool.function.name): 68 | print('Calling function:', tool.function.name) 69 | print('Arguments:', tool.function.arguments) 70 | output = function_to_call(**tool.function.arguments) 71 | print('Function output:', output) 72 | else: 73 | print('Function', tool.function.name, 'not found') 74 | 75 | # Only needed to chat with the model using the tool call results 76 | if response.message.tool_calls: 77 | # Add the function response to messages for the model to use 78 | messages.append(response.message) 79 | messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name}) 80 | 81 | # Get final response from model with function outputs 82 | final_response = await client.chat('llama3.1', messages=messages) 83 | print('Final response:', final_response.message.content) 84 | 85 | else: 86 | print('No tool calls returned from model') 87 | 88 | 89 | if __name__ == '__main__': 90 | try: 91 | asyncio.run(main()) 92 | except KeyboardInterrupt: 93 | print('\nGoodbye!') 94 | -------------------------------------------------------------------------------- /examples/chat-stream.py: -------------------------------------------------------------------------------- 1 | from ollama import chat 2 | 3 | messages = [ 4 | { 5 | 'role': 'user', 6 | 'content': 'Why is the sky blue?', 7 | }, 8 | ] 9 | 10 | for part in chat('llama3.2', messages=messages, stream=True): 11 | print(part['message']['content'], end='', flush=True) 12 | 13 | print() 14 | -------------------------------------------------------------------------------- /examples/chat-with-history.py: -------------------------------------------------------------------------------- 1 | from ollama import chat 2 
| 3 | messages = [ 4 | { 5 | 'role': 'user', 6 | 'content': 'Why is the sky blue?', 7 | }, 8 | { 9 | 'role': 'assistant', 10 | 'content': "The sky is blue because of the way the Earth's atmosphere scatters sunlight.", 11 | }, 12 | { 13 | 'role': 'user', 14 | 'content': 'What is the weather in Tokyo?', 15 | }, 16 | { 17 | 'role': 'assistant', 18 | 'content': 'The weather in Tokyo is typically warm and humid during the summer months, with temperatures often exceeding 30°C (86°F). The city experiences a rainy season from June to September, with heavy rainfall and occasional typhoons. Winter is mild, with temperatures rarely dropping below freezing. The city is known for its high-tech and vibrant culture, with many popular tourist attractions such as the Tokyo Tower, Senso-ji Temple, and the bustling Shibuya district.', 19 | }, 20 | ] 21 | 22 | while True: 23 | user_input = input('Chat with history: ') 24 | response = chat( 25 | 'llama3.2', 26 | messages=[*messages, {'role': 'user', 'content': user_input}], 27 | ) 28 | 29 | # Add the response to the messages to maintain the history 30 | messages += [ 31 | {'role': 'user', 'content': user_input}, 32 | {'role': 'assistant', 'content': response.message.content}, 33 | ] 34 | print(response.message.content + '\n') 35 | -------------------------------------------------------------------------------- /examples/chat.py: -------------------------------------------------------------------------------- 1 | from ollama import chat 2 | 3 | messages = [ 4 | { 5 | 'role': 'user', 6 | 'content': 'Why is the sky blue?', 7 | }, 8 | ] 9 | 10 | response = chat('llama3.2', messages=messages) 11 | print(response['message']['content']) 12 | -------------------------------------------------------------------------------- /examples/create.py: -------------------------------------------------------------------------------- 1 | from ollama import Client 2 | 3 | client = Client() 4 | response = client.create( 5 | model='my-assistant', 6 | 
from_='llama3.2', 7 | system='You are mario from Super Mario Bros.', 8 | stream=False, 9 | ) 10 | print(response.status) 11 | -------------------------------------------------------------------------------- /examples/embed.py: -------------------------------------------------------------------------------- 1 | from ollama import embed 2 | 3 | response = embed(model='llama3.2', input='Hello, world!') 4 | print(response['embeddings']) 5 | -------------------------------------------------------------------------------- /examples/fill-in-middle.py: -------------------------------------------------------------------------------- 1 | from ollama import generate 2 | 3 | prompt = '''def remove_non_ascii(s: str) -> str: 4 | """ ''' 5 | 6 | suffix = """ 7 | return result 8 | """ 9 | 10 | response = generate( 11 | model='codellama:7b-code', 12 | prompt=prompt, 13 | suffix=suffix, 14 | options={ 15 | 'num_predict': 128, 16 | 'temperature': 0, 17 | 'top_p': 0.9, 18 | 'stop': [''], 19 | }, 20 | ) 21 | 22 | print(response['response']) 23 | -------------------------------------------------------------------------------- /examples/generate-stream.py: -------------------------------------------------------------------------------- 1 | from ollama import generate 2 | 3 | for part in generate('llama3.2', 'Why is the sky blue?', stream=True): 4 | print(part['response'], end='', flush=True) 5 | -------------------------------------------------------------------------------- /examples/generate.py: -------------------------------------------------------------------------------- 1 | from ollama import generate 2 | 3 | response = generate('llama3.2', 'Why is the sky blue?') 4 | print(response['response']) 5 | -------------------------------------------------------------------------------- /examples/list.py: -------------------------------------------------------------------------------- 1 | from ollama import ListResponse, list 2 | 3 | response: ListResponse = list() 4 | 5 | for model in 
response.models: 6 | print('Name:', model.model) 7 | print(' Size (MB):', f'{(model.size.real / 1024 / 1024):.2f}') 8 | if model.details: 9 | print(' Format:', model.details.format) 10 | print(' Family:', model.details.family) 11 | print(' Parameter Size:', model.details.parameter_size) 12 | print(' Quantization Level:', model.details.quantization_level) 13 | print('\n') 14 | -------------------------------------------------------------------------------- /examples/multimodal-chat.py: -------------------------------------------------------------------------------- 1 | from ollama import chat 2 | 3 | # from pathlib import Path 4 | 5 | # Pass in the path to the image 6 | path = input('Please enter the path to the image: ') 7 | 8 | # You can also pass in base64 encoded image data 9 | # img = base64.b64encode(Path(path).read_bytes()).decode() 10 | # or the raw bytes 11 | # img = Path(path).read_bytes() 12 | 13 | response = chat( 14 | model='llama3.2-vision', 15 | messages=[ 16 | { 17 | 'role': 'user', 18 | 'content': 'What is in this image? 
Be concise.', 19 | 'images': [path], 20 | } 21 | ], 22 | ) 23 | 24 | print(response.message.content) 25 | -------------------------------------------------------------------------------- /examples/multimodal-generate.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sys 3 | 4 | import httpx 5 | 6 | from ollama import generate 7 | 8 | latest = httpx.get('https://xkcd.com/info.0.json') 9 | latest.raise_for_status() 10 | 11 | num = int(sys.argv[1]) if len(sys.argv) > 1 else random.randint(1, latest.json().get('num')) 12 | 13 | comic = httpx.get(f'https://xkcd.com/{num}/info.0.json') 14 | comic.raise_for_status() 15 | 16 | print(f'xkcd #{comic.json().get("num")}: {comic.json().get("alt")}') 17 | print(f'link: https://xkcd.com/{num}') 18 | print('---') 19 | 20 | raw = httpx.get(comic.json().get('img')) 21 | raw.raise_for_status() 22 | 23 | for response in generate('llava', 'explain this comic:', images=[raw.content], stream=True): 24 | print(response['response'], end='', flush=True) 25 | 26 | print() 27 | -------------------------------------------------------------------------------- /examples/ps.py: -------------------------------------------------------------------------------- 1 | from ollama import ProcessResponse, chat, ps, pull 2 | 3 | # Ensure at least one model is loaded 4 | response = pull('llama3.2', stream=True) 5 | progress_states = set() 6 | for progress in response: 7 | if progress.get('status') in progress_states: 8 | continue 9 | progress_states.add(progress.get('status')) 10 | print(progress.get('status')) 11 | 12 | print('\n') 13 | 14 | print('Waiting for model to load... 
\n') 15 | chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) 16 | 17 | 18 | response: ProcessResponse = ps() 19 | for model in response.models: 20 | print('Model: ', model.model) 21 | print(' Digest: ', model.digest) 22 | print(' Expires at: ', model.expires_at) 23 | print(' Size: ', model.size) 24 | print(' Size vram: ', model.size_vram) 25 | print(' Details: ', model.details) 26 | print('\n') 27 | -------------------------------------------------------------------------------- /examples/pull.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | 3 | from ollama import pull 4 | 5 | current_digest, bars = '', {} 6 | for progress in pull('llama3.2', stream=True): 7 | digest = progress.get('digest', '') 8 | if digest != current_digest and current_digest in bars: 9 | bars[current_digest].close() 10 | 11 | if not digest: 12 | print(progress.get('status')) 13 | continue 14 | 15 | if digest not in bars and (total := progress.get('total')): 16 | bars[digest] = tqdm(total=total, desc=f'pulling {digest[7:19]}', unit='B', unit_scale=True) 17 | 18 | if completed := progress.get('completed'): 19 | bars[digest].update(completed - bars[digest].n) 20 | 21 | current_digest = digest 22 | -------------------------------------------------------------------------------- /examples/show.py: -------------------------------------------------------------------------------- 1 | from ollama import ShowResponse, show 2 | 3 | response: ShowResponse = show('gemma3') 4 | print('Model Information:') 5 | print(f'Modified at: {response.modified_at}') 6 | print(f'Template: {response.template}') 7 | print(f'Modelfile: {response.modelfile}') 8 | print(f'License: {response.license}') 9 | print(f'Details: {response.details}') 10 | print(f'Model Info: {response.modelinfo}') 11 | print(f'Parameters: {response.parameters}') 12 | print(f'Capabilities: {response.capabilities}') 13 | 
-------------------------------------------------------------------------------- /examples/structured-outputs-image.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Literal 3 | 4 | from pydantic import BaseModel 5 | 6 | from ollama import chat 7 | 8 | 9 | # Define the schema for image objects 10 | class Object(BaseModel): 11 | name: str 12 | confidence: float 13 | attributes: str 14 | 15 | 16 | class ImageDescription(BaseModel): 17 | summary: str 18 | objects: list[Object] 19 | scene: str 20 | colors: list[str] 21 | time_of_day: Literal['Morning', 'Afternoon', 'Evening', 'Night'] 22 | setting: Literal['Indoor', 'Outdoor', 'Unknown'] 23 | text_content: str | None = None 24 | 25 | 26 | # Get path from user input 27 | path = input('Enter the path to your image: ') 28 | path = Path(path) 29 | 30 | # Verify the file exists 31 | if not path.exists(): 32 | raise FileNotFoundError(f'Image not found at: {path}') 33 | 34 | # Set up chat as usual 35 | response = chat( 36 | model='llama3.2-vision', 37 | format=ImageDescription.model_json_schema(), # Pass in the schema for the response 38 | messages=[ 39 | { 40 | 'role': 'user', 41 | 'content': 'Analyze this image and return a detailed JSON description including objects, scene, colors and any text detected. 
If you cannot determine certain details, leave those fields empty.', 42 | 'images': [path], 43 | }, 44 | ], 45 | options={'temperature': 0}, # Set temperature to 0 for more deterministic output 46 | ) 47 | 48 | 49 | # Convert received content to the schema 50 | image_analysis = ImageDescription.model_validate_json(response.message.content) 51 | print(image_analysis) 52 | -------------------------------------------------------------------------------- /examples/structured-outputs.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from ollama import chat 4 | 5 | 6 | # Define the schema for the response 7 | class FriendInfo(BaseModel): 8 | name: str 9 | age: int 10 | is_available: bool 11 | 12 | 13 | class FriendList(BaseModel): 14 | friends: list[FriendInfo] 15 | 16 | 17 | # schema = {'type': 'object', 'properties': {'friends': {'type': 'array', 'items': {'type': 'object', 'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}, 'is_available': {'type': 'boolean'}}, 'required': ['name', 'age', 'is_available']}}}, 'required': ['friends']} 18 | response = chat( 19 | model='llama3.1:8b', 20 | messages=[{'role': 'user', 'content': 'I have two friends. The first is Ollama 22 years old busy saving the world, and the second is Alonso 23 years old and wants to hang out. 
Return a list of friends in JSON format'}], 21 | format=FriendList.model_json_schema(), # Use Pydantic to generate the schema or format=schema 22 | options={'temperature': 0}, # Make responses more deterministic 23 | ) 24 | 25 | # Use Pydantic to validate the response 26 | friends_response = FriendList.model_validate_json(response.message.content) 27 | print(friends_response) 28 | -------------------------------------------------------------------------------- /examples/thinking-generate.py: -------------------------------------------------------------------------------- 1 | from ollama import generate 2 | 3 | response = generate('deepseek-r1', 'why is the sky blue', think=True) 4 | 5 | print('Thinking:\n========\n\n' + response.thinking) 6 | print('\nResponse:\n========\n\n' + response.response) 7 | -------------------------------------------------------------------------------- /examples/thinking.py: -------------------------------------------------------------------------------- 1 | from ollama import chat 2 | 3 | messages = [ 4 | { 5 | 'role': 'user', 6 | 'content': 'What is 10 + 23?', 7 | }, 8 | ] 9 | 10 | response = chat('deepseek-r1', messages=messages, think=True) 11 | 12 | print('Thinking:\n========\n\n' + response.message.thinking) 13 | print('\nResponse:\n========\n\n' + response.message.content) 14 | -------------------------------------------------------------------------------- /examples/tools.py: -------------------------------------------------------------------------------- 1 | from ollama import ChatResponse, chat 2 | 3 | 4 | def add_two_numbers(a: int, b: int) -> int: 5 | """ 6 | Add two numbers 7 | 8 | Args: 9 | a (int): The first number 10 | b (int): The second number 11 | 12 | Returns: 13 | int: The sum of the two numbers 14 | """ 15 | 16 | # The cast is necessary as returned tool call arguments don't always conform exactly to schema 17 | # E.g. 
this would prevent "what is 30 + 12" to produce '3012' instead of 42 18 | return int(a) + int(b) 19 | 20 | 21 | def subtract_two_numbers(a: int, b: int) -> int: 22 | """ 23 | Subtract two numbers 24 | """ 25 | 26 | # The cast is necessary as returned tool call arguments don't always conform exactly to schema 27 | return int(a) - int(b) 28 | 29 | 30 | # Tools can still be manually defined and passed into chat 31 | subtract_two_numbers_tool = { 32 | 'type': 'function', 33 | 'function': { 34 | 'name': 'subtract_two_numbers', 35 | 'description': 'Subtract two numbers', 36 | 'parameters': { 37 | 'type': 'object', 38 | 'required': ['a', 'b'], 39 | 'properties': { 40 | 'a': {'type': 'integer', 'description': 'The first number'}, 41 | 'b': {'type': 'integer', 'description': 'The second number'}, 42 | }, 43 | }, 44 | }, 45 | } 46 | 47 | messages = [{'role': 'user', 'content': 'What is three plus one?'}] 48 | print('Prompt:', messages[0]['content']) 49 | 50 | available_functions = { 51 | 'add_two_numbers': add_two_numbers, 52 | 'subtract_two_numbers': subtract_two_numbers, 53 | } 54 | 55 | response: ChatResponse = chat( 56 | 'llama3.1', 57 | messages=messages, 58 | tools=[add_two_numbers, subtract_two_numbers_tool], 59 | ) 60 | 61 | if response.message.tool_calls: 62 | # There may be multiple tool calls in the response 63 | for tool in response.message.tool_calls: 64 | # Ensure the function is available, and then call it 65 | if function_to_call := available_functions.get(tool.function.name): 66 | print('Calling function:', tool.function.name) 67 | print('Arguments:', tool.function.arguments) 68 | output = function_to_call(**tool.function.arguments) 69 | print('Function output:', output) 70 | else: 71 | print('Function', tool.function.name, 'not found') 72 | 73 | # Only needed to chat with the model using the tool call results 74 | if response.message.tool_calls: 75 | # Add the function response to messages for the model to use 76 | messages.append(response.message) 77 | 
messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name}) 78 | 79 | # Get final response from model with function outputs 80 | final_response = chat('llama3.1', messages=messages) 81 | print('Final response:', final_response.message.content) 82 | 83 | else: 84 | print('No tool calls returned from model') 85 | -------------------------------------------------------------------------------- /ollama/__init__.py: -------------------------------------------------------------------------------- 1 | from ollama._client import AsyncClient, Client 2 | from ollama._types import ( 3 | ChatResponse, 4 | EmbeddingsResponse, 5 | EmbedResponse, 6 | GenerateResponse, 7 | Image, 8 | ListResponse, 9 | Message, 10 | Options, 11 | ProcessResponse, 12 | ProgressResponse, 13 | RequestError, 14 | ResponseError, 15 | ShowResponse, 16 | StatusResponse, 17 | Tool, 18 | ) 19 | 20 | __all__ = [ 21 | 'AsyncClient', 22 | 'ChatResponse', 23 | 'Client', 24 | 'EmbedResponse', 25 | 'EmbeddingsResponse', 26 | 'GenerateResponse', 27 | 'Image', 28 | 'ListResponse', 29 | 'Message', 30 | 'Options', 31 | 'ProcessResponse', 32 | 'ProgressResponse', 33 | 'RequestError', 34 | 'ResponseError', 35 | 'ShowResponse', 36 | 'StatusResponse', 37 | 'Tool', 38 | ] 39 | 40 | _client = Client() 41 | 42 | generate = _client.generate 43 | chat = _client.chat 44 | embed = _client.embed 45 | embeddings = _client.embeddings 46 | pull = _client.pull 47 | push = _client.push 48 | create = _client.create 49 | delete = _client.delete 50 | list = _client.list 51 | copy = _client.copy 52 | show = _client.show 53 | ps = _client.ps 54 | -------------------------------------------------------------------------------- /ollama/_client.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | import json 3 | import os 4 | import platform 5 | import sys 6 | import urllib.parse 7 | from hashlib import sha256 8 | from os import PathLike 9 | from pathlib 
import Path 10 | from typing import ( 11 | Any, 12 | Callable, 13 | Dict, 14 | List, 15 | Literal, 16 | Mapping, 17 | Optional, 18 | Sequence, 19 | Type, 20 | TypeVar, 21 | Union, 22 | overload, 23 | ) 24 | 25 | import anyio 26 | from pydantic.json_schema import JsonSchemaValue 27 | 28 | from ollama._utils import convert_function_to_tool 29 | 30 | if sys.version_info < (3, 9): 31 | from typing import AsyncIterator, Iterator 32 | else: 33 | from collections.abc import AsyncIterator, Iterator 34 | 35 | from importlib import metadata 36 | 37 | try: 38 | __version__ = metadata.version('ollama') 39 | except metadata.PackageNotFoundError: 40 | __version__ = '0.0.0' 41 | 42 | import httpx 43 | 44 | from ollama._types import ( 45 | ChatRequest, 46 | ChatResponse, 47 | CopyRequest, 48 | CreateRequest, 49 | DeleteRequest, 50 | EmbeddingsRequest, 51 | EmbeddingsResponse, 52 | EmbedRequest, 53 | EmbedResponse, 54 | GenerateRequest, 55 | GenerateResponse, 56 | Image, 57 | ListResponse, 58 | Message, 59 | Options, 60 | ProcessResponse, 61 | ProgressResponse, 62 | PullRequest, 63 | PushRequest, 64 | ResponseError, 65 | ShowRequest, 66 | ShowResponse, 67 | StatusResponse, 68 | Tool, 69 | ) 70 | 71 | T = TypeVar('T') 72 | 73 | 74 | class BaseClient: 75 | def __init__( 76 | self, 77 | client, 78 | host: Optional[str] = None, 79 | *, 80 | follow_redirects: bool = True, 81 | timeout: Any = None, 82 | headers: Optional[Mapping[str, str]] = None, 83 | **kwargs, 84 | ) -> None: 85 | """ 86 | Creates a httpx client. Default parameters are the same as those defined in httpx 87 | except for the following: 88 | - `follow_redirects`: True 89 | - `timeout`: None 90 | `kwargs` are passed to the httpx client. 
91 | """ 92 | 93 | self._client = client( 94 | base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), 95 | follow_redirects=follow_redirects, 96 | timeout=timeout, 97 | # Lowercase all headers to ensure override 98 | headers={ 99 | k.lower(): v 100 | for k, v in { 101 | **(headers or {}), 102 | 'Content-Type': 'application/json', 103 | 'Accept': 'application/json', 104 | 'User-Agent': f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}', 105 | }.items() 106 | }, 107 | **kwargs, 108 | ) 109 | 110 | 111 | CONNECTION_ERROR_MESSAGE = 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download' 112 | 113 | 114 | class Client(BaseClient): 115 | def __init__(self, host: Optional[str] = None, **kwargs) -> None: 116 | super().__init__(httpx.Client, host, **kwargs) 117 | 118 | def _request_raw(self, *args, **kwargs): 119 | try: 120 | r = self._client.request(*args, **kwargs) 121 | r.raise_for_status() 122 | return r 123 | except httpx.HTTPStatusError as e: 124 | raise ResponseError(e.response.text, e.response.status_code) from None 125 | except httpx.ConnectError: 126 | raise ConnectionError(CONNECTION_ERROR_MESSAGE) from None 127 | 128 | @overload 129 | def _request( 130 | self, 131 | cls: Type[T], 132 | *args, 133 | stream: Literal[False] = False, 134 | **kwargs, 135 | ) -> T: ... 136 | 137 | @overload 138 | def _request( 139 | self, 140 | cls: Type[T], 141 | *args, 142 | stream: Literal[True] = True, 143 | **kwargs, 144 | ) -> Iterator[T]: ... 145 | 146 | @overload 147 | def _request( 148 | self, 149 | cls: Type[T], 150 | *args, 151 | stream: bool = False, 152 | **kwargs, 153 | ) -> Union[T, Iterator[T]]: ... 
154 | 155 | def _request( 156 | self, 157 | cls: Type[T], 158 | *args, 159 | stream: bool = False, 160 | **kwargs, 161 | ) -> Union[T, Iterator[T]]: 162 | if stream: 163 | 164 | def inner(): 165 | with self._client.stream(*args, **kwargs) as r: 166 | try: 167 | r.raise_for_status() 168 | except httpx.HTTPStatusError as e: 169 | e.response.read() 170 | raise ResponseError(e.response.text, e.response.status_code) from None 171 | 172 | for line in r.iter_lines(): 173 | part = json.loads(line) 174 | if err := part.get('error'): 175 | raise ResponseError(err) 176 | yield cls(**part) 177 | 178 | return inner() 179 | 180 | return cls(**self._request_raw(*args, **kwargs).json()) 181 | 182 | @overload 183 | def generate( 184 | self, 185 | model: str = '', 186 | prompt: str = '', 187 | suffix: str = '', 188 | *, 189 | system: str = '', 190 | template: str = '', 191 | context: Optional[Sequence[int]] = None, 192 | stream: Literal[False] = False, 193 | think: Optional[bool] = None, 194 | raw: bool = False, 195 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 196 | images: Optional[Sequence[Union[str, bytes, Image]]] = None, 197 | options: Optional[Union[Mapping[str, Any], Options]] = None, 198 | keep_alive: Optional[Union[float, str]] = None, 199 | ) -> GenerateResponse: ... 200 | 201 | @overload 202 | def generate( 203 | self, 204 | model: str = '', 205 | prompt: str = '', 206 | suffix: str = '', 207 | *, 208 | system: str = '', 209 | template: str = '', 210 | context: Optional[Sequence[int]] = None, 211 | stream: Literal[True] = True, 212 | think: Optional[bool] = None, 213 | raw: bool = False, 214 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 215 | images: Optional[Sequence[Union[str, bytes, Image]]] = None, 216 | options: Optional[Union[Mapping[str, Any], Options]] = None, 217 | keep_alive: Optional[Union[float, str]] = None, 218 | ) -> Iterator[GenerateResponse]: ... 
219 | 220 | def generate( 221 | self, 222 | model: str = '', 223 | prompt: Optional[str] = None, 224 | suffix: Optional[str] = None, 225 | *, 226 | system: Optional[str] = None, 227 | template: Optional[str] = None, 228 | context: Optional[Sequence[int]] = None, 229 | stream: bool = False, 230 | think: Optional[bool] = None, 231 | raw: Optional[bool] = None, 232 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 233 | images: Optional[Sequence[Union[str, bytes, Image]]] = None, 234 | options: Optional[Union[Mapping[str, Any], Options]] = None, 235 | keep_alive: Optional[Union[float, str]] = None, 236 | ) -> Union[GenerateResponse, Iterator[GenerateResponse]]: 237 | """ 238 | Create a response using the requested model. 239 | 240 | Raises `RequestError` if a model is not provided. 241 | 242 | Raises `ResponseError` if the request could not be fulfilled. 243 | 244 | Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. 245 | """ 246 | 247 | return self._request( 248 | GenerateResponse, 249 | 'POST', 250 | '/api/generate', 251 | json=GenerateRequest( 252 | model=model, 253 | prompt=prompt, 254 | suffix=suffix, 255 | system=system, 256 | template=template, 257 | context=context, 258 | stream=stream, 259 | think=think, 260 | raw=raw, 261 | format=format, 262 | images=list(_copy_images(images)) if images else None, 263 | options=options, 264 | keep_alive=keep_alive, 265 | ).model_dump(exclude_none=True), 266 | stream=stream, 267 | ) 268 | 269 | @overload 270 | def chat( 271 | self, 272 | model: str = '', 273 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 274 | *, 275 | tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, 276 | stream: Literal[False] = False, 277 | think: Optional[bool] = None, 278 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 279 | options: Optional[Union[Mapping[str, Any], Options]] = None, 280 | keep_alive: 
Optional[Union[float, str]] = None, 281 | ) -> ChatResponse: ... 282 | 283 | @overload 284 | def chat( 285 | self, 286 | model: str = '', 287 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 288 | *, 289 | tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, 290 | stream: Literal[True] = True, 291 | think: Optional[bool] = None, 292 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 293 | options: Optional[Union[Mapping[str, Any], Options]] = None, 294 | keep_alive: Optional[Union[float, str]] = None, 295 | ) -> Iterator[ChatResponse]: ... 296 | 297 | def chat( 298 | self, 299 | model: str = '', 300 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 301 | *, 302 | tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, 303 | stream: bool = False, 304 | think: Optional[bool] = None, 305 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 306 | options: Optional[Union[Mapping[str, Any], Options]] = None, 307 | keep_alive: Optional[Union[float, str]] = None, 308 | ) -> Union[ChatResponse, Iterator[ChatResponse]]: 309 | """ 310 | Create a chat response using the requested model. 311 | 312 | Args: 313 | tools: 314 | A JSON schema as a dict, an Ollama Tool or a Python Function. 315 | Python functions need to follow Google style docstrings to be converted to an Ollama Tool. 316 | For more information, see: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings 317 | stream: Whether to stream the response. 318 | format: The format of the response. 319 | 320 | Example: 321 | def add_two_numbers(a: int, b: int) -> int: 322 | ''' 323 | Add two numbers together. 
324 | 325 | Args: 326 | a: First number to add 327 | b: Second number to add 328 | 329 | Returns: 330 | int: The sum of a and b 331 | ''' 332 | return a + b 333 | 334 | client.chat(model='llama3.2', tools=[add_two_numbers], messages=[...]) 335 | 336 | Raises `RequestError` if a model is not provided. 337 | 338 | Raises `ResponseError` if the request could not be fulfilled. 339 | 340 | Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 341 | """ 342 | return self._request( 343 | ChatResponse, 344 | 'POST', 345 | '/api/chat', 346 | json=ChatRequest( 347 | model=model, 348 | messages=list(_copy_messages(messages)), 349 | tools=list(_copy_tools(tools)), 350 | stream=stream, 351 | think=think, 352 | format=format, 353 | options=options, 354 | keep_alive=keep_alive, 355 | ).model_dump(exclude_none=True), 356 | stream=stream, 357 | ) 358 | 359 | def embed( 360 | self, 361 | model: str = '', 362 | input: Union[str, Sequence[str]] = '', 363 | truncate: Optional[bool] = None, 364 | options: Optional[Union[Mapping[str, Any], Options]] = None, 365 | keep_alive: Optional[Union[float, str]] = None, 366 | ) -> EmbedResponse: 367 | return self._request( 368 | EmbedResponse, 369 | 'POST', 370 | '/api/embed', 371 | json=EmbedRequest( 372 | model=model, 373 | input=input, 374 | truncate=truncate, 375 | options=options, 376 | keep_alive=keep_alive, 377 | ).model_dump(exclude_none=True), 378 | ) 379 | 380 | def embeddings( 381 | self, 382 | model: str = '', 383 | prompt: Optional[str] = None, 384 | options: Optional[Union[Mapping[str, Any], Options]] = None, 385 | keep_alive: Optional[Union[float, str]] = None, 386 | ) -> EmbeddingsResponse: 387 | """ 388 | Deprecated in favor of `embed`. 
389 | """ 390 | return self._request( 391 | EmbeddingsResponse, 392 | 'POST', 393 | '/api/embeddings', 394 | json=EmbeddingsRequest( 395 | model=model, 396 | prompt=prompt, 397 | options=options, 398 | keep_alive=keep_alive, 399 | ).model_dump(exclude_none=True), 400 | ) 401 | 402 | @overload 403 | def pull( 404 | self, 405 | model: str, 406 | *, 407 | insecure: bool = False, 408 | stream: Literal[False] = False, 409 | ) -> ProgressResponse: ... 410 | 411 | @overload 412 | def pull( 413 | self, 414 | model: str, 415 | *, 416 | insecure: bool = False, 417 | stream: Literal[True] = True, 418 | ) -> Iterator[ProgressResponse]: ... 419 | 420 | def pull( 421 | self, 422 | model: str, 423 | *, 424 | insecure: bool = False, 425 | stream: bool = False, 426 | ) -> Union[ProgressResponse, Iterator[ProgressResponse]]: 427 | """ 428 | Raises `ResponseError` if the request could not be fulfilled. 429 | 430 | Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 431 | """ 432 | return self._request( 433 | ProgressResponse, 434 | 'POST', 435 | '/api/pull', 436 | json=PullRequest( 437 | model=model, 438 | insecure=insecure, 439 | stream=stream, 440 | ).model_dump(exclude_none=True), 441 | stream=stream, 442 | ) 443 | 444 | @overload 445 | def push( 446 | self, 447 | model: str, 448 | *, 449 | insecure: bool = False, 450 | stream: Literal[False] = False, 451 | ) -> ProgressResponse: ... 452 | 453 | @overload 454 | def push( 455 | self, 456 | model: str, 457 | *, 458 | insecure: bool = False, 459 | stream: Literal[True] = True, 460 | ) -> Iterator[ProgressResponse]: ... 461 | 462 | def push( 463 | self, 464 | model: str, 465 | *, 466 | insecure: bool = False, 467 | stream: bool = False, 468 | ) -> Union[ProgressResponse, Iterator[ProgressResponse]]: 469 | """ 470 | Raises `ResponseError` if the request could not be fulfilled. 471 | 472 | Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
473 | """ 474 | return self._request( 475 | ProgressResponse, 476 | 'POST', 477 | '/api/push', 478 | json=PushRequest( 479 | model=model, 480 | insecure=insecure, 481 | stream=stream, 482 | ).model_dump(exclude_none=True), 483 | stream=stream, 484 | ) 485 | 486 | @overload 487 | def create( 488 | self, 489 | model: str, 490 | quantize: Optional[str] = None, 491 | from_: Optional[str] = None, 492 | files: Optional[Dict[str, str]] = None, 493 | adapters: Optional[Dict[str, str]] = None, 494 | template: Optional[str] = None, 495 | license: Optional[Union[str, List[str]]] = None, 496 | system: Optional[str] = None, 497 | parameters: Optional[Union[Mapping[str, Any], Options]] = None, 498 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 499 | *, 500 | stream: Literal[False] = False, 501 | ) -> ProgressResponse: ... 502 | 503 | @overload 504 | def create( 505 | self, 506 | model: str, 507 | quantize: Optional[str] = None, 508 | from_: Optional[str] = None, 509 | files: Optional[Dict[str, str]] = None, 510 | adapters: Optional[Dict[str, str]] = None, 511 | template: Optional[str] = None, 512 | license: Optional[Union[str, List[str]]] = None, 513 | system: Optional[str] = None, 514 | parameters: Optional[Union[Mapping[str, Any], Options]] = None, 515 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 516 | *, 517 | stream: Literal[True] = True, 518 | ) -> Iterator[ProgressResponse]: ... 
519 | 520 | def create( 521 | self, 522 | model: str, 523 | quantize: Optional[str] = None, 524 | from_: Optional[str] = None, 525 | files: Optional[Dict[str, str]] = None, 526 | adapters: Optional[Dict[str, str]] = None, 527 | template: Optional[str] = None, 528 | license: Optional[Union[str, List[str]]] = None, 529 | system: Optional[str] = None, 530 | parameters: Optional[Union[Mapping[str, Any], Options]] = None, 531 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 532 | *, 533 | stream: bool = False, 534 | ) -> Union[ProgressResponse, Iterator[ProgressResponse]]: 535 | """ 536 | Raises `ResponseError` if the request could not be fulfilled. 537 | 538 | Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 539 | """ 540 | return self._request( 541 | ProgressResponse, 542 | 'POST', 543 | '/api/create', 544 | json=CreateRequest( 545 | model=model, 546 | stream=stream, 547 | quantize=quantize, 548 | from_=from_, 549 | files=files, 550 | adapters=adapters, 551 | license=license, 552 | template=template, 553 | system=system, 554 | parameters=parameters, 555 | messages=messages, 556 | ).model_dump(exclude_none=True), 557 | stream=stream, 558 | ) 559 | 560 | def create_blob(self, path: Union[str, Path]) -> str: 561 | sha256sum = sha256() 562 | with open(path, 'rb') as r: 563 | while True: 564 | chunk = r.read(32 * 1024) 565 | if not chunk: 566 | break 567 | sha256sum.update(chunk) 568 | 569 | digest = f'sha256:{sha256sum.hexdigest()}' 570 | 571 | with open(path, 'rb') as r: 572 | self._request_raw('POST', f'/api/blobs/{digest}', content=r) 573 | 574 | return digest 575 | 576 | def list(self) -> ListResponse: 577 | return self._request( 578 | ListResponse, 579 | 'GET', 580 | '/api/tags', 581 | ) 582 | 583 | def delete(self, model: str) -> StatusResponse: 584 | r = self._request_raw( 585 | 'DELETE', 586 | '/api/delete', 587 | json=DeleteRequest( 588 | model=model, 589 | 
).model_dump(exclude_none=True), 590 | ) 591 | return StatusResponse( 592 | status='success' if r.status_code == 200 else 'error', 593 | ) 594 | 595 | def copy(self, source: str, destination: str) -> StatusResponse: 596 | r = self._request_raw( 597 | 'POST', 598 | '/api/copy', 599 | json=CopyRequest( 600 | source=source, 601 | destination=destination, 602 | ).model_dump(exclude_none=True), 603 | ) 604 | return StatusResponse( 605 | status='success' if r.status_code == 200 else 'error', 606 | ) 607 | 608 | def show(self, model: str) -> ShowResponse: 609 | return self._request( 610 | ShowResponse, 611 | 'POST', 612 | '/api/show', 613 | json=ShowRequest( 614 | model=model, 615 | ).model_dump(exclude_none=True), 616 | ) 617 | 618 | def ps(self) -> ProcessResponse: 619 | return self._request( 620 | ProcessResponse, 621 | 'GET', 622 | '/api/ps', 623 | ) 624 | 625 | 626 | class AsyncClient(BaseClient): 627 | def __init__(self, host: Optional[str] = None, **kwargs) -> None: 628 | super().__init__(httpx.AsyncClient, host, **kwargs) 629 | 630 | async def _request_raw(self, *args, **kwargs): 631 | try: 632 | r = await self._client.request(*args, **kwargs) 633 | r.raise_for_status() 634 | return r 635 | except httpx.HTTPStatusError as e: 636 | raise ResponseError(e.response.text, e.response.status_code) from None 637 | except httpx.ConnectError: 638 | raise ConnectionError(CONNECTION_ERROR_MESSAGE) from None 639 | 640 | @overload 641 | async def _request( 642 | self, 643 | cls: Type[T], 644 | *args, 645 | stream: Literal[False] = False, 646 | **kwargs, 647 | ) -> T: ... 648 | 649 | @overload 650 | async def _request( 651 | self, 652 | cls: Type[T], 653 | *args, 654 | stream: Literal[True] = True, 655 | **kwargs, 656 | ) -> AsyncIterator[T]: ... 657 | 658 | @overload 659 | async def _request( 660 | self, 661 | cls: Type[T], 662 | *args, 663 | stream: bool = False, 664 | **kwargs, 665 | ) -> Union[T, AsyncIterator[T]]: ... 
666 | 667 | async def _request( 668 | self, 669 | cls: Type[T], 670 | *args, 671 | stream: bool = False, 672 | **kwargs, 673 | ) -> Union[T, AsyncIterator[T]]: 674 | if stream: 675 | 676 | async def inner(): 677 | async with self._client.stream(*args, **kwargs) as r: 678 | try: 679 | r.raise_for_status() 680 | except httpx.HTTPStatusError as e: 681 | await e.response.aread() 682 | raise ResponseError(e.response.text, e.response.status_code) from None 683 | 684 | async for line in r.aiter_lines(): 685 | part = json.loads(line) 686 | if err := part.get('error'): 687 | raise ResponseError(err) 688 | yield cls(**part) 689 | 690 | return inner() 691 | 692 | return cls(**(await self._request_raw(*args, **kwargs)).json()) 693 | 694 | @overload 695 | async def generate( 696 | self, 697 | model: str = '', 698 | prompt: str = '', 699 | suffix: str = '', 700 | *, 701 | system: str = '', 702 | template: str = '', 703 | context: Optional[Sequence[int]] = None, 704 | stream: Literal[False] = False, 705 | think: Optional[bool] = None, 706 | raw: bool = False, 707 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 708 | images: Optional[Sequence[Union[str, bytes, Image]]] = None, 709 | options: Optional[Union[Mapping[str, Any], Options]] = None, 710 | keep_alive: Optional[Union[float, str]] = None, 711 | ) -> GenerateResponse: ... 
712 | 713 | @overload 714 | async def generate( 715 | self, 716 | model: str = '', 717 | prompt: str = '', 718 | suffix: str = '', 719 | *, 720 | system: str = '', 721 | template: str = '', 722 | context: Optional[Sequence[int]] = None, 723 | stream: Literal[True] = True, 724 | think: Optional[bool] = None, 725 | raw: bool = False, 726 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 727 | images: Optional[Sequence[Union[str, bytes, Image]]] = None, 728 | options: Optional[Union[Mapping[str, Any], Options]] = None, 729 | keep_alive: Optional[Union[float, str]] = None, 730 | ) -> AsyncIterator[GenerateResponse]: ... 731 | 732 | async def generate( 733 | self, 734 | model: str = '', 735 | prompt: Optional[str] = None, 736 | suffix: Optional[str] = None, 737 | *, 738 | system: Optional[str] = None, 739 | template: Optional[str] = None, 740 | context: Optional[Sequence[int]] = None, 741 | stream: bool = False, 742 | think: Optional[bool] = None, 743 | raw: Optional[bool] = None, 744 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 745 | images: Optional[Sequence[Union[str, bytes, Image]]] = None, 746 | options: Optional[Union[Mapping[str, Any], Options]] = None, 747 | keep_alive: Optional[Union[float, str]] = None, 748 | ) -> Union[GenerateResponse, AsyncIterator[GenerateResponse]]: 749 | """ 750 | Create a response using the requested model. 751 | 752 | Raises `RequestError` if a model is not provided. 753 | 754 | Raises `ResponseError` if the request could not be fulfilled. 755 | 756 | Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. 
757 | """ 758 | return await self._request( 759 | GenerateResponse, 760 | 'POST', 761 | '/api/generate', 762 | json=GenerateRequest( 763 | model=model, 764 | prompt=prompt, 765 | suffix=suffix, 766 | system=system, 767 | template=template, 768 | context=context, 769 | stream=stream, 770 | think=think, 771 | raw=raw, 772 | format=format, 773 | images=list(_copy_images(images)) if images else None, 774 | options=options, 775 | keep_alive=keep_alive, 776 | ).model_dump(exclude_none=True), 777 | stream=stream, 778 | ) 779 | 780 | @overload 781 | async def chat( 782 | self, 783 | model: str = '', 784 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 785 | *, 786 | tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, 787 | stream: Literal[False] = False, 788 | think: Optional[bool] = None, 789 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 790 | options: Optional[Union[Mapping[str, Any], Options]] = None, 791 | keep_alive: Optional[Union[float, str]] = None, 792 | ) -> ChatResponse: ... 793 | 794 | @overload 795 | async def chat( 796 | self, 797 | model: str = '', 798 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 799 | *, 800 | tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, 801 | stream: Literal[True] = True, 802 | think: Optional[bool] = None, 803 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 804 | options: Optional[Union[Mapping[str, Any], Options]] = None, 805 | keep_alive: Optional[Union[float, str]] = None, 806 | ) -> AsyncIterator[ChatResponse]: ... 
807 | 808 | async def chat( 809 | self, 810 | model: str = '', 811 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 812 | *, 813 | tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, 814 | stream: bool = False, 815 | think: Optional[bool] = None, 816 | format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None, 817 | options: Optional[Union[Mapping[str, Any], Options]] = None, 818 | keep_alive: Optional[Union[float, str]] = None, 819 | ) -> Union[ChatResponse, AsyncIterator[ChatResponse]]: 820 | """ 821 | Create a chat response using the requested model. 822 | 823 | Args: 824 | tools: 825 | A JSON schema as a dict, an Ollama Tool or a Python Function. 826 | Python functions need to follow Google style docstrings to be converted to an Ollama Tool. 827 | For more information, see: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings 828 | stream: Whether to stream the response. 829 | format: The format of the response. 830 | 831 | Example: 832 | def add_two_numbers(a: int, b: int) -> int: 833 | ''' 834 | Add two numbers together. 835 | 836 | Args: 837 | a: First number to add 838 | b: Second number to add 839 | 840 | Returns: 841 | int: The sum of a and b 842 | ''' 843 | return a + b 844 | 845 | await client.chat(model='llama3.2', tools=[add_two_numbers], messages=[...]) 846 | 847 | Raises `RequestError` if a model is not provided. 848 | 849 | Raises `ResponseError` if the request could not be fulfilled. 850 | 851 | Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. 
852 | """ 853 | 854 | return await self._request( 855 | ChatResponse, 856 | 'POST', 857 | '/api/chat', 858 | json=ChatRequest( 859 | model=model, 860 | messages=list(_copy_messages(messages)), 861 | tools=list(_copy_tools(tools)), 862 | stream=stream, 863 | think=think, 864 | format=format, 865 | options=options, 866 | keep_alive=keep_alive, 867 | ).model_dump(exclude_none=True), 868 | stream=stream, 869 | ) 870 | 871 | async def embed( 872 | self, 873 | model: str = '', 874 | input: Union[str, Sequence[str]] = '', 875 | truncate: Optional[bool] = None, 876 | options: Optional[Union[Mapping[str, Any], Options]] = None, 877 | keep_alive: Optional[Union[float, str]] = None, 878 | ) -> EmbedResponse: 879 | return await self._request( 880 | EmbedResponse, 881 | 'POST', 882 | '/api/embed', 883 | json=EmbedRequest( 884 | model=model, 885 | input=input, 886 | truncate=truncate, 887 | options=options, 888 | keep_alive=keep_alive, 889 | ).model_dump(exclude_none=True), 890 | ) 891 | 892 | async def embeddings( 893 | self, 894 | model: str = '', 895 | prompt: Optional[str] = None, 896 | options: Optional[Union[Mapping[str, Any], Options]] = None, 897 | keep_alive: Optional[Union[float, str]] = None, 898 | ) -> EmbeddingsResponse: 899 | """ 900 | Deprecated in favor of `embed`. 901 | """ 902 | return await self._request( 903 | EmbeddingsResponse, 904 | 'POST', 905 | '/api/embeddings', 906 | json=EmbeddingsRequest( 907 | model=model, 908 | prompt=prompt, 909 | options=options, 910 | keep_alive=keep_alive, 911 | ).model_dump(exclude_none=True), 912 | ) 913 | 914 | @overload 915 | async def pull( 916 | self, 917 | model: str, 918 | *, 919 | insecure: bool = False, 920 | stream: Literal[False] = False, 921 | ) -> ProgressResponse: ... 922 | 923 | @overload 924 | async def pull( 925 | self, 926 | model: str, 927 | *, 928 | insecure: bool = False, 929 | stream: Literal[True] = True, 930 | ) -> AsyncIterator[ProgressResponse]: ... 
931 | 932 | async def pull( 933 | self, 934 | model: str, 935 | *, 936 | insecure: bool = False, 937 | stream: bool = False, 938 | ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]: 939 | """ 940 | Raises `ResponseError` if the request could not be fulfilled. 941 | 942 | Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 943 | """ 944 | return await self._request( 945 | ProgressResponse, 946 | 'POST', 947 | '/api/pull', 948 | json=PullRequest( 949 | model=model, 950 | insecure=insecure, 951 | stream=stream, 952 | ).model_dump(exclude_none=True), 953 | stream=stream, 954 | ) 955 | 956 | @overload 957 | async def push( 958 | self, 959 | model: str, 960 | *, 961 | insecure: bool = False, 962 | stream: Literal[False] = False, 963 | ) -> ProgressResponse: ... 964 | 965 | @overload 966 | async def push( 967 | self, 968 | model: str, 969 | *, 970 | insecure: bool = False, 971 | stream: Literal[True] = True, 972 | ) -> AsyncIterator[ProgressResponse]: ... 973 | 974 | async def push( 975 | self, 976 | model: str, 977 | *, 978 | insecure: bool = False, 979 | stream: bool = False, 980 | ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]: 981 | """ 982 | Raises `ResponseError` if the request could not be fulfilled. 983 | 984 | Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
985 | """ 986 | return await self._request( 987 | ProgressResponse, 988 | 'POST', 989 | '/api/push', 990 | json=PushRequest( 991 | model=model, 992 | insecure=insecure, 993 | stream=stream, 994 | ).model_dump(exclude_none=True), 995 | stream=stream, 996 | ) 997 | 998 | @overload 999 | async def create( 1000 | self, 1001 | model: str, 1002 | quantize: Optional[str] = None, 1003 | from_: Optional[str] = None, 1004 | files: Optional[Dict[str, str]] = None, 1005 | adapters: Optional[Dict[str, str]] = None, 1006 | template: Optional[str] = None, 1007 | license: Optional[Union[str, List[str]]] = None, 1008 | system: Optional[str] = None, 1009 | parameters: Optional[Union[Mapping[str, Any], Options]] = None, 1010 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 1011 | *, 1012 | stream: Literal[False] = False, 1013 | ) -> ProgressResponse: ... 1014 | 1015 | @overload 1016 | async def create( 1017 | self, 1018 | model: str, 1019 | quantize: Optional[str] = None, 1020 | from_: Optional[str] = None, 1021 | files: Optional[Dict[str, str]] = None, 1022 | adapters: Optional[Dict[str, str]] = None, 1023 | template: Optional[str] = None, 1024 | license: Optional[Union[str, List[str]]] = None, 1025 | system: Optional[str] = None, 1026 | parameters: Optional[Union[Mapping[str, Any], Options]] = None, 1027 | messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, 1028 | *, 1029 | stream: Literal[True] = True, 1030 | ) -> AsyncIterator[ProgressResponse]: ... 
  async def create(
    self,
    model: str,
    quantize: Optional[str] = None,
    from_: Optional[str] = None,
    files: Optional[Dict[str, str]] = None,
    adapters: Optional[Dict[str, str]] = None,
    template: Optional[str] = None,
    license: Optional[Union[str, List[str]]] = None,
    system: Optional[str] = None,
    parameters: Optional[Union[Mapping[str, Any], Options]] = None,
    messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None,
    *,
    stream: bool = False,
  ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]:
    """
    Create a model via POST /api/create.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """

    # CreateRequest serializes `from_` as `from` on the wire (see _types.py).
    return await self._request(
      ProgressResponse,
      'POST',
      '/api/create',
      json=CreateRequest(
        model=model,
        stream=stream,
        quantize=quantize,
        from_=from_,
        files=files,
        adapters=adapters,
        license=license,
        template=template,
        system=system,
        parameters=parameters,
        messages=messages,
      ).model_dump(exclude_none=True),
      stream=stream,
    )

  async def create_blob(self, path: Union[str, Path]) -> str:
    """
    Upload the file at `path` as a blob and return its 'sha256:<hex>' digest.

    The file is read twice: a first pass to compute the SHA-256 digest (which
    names the blob endpoint), then a second pass to stream the contents to
    POST /api/blobs/<digest>.
    """
    sha256sum = sha256()
    async with await anyio.open_file(path, 'rb') as r:
      # First pass: hash the file in 32 KiB chunks to avoid loading it whole.
      while True:
        chunk = await r.read(32 * 1024)
        if not chunk:
          break
        sha256sum.update(chunk)

    digest = f'sha256:{sha256sum.hexdigest()}'

    async def upload_bytes():
      # Second pass: re-open and yield the file in 32 KiB chunks so the
      # request body can be streamed instead of buffered in memory.
      async with await anyio.open_file(path, 'rb') as r:
        while True:
          chunk = await r.read(32 * 1024)
          if not chunk:
            break
          yield chunk

    await self._request_raw('POST', f'/api/blobs/{digest}', content=upload_bytes())

    return digest

  async def list(self) -> ListResponse:
    """List locally available models via GET /api/tags."""
    return await self._request(
      ListResponse,
      'GET',
      '/api/tags',
    )

  async def delete(self, model: str) -> StatusResponse:
    """Delete a model. Status is 'success' only on an HTTP 200 response."""
    r = await self._request_raw(
      'DELETE',
      '/api/delete',
      json=DeleteRequest(
        model=model,
      ).model_dump(exclude_none=True),
    )
    return StatusResponse(
      status='success' if r.status_code == 200 else 'error',
    )

  async def copy(self, source: str, destination: str) -> StatusResponse:
    """Copy a model. Status is 'success' only on an HTTP 200 response."""
    r = await self._request_raw(
      'POST',
      '/api/copy',
      json=CopyRequest(
        source=source,
        destination=destination,
      ).model_dump(exclude_none=True),
    )
    return StatusResponse(
      status='success' if r.status_code == 200 else 'error',
    )

  async def show(self, model: str) -> ShowResponse:
    """Show model information via POST /api/show."""
    return await self._request(
      ShowResponse,
      'POST',
      '/api/show',
      json=ShowRequest(
        model=model,
      ).model_dump(exclude_none=True),
    )

  async def ps(self) -> ProcessResponse:
    """List currently loaded models via GET /api/ps."""
    return await self._request(
      ProcessResponse,
      'GET',
      '/api/ps',
    )


def _copy_images(images: Optional[Sequence[Union[Image, Any]]]) -> Iterator[Image]:
  # Wrap raw values (path/bytes/base64 string) in Image; pass Image through.
  for image in images or []:
    yield image if isinstance(image, Image) else Image(value=image)


def _copy_messages(messages: Optional[Sequence[Union[Mapping[str, Any], Message]]]) -> Iterator[Message]:
  # Validate each message into a Message model. Falsy values (None, '', [])
  # are dropped, and any 'images' entry is normalized through _copy_images.
  for message in messages or []:
    yield Message.model_validate(
      {k: list(_copy_images(v)) if k == 'images' else v for k, v in dict(message).items() if v},
    )


def _copy_tools(tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None) -> Iterator[Tool]:
  # Callables are converted to Tools from their signature/docstring; mappings
  # and Tool instances are validated into Tool models.
  for unprocessed_tool in tools or []:
    yield convert_function_to_tool(unprocessed_tool) if callable(unprocessed_tool) else Tool.model_validate(unprocessed_tool)


def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
  # Best-effort conversion: return a Path only if it points at an existing
  # file; any error (e.g. value too long to be a path) yields None.
  if isinstance(s, (str, Path)):
    try:
      if (p := Path(s)).exists():
        return p
    except Exception:
      ...
  return None


def _parse_host(host: Optional[str]) -> str:
  """
  Normalize a host string to 'scheme://host:port[/path]'.

  Defaults: scheme 'http', host '127.0.0.1', port 11434 (80/443 when an
  explicit http/https scheme is given without a port).

  >>> _parse_host(None)
  'http://127.0.0.1:11434'
  >>> _parse_host('')
  'http://127.0.0.1:11434'
  >>> _parse_host('1.2.3.4')
  'http://1.2.3.4:11434'
  >>> _parse_host(':56789')
  'http://127.0.0.1:56789'
  >>> _parse_host('1.2.3.4:56789')
  'http://1.2.3.4:56789'
  >>> _parse_host('http://1.2.3.4')
  'http://1.2.3.4:80'
  >>> _parse_host('https://1.2.3.4')
  'https://1.2.3.4:443'
  >>> _parse_host('https://1.2.3.4:56789')
  'https://1.2.3.4:56789'
  >>> _parse_host('example.com')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789')
  'http://example.com:56789'
  >>> _parse_host('http://example.com')
  'http://example.com:80'
  >>> _parse_host('https://example.com')
  'https://example.com:443'
  >>> _parse_host('https://example.com:56789')
  'https://example.com:56789'
  >>> _parse_host('example.com/')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789/')
  'http://example.com:56789'
  >>> _parse_host('example.com/path')
  'http://example.com:11434/path'
  >>> _parse_host('example.com:56789/path')
  'http://example.com:56789/path'
  >>> _parse_host('https://example.com:56789/path')
  'https://example.com:56789/path'
  >>> _parse_host('example.com:56789/path/')
  'http://example.com:56789/path'
  >>> _parse_host('[0001:002:003:0004::1]')
  'http://[0001:002:003:0004::1]:11434'
  >>> _parse_host('[0001:002:003:0004::1]:56789')
  'http://[0001:002:003:0004::1]:56789'
  >>> _parse_host('http://[0001:002:003:0004::1]')
  'http://[0001:002:003:0004::1]:80'
  >>> _parse_host('https://[0001:002:003:0004::1]')
  'https://[0001:002:003:0004::1]:443'
  >>> _parse_host('https://[0001:002:003:0004::1]:56789')
  'https://[0001:002:003:0004::1]:56789'
  >>> _parse_host('[0001:002:003:0004::1]/')
  'http://[0001:002:003:0004::1]:11434'
  >>> _parse_host('[0001:002:003:0004::1]:56789/')
  'http://[0001:002:003:0004::1]:56789'
  >>> _parse_host('[0001:002:003:0004::1]/path')
  'http://[0001:002:003:0004::1]:11434/path'
  >>> _parse_host('[0001:002:003:0004::1]:56789/path')
  'http://[0001:002:003:0004::1]:56789/path'
  >>> _parse_host('https://[0001:002:003:0004::1]:56789/path')
  'https://[0001:002:003:0004::1]:56789/path'
  >>> _parse_host('[0001:002:003:0004::1]:56789/path/')
  'http://[0001:002:003:0004::1]:56789/path'
  """

  host, port = host or '', 11434
  scheme, _, hostport = host.partition('://')
  if not hostport:
    # No scheme present: the whole string is host[:port].
    scheme, hostport = 'http', host
  elif scheme == 'http':
    port = 80
  elif scheme == 'https':
    port = 443

  split = urllib.parse.urlsplit(f'{scheme}://{hostport}')
  host = split.hostname or '127.0.0.1'
  port = split.port or port

  try:
    if isinstance(ipaddress.ip_address(host), ipaddress.IPv6Address):
      # Fix missing square brackets for IPv6 from urlsplit
      host = f'[{host}]'
  except ValueError:
    ...

  if path := split.path.strip('/'):
    return f'{scheme}://{host}:{port}/{path}'

  return f'{scheme}://{host}:{port}'
-------------------------------------------------------------------------------- /ollama/_types.py: --------------------------------------------------------------------------------
import contextlib
import json
from base64 import b64decode, b64encode
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union

from pydantic import (
  BaseModel,
  ByteSize,
  ConfigDict,
  Field,
  model_serializer,
)
from pydantic.json_schema import JsonSchemaValue
from typing_extensions import Annotated, Literal


class SubscriptableBaseModel(BaseModel):
  """BaseModel that also supports dict-style access: m['key'], 'key' in m, m.get()."""

  def __getitem__(self, key: str) -> Any:
    """
    >>> msg = Message(role='user')
    >>> msg['role']
    'user'
    >>> msg = Message(role='user')
    >>> msg['nonexistent']
    Traceback (most recent call last):
    KeyError: 'nonexistent'
    """
    if key in self:
      return getattr(self, key)

    raise KeyError(key)

  def __setitem__(self, key: str, value: Any) -> None:
    """
    >>> msg = Message(role='user')
    >>> msg['role'] = 'assistant'
    >>> msg['role']
    'assistant'
    >>> tool_call = Message.ToolCall(function=Message.ToolCall.Function(name='foo', arguments={}))
    >>> msg = Message(role='user', content='hello')
    >>> msg['tool_calls'] = [tool_call]
    >>> msg['tool_calls'][0]['function']['name']
    'foo'
    """
    setattr(self, key, value)

  def __contains__(self, key: str) -> bool:
    """
    >>> msg = Message(role='user')
    >>> 'nonexistent' in msg
    False
    >>> 'role' in msg
    True
    >>> 'content' in msg
    False
    >>> msg.content = 'hello!'
    >>> 'content' in msg
    True
    >>> msg = Message(role='user', content='hello!')
    >>> 'content' in msg
    True
    >>> 'tool_calls' in msg
    False
    >>> msg['tool_calls'] = []
    >>> 'tool_calls' in msg
    True
    >>> msg['tool_calls'] = [Message.ToolCall(function=Message.ToolCall.Function(name='foo', arguments={}))]
    >>> 'tool_calls' in msg
    True
    >>> msg['tool_calls'] = None
    >>> 'tool_calls' in msg
    True
    >>> tool = Tool()
    >>> 'type' in tool
    True
    """
    # Explicitly-set fields always count as present.
    if key in self.model_fields_set:
      return True

    # Otherwise a field counts as present only if it has a non-None default.
    # NOTE(review): instance access to `model_fields` is deprecated in newer
    # pydantic releases — verify against the pinned pydantic version.
    if value := self.model_fields.get(key):
      return value.default is not None

    return False

  def get(self, key: str, default: Any = None) -> Any:
    """
    >>> msg = Message(role='user')
    >>> msg.get('role')
    'user'
    >>> msg = Message(role='user')
    >>> msg.get('nonexistent')
    >>> msg = Message(role='user')
    >>> msg.get('nonexistent', 'default')
    'default'
    >>> msg = Message(role='user', tool_calls=[ Message.ToolCall(function=Message.ToolCall.Function(name='foo', arguments={}))])
    >>> msg.get('tool_calls')[0]['function']['name']
    'foo'
    """
    return getattr(self, key) if hasattr(self, key) else default


class Options(SubscriptableBaseModel):
  # Options forwarded to the server; all default to None so that
  # model_dump(exclude_none=True) omits anything the caller did not set.

  # load time options
  numa: Optional[bool] = None
  num_ctx: Optional[int] = None
  num_batch: Optional[int] = None
  num_gpu: Optional[int] = None
  main_gpu: Optional[int] = None
  low_vram: Optional[bool] = None
  f16_kv: Optional[bool] = None
  logits_all: Optional[bool] = None
  vocab_only: Optional[bool] = None
  use_mmap: Optional[bool] = None
  use_mlock: Optional[bool] = None
  embedding_only: Optional[bool] = None
  num_thread: Optional[int] = None

  # runtime options
  num_keep: Optional[int] = None
  seed: Optional[int] = None
  num_predict: Optional[int] = None
  top_k: Optional[int] = None
  top_p: Optional[float] = None
  tfs_z: Optional[float] = None
  typical_p: Optional[float] = None
  repeat_last_n: Optional[int] = None
  temperature: Optional[float] = None
  repeat_penalty: Optional[float] = None
  presence_penalty: Optional[float] = None
  frequency_penalty: Optional[float] = None
  mirostat: Optional[int] = None
  mirostat_tau: Optional[float] = None
  mirostat_eta: Optional[float] = None
  penalize_newline: Optional[bool] = None
  stop: Optional[Sequence[str]] = None


class BaseRequest(SubscriptableBaseModel):
  # min_length=1 makes an empty model name a validation error at request time.
  model: Annotated[str, Field(min_length=1)]
  'Model to use for the request.'


class BaseStreamableRequest(BaseRequest):
  stream: Optional[bool] = None
  'Stream response.'


class BaseGenerateRequest(BaseStreamableRequest):
  options: Optional[Union[Mapping[str, Any], Options]] = None
  'Options to use for the request.'

  format: Optional[Union[Literal['', 'json'], JsonSchemaValue]] = None
  'Format of the response.'

  keep_alive: Optional[Union[float, str]] = None
  'Keep model alive for the specified duration.'
class Image(BaseModel):
  """Image payload that serializes to a base64 string regardless of input form."""

  value: Union[str, bytes, Path]

  @model_serializer
  def serialize_model(self):
    # Paths and raw bytes are read/encoded directly.
    if isinstance(self.value, (Path, bytes)):
      return b64encode(self.value.read_bytes() if isinstance(self.value, Path) else self.value).decode()

    if isinstance(self.value, str):
      try:
        if Path(self.value).exists():
          return b64encode(Path(self.value).read_bytes()).decode()
      except Exception:
        # Long base64 string can't be wrapped in Path, so try to treat as base64 string
        pass

      # String might be a file path, but might not exist
      if self.value.split('.')[-1] in ('png', 'jpg', 'jpeg', 'webp'):
        raise ValueError(f'File {self.value} does not exist')

      try:
        # Try to decode to check if it's already base64
        b64decode(self.value)
        return self.value
      except Exception as e:
        # Chain the actual decode error (the original code chained the
        # `Exception` class itself, losing the real cause).
        raise ValueError('Invalid image data, expected base64 string or path to image file') from e


class GenerateRequest(BaseGenerateRequest):
  prompt: Optional[str] = None
  'Prompt to generate response from.'

  suffix: Optional[str] = None
  'Suffix to append to the response.'

  system: Optional[str] = None
  'System prompt to prepend to the prompt.'

  template: Optional[str] = None
  'Template to use for the response.'

  context: Optional[Sequence[int]] = None
  'Tokenized history to use for the response.'

  raw: Optional[bool] = None

  images: Optional[Sequence[Image]] = None
  'Image data for multimodal models.'

  think: Optional[bool] = None
  'Enable thinking mode (for thinking models).'


class BaseGenerateResponse(SubscriptableBaseModel):
  model: Optional[str] = None
  'Model used to generate response.'

  created_at: Optional[str] = None
  'Time when the request was created.'

  done: Optional[bool] = None
  'True if response is complete, otherwise False. Useful for streaming to detect the final response.'

  done_reason: Optional[str] = None
  'Reason for completion. Only present when done is True.'

  total_duration: Optional[int] = None
  'Total duration in nanoseconds.'

  load_duration: Optional[int] = None
  'Load duration in nanoseconds.'

  prompt_eval_count: Optional[int] = None
  'Number of tokens evaluated in the prompt.'

  prompt_eval_duration: Optional[int] = None
  'Duration of evaluating the prompt in nanoseconds.'

  eval_count: Optional[int] = None
  'Number of tokens evaluated in inference.'

  eval_duration: Optional[int] = None
  'Duration of evaluating inference in nanoseconds.'


class GenerateResponse(BaseGenerateResponse):
  """
  Response returned by generate requests.
  """

  response: str
  'Response content. When streaming, this contains a fragment of the response.'

  thinking: Optional[str] = None
  'Thinking content. Only present when thinking is enabled.'

  context: Optional[Sequence[int]] = None
  'Tokenized history up to the point of the response.'


class Message(SubscriptableBaseModel):
  """
  Chat message.
  """

  role: str
  "Assumed role of the message. Response messages has role 'assistant' or 'tool'."

  content: Optional[str] = None
  'Content of the message. Response messages contains message fragments when streaming.'

  thinking: Optional[str] = None
  'Thinking content. Only present when thinking is enabled.'

  images: Optional[Sequence[Image]] = None
  """
  Optional list of image data for multimodal models.

  Valid input types are:

  - `str` or path-like object: path to image file
  - `bytes` or bytes-like object: raw image data

  Valid image formats depend on the model. See the model card for more information.
  """

  class ToolCall(SubscriptableBaseModel):
    """
    Model tool calls.
    """

    class Function(SubscriptableBaseModel):
      """
      Tool call function.
      """

      name: str
      'Name of the function.'

      arguments: Mapping[str, Any]
      'Arguments of the function.'

    function: Function
    'Function to be called.'

  tool_calls: Optional[Sequence[ToolCall]] = None
  """
  Tools calls to be made by the model.
  """


class Tool(SubscriptableBaseModel):
  """JSON-schema description of a callable tool exposed to the model."""

  type: Optional[Literal['function']] = 'function'

  class Function(SubscriptableBaseModel):
    name: Optional[str] = None
    description: Optional[str] = None

    class Parameters(SubscriptableBaseModel):
      model_config = ConfigDict(populate_by_name=True)
      type: Optional[Literal['object']] = 'object'
      # Aliased so '$defs' from a JSON schema populates this field.
      defs: Optional[Any] = Field(None, alias='$defs')
      items: Optional[Any] = None
      required: Optional[Sequence[str]] = None

      class Property(SubscriptableBaseModel):
        model_config = ConfigDict(arbitrary_types_allowed=True)

        type: Optional[Union[str, Sequence[str]]] = None
        items: Optional[Any] = None
        description: Optional[str] = None
        enum: Optional[Sequence[Any]] = None

      properties: Optional[Mapping[str, Property]] = None

    parameters: Optional[Parameters] = None

  function: Optional[Function] = None


class ChatRequest(BaseGenerateRequest):
  @model_serializer(mode='wrap')
  def serialize_model(self, nxt):
    # Rename 'defs' back to '$defs' in serialized tool parameters so the
    # wire format matches JSON Schema.
    output = nxt(self)
    if output.get('tools'):
      for tool in output['tools']:
        if 'function' in tool and 'parameters' in tool['function'] and 'defs' in tool['function']['parameters']:
          tool['function']['parameters']['$defs'] = tool['function']['parameters'].pop('defs')
    return output

  messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None
  'Messages to chat with.'

  tools: Optional[Sequence[Tool]] = None
  'Tools to use for the chat.'

  think: Optional[bool] = None
  'Enable thinking mode (for thinking models).'


class ChatResponse(BaseGenerateResponse):
  """
  Response returned by chat requests.
  """

  message: Message
  'Response message.'


class EmbedRequest(BaseRequest):
  input: Union[str, Sequence[str]]
  'Input text to embed.'

  truncate: Optional[bool] = None
  'Truncate the input to the maximum token length.'

  options: Optional[Union[Mapping[str, Any], Options]] = None
  'Options to use for the request.'

  keep_alive: Optional[Union[float, str]] = None
  'Keep model alive for the specified duration.'


class EmbedResponse(BaseGenerateResponse):
  """
  Response returned by embed requests.
  """

  embeddings: Sequence[Sequence[float]]
  'Embeddings of the inputs.'


class EmbeddingsRequest(BaseRequest):
  prompt: Optional[str] = None
  'Prompt to generate embeddings from.'

  options: Optional[Union[Mapping[str, Any], Options]] = None
  'Options to use for the request.'

  keep_alive: Optional[Union[float, str]] = None
  'Keep model alive for the specified duration.'


class EmbeddingsResponse(SubscriptableBaseModel):
  """
  Response returned by embeddings requests.
  """

  embedding: Sequence[float]
  'Embedding of the prompt.'


class PullRequest(BaseStreamableRequest):
  """
  Request to pull the model.
  """

  insecure: Optional[bool] = None
  'Allow insecure (HTTP) connections.'
class PushRequest(BaseStreamableRequest):
  """
  Request to push the model.
  """

  insecure: Optional[bool] = None
  'Allow insecure (HTTP) connections.'


class CreateRequest(BaseStreamableRequest):
  """
  Request to create a new model.
  """

  @model_serializer(mode='wrap')
  def serialize_model(self, nxt):
    # The wire format uses 'from', which is a Python keyword, so the field
    # is named 'from_' and renamed here during serialization.
    output = nxt(self)
    if 'from_' in output:
      output['from'] = output.pop('from_')
    return output

  quantize: Optional[str] = None
  from_: Optional[str] = None
  files: Optional[Dict[str, str]] = None
  adapters: Optional[Dict[str, str]] = None
  template: Optional[str] = None
  license: Optional[Union[str, List[str]]] = None
  system: Optional[str] = None
  parameters: Optional[Union[Mapping[str, Any], Options]] = None
  messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None


class ModelDetails(SubscriptableBaseModel):
  parent_model: Optional[str] = None
  format: Optional[str] = None
  family: Optional[str] = None
  families: Optional[Sequence[str]] = None
  parameter_size: Optional[str] = None
  quantization_level: Optional[str] = None


class ListResponse(SubscriptableBaseModel):
  class Model(SubscriptableBaseModel):
    model: Optional[str] = None
    modified_at: Optional[datetime] = None
    digest: Optional[str] = None
    size: Optional[ByteSize] = None
    details: Optional[ModelDetails] = None

  models: Sequence[Model]
  'List of models.'


class DeleteRequest(BaseRequest):
  """
  Request to delete a model.
  """


class CopyRequest(BaseModel):
  """
  Request to copy a model.
  """

  source: str
  'Source model to copy.'

  destination: str
  'Destination model to copy to.'
class StatusResponse(SubscriptableBaseModel):
  status: Optional[str] = None


class ProgressResponse(StatusResponse):
  # Progress for pull/push/create; completed/total are byte counts per layer.
  # NOTE(review): byte-count semantics inferred from API usage — confirm.
  completed: Optional[int] = None
  total: Optional[int] = None
  digest: Optional[str] = None


class ShowRequest(BaseRequest):
  """
  Request to show model information.
  """


class ShowResponse(SubscriptableBaseModel):
  modified_at: Optional[datetime] = None

  template: Optional[str] = None

  modelfile: Optional[str] = None

  license: Optional[str] = None

  details: Optional[ModelDetails] = None

  # Populated from the API's 'model_info' key via the alias.
  modelinfo: Optional[Mapping[str, Any]] = Field(alias='model_info')

  parameters: Optional[str] = None

  capabilities: Optional[List[str]] = None


class ProcessResponse(SubscriptableBaseModel):
  class Model(SubscriptableBaseModel):
    model: Optional[str] = None
    name: Optional[str] = None
    digest: Optional[str] = None
    expires_at: Optional[datetime] = None
    size: Optional[ByteSize] = None
    size_vram: Optional[ByteSize] = None
    details: Optional[ModelDetails] = None

  models: Sequence[Model]


class RequestError(Exception):
  """
  Common class for request errors.
  """

  def __init__(self, error: str):
    super().__init__(error)
    self.error = error
    'Reason for the error.'


class ResponseError(Exception):
  """
  Common class for response errors.
  """

  def __init__(self, error: str, status_code: int = -1):
    # try to parse content as JSON and extract 'error'
    # fallback to raw content if JSON parsing fails
    with contextlib.suppress(json.JSONDecodeError):
      error = json.loads(error).get('error', error)

    super().__init__(error)
    self.error = error
    'Reason for the error.'

    self.status_code = status_code
    'HTTP status code of the response.'

  def __str__(self) -> str:
    return f'{self.error} (status code: {self.status_code})'
-------------------------------------------------------------------------------- /ollama/_utils.py: --------------------------------------------------------------------------------
from __future__ import annotations

import inspect
import re
from collections import defaultdict
from typing import Callable, Union

import pydantic

from ollama._types import Tool


def _parse_docstring(doc_string: Union[str, None]) -> dict[str, str]:
  """
  Split a Google-style docstring into sections.

  The returned mapping contains the free-form description under a key derived
  from hash(doc_string) (so it cannot collide with an argument name), plus one
  entry per argument parsed from the 'Args:' section. 'Returns:'/'Yields:'/
  'Raises:' sections are collected under '_' and effectively discarded.
  """
  parsed_docstring = defaultdict(str)
  if not doc_string:
    return parsed_docstring

  # Collision-proof key for the leading description text.
  key = str(hash(doc_string))
  for line in doc_string.splitlines():
    lowered_line = line.lower().strip()
    if lowered_line.startswith('args:'):
      key = 'args'
    elif lowered_line.startswith(('returns:', 'yields:', 'raises:')):
      key = '_'

    else:
      # maybe change to a list and join later
      parsed_docstring[key] += f'{line.strip()}\n'

  # Second pass: split the collected 'args' text into per-argument entries.
  last_key = None
  for line in parsed_docstring['args'].splitlines():
    line = line.strip()
    if ':' in line:
      # Split the line on either:
      # 1. A parenthetical expression like (integer) - captured in group 1
      # 2. A colon :
      # Followed by optional whitespace. Only split on first occurrence.
      parts = re.split(r'(?:\(([^)]*)\)|:)\s*', line, maxsplit=1)

      arg_name = parts[0].strip()
      last_key = arg_name

      # Get the description - will be in parts[1] if parenthetical or parts[-1] if after colon
      arg_description = parts[-1].strip()
      if len(parts) > 2 and parts[1]:  # Has parenthetical content
        arg_description = parts[-1].split(':', 1)[-1].strip()

      parsed_docstring[last_key] = arg_description

    elif last_key and line:
      # Continuation line of a multi-line argument description.
      parsed_docstring[last_key] += ' ' + line

  return parsed_docstring


def convert_function_to_tool(func: Callable) -> Tool:
  """
  Build a Tool definition from a Python function's signature and docstring.

  Parameter types come from annotations (default: str); descriptions come
  from the Google-style 'Args:' docstring section via _parse_docstring.
  """
  doc_string_hash = str(hash(inspect.getdoc(func)))
  parsed_docstring = _parse_docstring(inspect.getdoc(func))
  # Synthesize a pydantic model from the signature to get a JSON schema.
  schema = type(
    func.__name__,
    (pydantic.BaseModel,),
    {
      '__annotations__': {k: v.annotation if v.annotation != inspect._empty else str for k, v in inspect.signature(func).parameters.items()},
      '__signature__': inspect.signature(func),
      '__doc__': parsed_docstring[doc_string_hash],
    },
  ).model_json_schema()

  for k, v in schema.get('properties', {}).items():
    # If type is missing, the default is string
    types = {t.get('type', 'string') for t in v.get('anyOf')} if 'anyOf' in v else {v.get('type', 'string')}
    if 'null' in types:
      # Optional parameter: drop from 'required' and strip the null type.
      schema['required'].remove(k)
      types.discard('null')

    schema['properties'][k] = {
      'description': parsed_docstring[k],
      'type': ', '.join(types),
    }

  tool = Tool(
    function=Tool.Function(
      name=func.__name__,
      description=schema.get('description', ''),
      parameters=Tool.Function.Parameters(**schema),
    )
  )

  return Tool.model_validate(tool)
-------------------------------------------------------------------------------- /ollama/py.typed: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/ollama/ollama-python/63ca74762284100b2f0ad207bc00fa3d32720fbd/ollama/py.typed -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = 'ollama' 3 | description = 'The official Python client for Ollama.' 4 | authors = [ 5 | { email = 'hello@ollama.com' }, 6 | ] 7 | readme = 'README.md' 8 | requires-python = '>=3.8' 9 | dependencies = [ 10 | 'httpx>=0.27', 11 | 'pydantic>=2.9', 12 | ] 13 | dynamic = [ 'version' ] 14 | 15 | [project.urls] 16 | homepage = 'https://ollama.com' 17 | repository = 'https://github.com/ollama/ollama-python' 18 | issues = 'https://github.com/ollama/ollama-python/issues' 19 | 20 | [build-system] 21 | requires = [ 'hatchling', 'hatch-vcs' ] 22 | build-backend = 'hatchling.build' 23 | 24 | [tool.hatch.version] 25 | source = 'vcs' 26 | 27 | [tool.hatch.envs.hatch-test] 28 | default-args = ['ollama', 'tests'] 29 | extra-dependencies = [ 30 | 'pytest-anyio', 31 | 'pytest-httpserver', 32 | ] 33 | 34 | [tool.hatch.envs.hatch-static-analysis] 35 | dependencies = [ 'ruff>=0.9.1' ] 36 | config-path = 'none' 37 | 38 | [tool.ruff] 39 | line-length = 999 40 | indent-width = 2 41 | 42 | [tool.ruff.format] 43 | quote-style = 'single' 44 | indent-style = 'space' 45 | docstring-code-format = false 46 | 47 | [tool.ruff.lint] 48 | select = [ 49 | 'F', # pyflakes 50 | 'E', # pycodestyle errors 51 | 'W', # pycodestyle warnings 52 | 'I', # sort imports 53 | 'N', # pep8-naming 54 | 'ASYNC', # flake8-async 55 | 'FBT', # flake8-boolean-trap 56 | 'B', # flake8-bugbear 57 | 'C4', # flake8-comprehensions 58 | 'PIE', # flake8-pie 59 | 'SIM', # flake8-simplify 60 | 'FLY', # flynt 61 | 'RUF', # ruff-specific rules 62 | ] 63 | 64 | [tool.pytest.ini_options] 65 | addopts = ['--doctest-modules'] 66 | -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv export 3 | -e . 4 | annotated-types==0.7.0 \ 5 | --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ 6 | --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 7 | # via pydantic 8 | anyio==4.5.2 ; python_full_version < '3.9' \ 9 | --hash=sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b \ 10 | --hash=sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f 11 | # via httpx 12 | anyio==4.8.0 ; python_full_version >= '3.9' \ 13 | --hash=sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a \ 14 | --hash=sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a 15 | # via httpx 16 | certifi==2025.1.31 \ 17 | --hash=sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651 \ 18 | --hash=sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe 19 | # via 20 | # httpcore 21 | # httpx 22 | exceptiongroup==1.2.2 ; python_full_version < '3.11' \ 23 | --hash=sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b \ 24 | --hash=sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc 25 | # via anyio 26 | h11==0.14.0 \ 27 | --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ 28 | --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 29 | # via httpcore 30 | httpcore==1.0.7 \ 31 | --hash=sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c \ 32 | --hash=sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd 33 | # via httpx 34 | httpx==0.28.1 \ 35 | --hash=sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc \ 36 | --hash=sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad 37 | # via ollama 38 | idna==3.10 \ 39 | 
--hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ 40 | --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 41 | # via 42 | # anyio 43 | # httpx 44 | pydantic==2.10.6 \ 45 | --hash=sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584 \ 46 | --hash=sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236 47 | # via ollama 48 | pydantic-core==2.27.2 \ 49 | --hash=sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278 \ 50 | --hash=sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50 \ 51 | --hash=sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9 \ 52 | --hash=sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f \ 53 | --hash=sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6 \ 54 | --hash=sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc \ 55 | --hash=sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54 \ 56 | --hash=sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630 \ 57 | --hash=sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9 \ 58 | --hash=sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236 \ 59 | --hash=sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7 \ 60 | --hash=sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee \ 61 | --hash=sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b \ 62 | --hash=sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048 \ 63 | --hash=sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc \ 64 | --hash=sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130 \ 65 | --hash=sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4 \ 66 | --hash=sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd \ 67 | 
--hash=sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4 \ 68 | --hash=sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7 \ 69 | --hash=sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7 \ 70 | --hash=sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4 \ 71 | --hash=sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e \ 72 | --hash=sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa \ 73 | --hash=sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6 \ 74 | --hash=sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962 \ 75 | --hash=sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b \ 76 | --hash=sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f \ 77 | --hash=sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474 \ 78 | --hash=sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5 \ 79 | --hash=sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459 \ 80 | --hash=sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf \ 81 | --hash=sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a \ 82 | --hash=sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c \ 83 | --hash=sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76 \ 84 | --hash=sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362 \ 85 | --hash=sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4 \ 86 | --hash=sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934 \ 87 | --hash=sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320 \ 88 | --hash=sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118 \ 89 | --hash=sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96 \ 90 | 
--hash=sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306 \ 91 | --hash=sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046 \ 92 | --hash=sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3 \ 93 | --hash=sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2 \ 94 | --hash=sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af \ 95 | --hash=sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9 \ 96 | --hash=sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67 \ 97 | --hash=sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a \ 98 | --hash=sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27 \ 99 | --hash=sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35 \ 100 | --hash=sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b \ 101 | --hash=sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151 \ 102 | --hash=sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b \ 103 | --hash=sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154 \ 104 | --hash=sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133 \ 105 | --hash=sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef \ 106 | --hash=sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145 \ 107 | --hash=sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15 \ 108 | --hash=sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4 \ 109 | --hash=sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc \ 110 | --hash=sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee \ 111 | --hash=sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c \ 112 | --hash=sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0 \ 113 | 
--hash=sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5 \ 114 | --hash=sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57 \ 115 | --hash=sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b \ 116 | --hash=sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8 \ 117 | --hash=sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1 \ 118 | --hash=sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da \ 119 | --hash=sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e \ 120 | --hash=sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc \ 121 | --hash=sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993 \ 122 | --hash=sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656 \ 123 | --hash=sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4 \ 124 | --hash=sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c \ 125 | --hash=sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb \ 126 | --hash=sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d \ 127 | --hash=sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9 \ 128 | --hash=sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e \ 129 | --hash=sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1 \ 130 | --hash=sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc \ 131 | --hash=sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a \ 132 | --hash=sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9 \ 133 | --hash=sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506 \ 134 | --hash=sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b \ 135 | --hash=sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1 \ 136 
| --hash=sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d \ 137 | --hash=sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99 \ 138 | --hash=sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3 \ 139 | --hash=sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31 \ 140 | --hash=sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c \ 141 | --hash=sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39 \ 142 | --hash=sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a \ 143 | --hash=sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308 \ 144 | --hash=sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2 \ 145 | --hash=sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228 \ 146 | --hash=sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b \ 147 | --hash=sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9 \ 148 | --hash=sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad 149 | # via pydantic 150 | sniffio==1.3.1 \ 151 | --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ 152 | --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc 153 | # via anyio 154 | typing-extensions==4.12.2 \ 155 | --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ 156 | --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 157 | # via 158 | # annotated-types 159 | # anyio 160 | # pydantic 161 | # pydantic-core 162 | -------------------------------------------------------------------------------- /tests/test_client.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import os 4 | import re 5 | import tempfile 6 | from pathlib import Path 7 | from typing import Any 8 | 9 | import 
pytest 10 | from httpx import Response as httpxResponse 11 | from pydantic import BaseModel, ValidationError 12 | from pytest_httpserver import HTTPServer, URIPattern 13 | from werkzeug.wrappers import Request, Response 14 | 15 | from ollama._client import CONNECTION_ERROR_MESSAGE, AsyncClient, Client, _copy_tools 16 | from ollama._types import Image, Message 17 | 18 | PNG_BASE64 = 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGNgYGAAAAAEAAH2FzhVAAAAAElFTkSuQmCC' 19 | PNG_BYTES = base64.b64decode(PNG_BASE64) 20 | 21 | pytestmark = pytest.mark.anyio 22 | 23 | 24 | @pytest.fixture 25 | def anyio_backend(): 26 | return 'asyncio' 27 | 28 | 29 | class PrefixPattern(URIPattern): 30 | def __init__(self, prefix: str): 31 | self.prefix = prefix 32 | 33 | def match(self, uri): 34 | return uri.startswith(self.prefix) 35 | 36 | 37 | def test_client_chat(httpserver: HTTPServer): 38 | httpserver.expect_ordered_request( 39 | '/api/chat', 40 | method='POST', 41 | json={ 42 | 'model': 'dummy', 43 | 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 44 | 'tools': [], 45 | 'stream': False, 46 | }, 47 | ).respond_with_json( 48 | { 49 | 'model': 'dummy', 50 | 'message': { 51 | 'role': 'assistant', 52 | 'content': "I don't know.", 53 | }, 54 | } 55 | ) 56 | 57 | client = Client(httpserver.url_for('/')) 58 | response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) 59 | assert response['model'] == 'dummy' 60 | assert response['message']['role'] == 'assistant' 61 | assert response['message']['content'] == "I don't know." 
def test_client_chat_stream(httpserver: HTTPServer):
  """Streamed chat: each NDJSON chunk carries one assistant content fragment."""

  def stream_handler(_: Request):
    def generate():
      for message in ['I ', "don't ", 'know.']:
        yield (
          json.dumps(
            {
              'model': 'dummy',
              'message': {
                'role': 'assistant',
                'content': message,
              },
            }
          )
          + '\n'
        )

    return Response(generate())

  httpserver.expect_ordered_request(
    '/api/chat',
    method='POST',
    json={
      'model': 'dummy',
      'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}],
      'tools': [],
      'stream': True,
    },
  ).respond_with_handler(stream_handler)

  client = Client(httpserver.url_for('/'))
  response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], stream=True)

  it = iter(['I ', "don't ", 'know.'])
  for part in response:
    # Fixed: was `part['message']['role'] in 'assistant'`, a substring test that
    # also passes for '', 'a', 'ss', etc. Use equality, matching the async variant.
    assert part['message']['role'] == 'assistant'
    assert part['message']['content'] == next(it)


@pytest.mark.parametrize('message_format', ('dict', 'pydantic_model'))
@pytest.mark.parametrize('file_style', ('path', 'bytes'))
def test_client_chat_images(httpserver: HTTPServer, message_format: str, file_style: str, tmp_path):
  """Chat with an image attached, exercising both message shapes (dict and
  pydantic Message) and both image sources (raw bytes and a file path).

  Either way, the client must serialize the image as base64 in the request body.
  """
  # NOTE: the redundant function-local `from ollama._types import Image, Message`
  # was removed; both names are already imported at module level.
  httpserver.expect_ordered_request(
    '/api/chat',
    method='POST',
    json={
      'model': 'dummy',
      'messages': [
        {
          'role': 'user',
          'content': 'Why is the sky blue?',
          'images': [PNG_BASE64],
        },
      ],
      'tools': [],
      'stream': False,
    },
  ).respond_with_json(
    {
      'model': 'dummy',
      'message': {
        'role': 'assistant',
        'content': "I don't know.",
      },
    }
  )

  client = Client(httpserver.url_for('/'))

  if file_style == 'bytes':
    image_content = PNG_BYTES
  elif file_style == 'path':
    image_path = tmp_path / 'transparent.png'
    image_path.write_bytes(PNG_BYTES)
    image_content = str(image_path)

  if message_format == 'pydantic_model':
    messages = [Message(role='user', content='Why is the sky blue?', images=[Image(value=image_content)])]
  elif message_format == 'dict':
    messages = [{'role': 'user', 'content': 'Why is the sky blue?', 'images': [image_content]}]
  else:
    raise ValueError(f'Invalid message format: {message_format}')

  response = client.chat('dummy', messages=messages)
  assert response['model'] == 'dummy'
  assert response['message']['role'] == 'assistant'
  assert response['message']['content'] == "I don't know."


def test_client_chat_format_json(httpserver: HTTPServer):
  """`format='json'` is forwarded verbatim in the chat request body."""
  httpserver.expect_ordered_request(
    '/api/chat',
    method='POST',
    json={
      'model': 'dummy',
      'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}],
      'tools': [],
      'format': 'json',
      'stream': False,
    },
  ).respond_with_json(
    {
      'model': 'dummy',
      'message': {
        'role': 'assistant',
        'content': '{"answer": "Because of Rayleigh scattering"}',
      },
    }
  )

  client = Client(httpserver.url_for('/'))
  response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format='json')
  assert response['model'] == 'dummy'
  assert response['message']['role'] == 'assistant'
  assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering"}'


def test_client_chat_format_pydantic(httpserver: HTTPServer):
  """A pydantic model's JSON schema passed as `format` is sent unmodified."""

  class ResponseFormat(BaseModel):
    answer: str
    confidence: float

  httpserver.expect_ordered_request(
    '/api/chat',
    method='POST',
    json={
      'model': 'dummy',
      'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}],
      'tools': [],
      'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': {'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']},
      'stream': False,
    },
  ).respond_with_json(
    {
      'model': 'dummy',
      'message': {
        'role': 'assistant',
        'content': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}',
      },
    }
  )

  client = Client(httpserver.url_for('/'))
  response = client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format=ResponseFormat.model_json_schema())
  assert response['model'] == 'dummy'
  assert response['message']['role'] == 'assistant'
  assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}'


async def test_async_client_chat_format_json(httpserver: HTTPServer):
  """Async twin of test_client_chat_format_json."""
  httpserver.expect_ordered_request(
    '/api/chat',
    method='POST',
    json={
      'model': 'dummy',
      'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}],
      'tools': [],
      'format': 'json',
      'stream': False,
    },
  ).respond_with_json(
    {
      'model': 'dummy',
      'message': {
        'role': 'assistant',
        'content': '{"answer": "Because of Rayleigh scattering"}',
      },
    }
  )

  client = AsyncClient(httpserver.url_for('/'))
  response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format='json')
  assert response['model'] == 'dummy'
  assert response['message']['role'] == 'assistant'
  assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering"}'


async def test_async_client_chat_format_pydantic(httpserver: HTTPServer):
  """Async twin of test_client_chat_format_pydantic."""

  class ResponseFormat(BaseModel):
    answer: str
    confidence: float

  httpserver.expect_ordered_request(
    '/api/chat',
    method='POST',
    json={
      'model': 'dummy',
      'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}],
      'tools': [],
      'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': {'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']},
      'stream': False,
    },
  ).respond_with_json(
    {
      'model': 'dummy',
      'message': {
        'role': 'assistant',
        'content': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}',
      },
    }
  )

  client = AsyncClient(httpserver.url_for('/'))
  response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], format=ResponseFormat.model_json_schema())
  assert response['model'] == 'dummy'
  assert response['message']['role'] == 'assistant'
  assert response['message']['content'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}'


def test_client_generate(httpserver: HTTPServer):
  """Basic non-streaming generate round-trip."""
  httpserver.expect_ordered_request(
    '/api/generate',
    method='POST',
    json={
      'model': 'dummy',
      'prompt': 'Why is the sky blue?',
      'stream': False,
    },
  ).respond_with_json(
    {
      'model': 'dummy',
      'response': 'Because it is.',
    }
  )

  client = Client(httpserver.url_for('/'))
  response = client.generate('dummy', 'Why is the sky blue?')
  assert response['model'] == 'dummy'
  assert response['response'] == 'Because it is.'
295 | 296 | 297 | def test_client_generate_with_image_type(httpserver: HTTPServer): 298 | httpserver.expect_ordered_request( 299 | '/api/generate', 300 | method='POST', 301 | json={ 302 | 'model': 'dummy', 303 | 'prompt': 'What is in this image?', 304 | 'stream': False, 305 | 'images': [PNG_BASE64], 306 | }, 307 | ).respond_with_json( 308 | { 309 | 'model': 'dummy', 310 | 'response': 'A blue sky.', 311 | } 312 | ) 313 | 314 | client = Client(httpserver.url_for('/')) 315 | response = client.generate('dummy', 'What is in this image?', images=[Image(value=PNG_BASE64)]) 316 | assert response['model'] == 'dummy' 317 | assert response['response'] == 'A blue sky.' 318 | 319 | 320 | def test_client_generate_with_invalid_image(httpserver: HTTPServer): 321 | httpserver.expect_ordered_request( 322 | '/api/generate', 323 | method='POST', 324 | json={ 325 | 'model': 'dummy', 326 | 'prompt': 'What is in this image?', 327 | 'stream': False, 328 | 'images': ['invalid_base64'], 329 | }, 330 | ).respond_with_json({'error': 'Invalid image data'}, status=400) 331 | 332 | client = Client(httpserver.url_for('/')) 333 | with pytest.raises(ValueError): 334 | client.generate('dummy', 'What is in this image?', images=[Image(value='invalid_base64')]) 335 | 336 | 337 | def test_client_generate_stream(httpserver: HTTPServer): 338 | def stream_handler(_: Request): 339 | def generate(): 340 | for message in ['Because ', 'it ', 'is.']: 341 | yield ( 342 | json.dumps( 343 | { 344 | 'model': 'dummy', 345 | 'response': message, 346 | } 347 | ) 348 | + '\n' 349 | ) 350 | 351 | return Response(generate()) 352 | 353 | httpserver.expect_ordered_request( 354 | '/api/generate', 355 | method='POST', 356 | json={ 357 | 'model': 'dummy', 358 | 'prompt': 'Why is the sky blue?', 359 | 'stream': True, 360 | }, 361 | ).respond_with_handler(stream_handler) 362 | 363 | client = Client(httpserver.url_for('/')) 364 | response = client.generate('dummy', 'Why is the sky blue?', stream=True) 365 | 366 | it = 
iter(['Because ', 'it ', 'is.']) 367 | for part in response: 368 | assert part['model'] == 'dummy' 369 | assert part['response'] == next(it) 370 | 371 | 372 | def test_client_generate_images(httpserver: HTTPServer): 373 | httpserver.expect_ordered_request( 374 | '/api/generate', 375 | method='POST', 376 | json={ 377 | 'model': 'dummy', 378 | 'prompt': 'Why is the sky blue?', 379 | 'stream': False, 380 | 'images': [PNG_BASE64], 381 | }, 382 | ).respond_with_json( 383 | { 384 | 'model': 'dummy', 385 | 'response': 'Because it is.', 386 | } 387 | ) 388 | 389 | client = Client(httpserver.url_for('/')) 390 | 391 | with tempfile.NamedTemporaryFile() as temp: 392 | temp.write(PNG_BYTES) 393 | temp.flush() 394 | response = client.generate('dummy', 'Why is the sky blue?', images=[temp.name]) 395 | assert response['model'] == 'dummy' 396 | assert response['response'] == 'Because it is.' 397 | 398 | 399 | def test_client_generate_format_json(httpserver: HTTPServer): 400 | httpserver.expect_ordered_request( 401 | '/api/generate', 402 | method='POST', 403 | json={ 404 | 'model': 'dummy', 405 | 'prompt': 'Why is the sky blue?', 406 | 'format': 'json', 407 | 'stream': False, 408 | }, 409 | ).respond_with_json( 410 | { 411 | 'model': 'dummy', 412 | 'response': '{"answer": "Because of Rayleigh scattering"}', 413 | } 414 | ) 415 | 416 | client = Client(httpserver.url_for('/')) 417 | response = client.generate('dummy', 'Why is the sky blue?', format='json') 418 | assert response['model'] == 'dummy' 419 | assert response['response'] == '{"answer": "Because of Rayleigh scattering"}' 420 | 421 | 422 | def test_client_generate_format_pydantic(httpserver: HTTPServer): 423 | class ResponseFormat(BaseModel): 424 | answer: str 425 | confidence: float 426 | 427 | httpserver.expect_ordered_request( 428 | '/api/generate', 429 | method='POST', 430 | json={ 431 | 'model': 'dummy', 432 | 'prompt': 'Why is the sky blue?', 433 | 'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': 
{'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']}, 434 | 'stream': False, 435 | }, 436 | ).respond_with_json( 437 | { 438 | 'model': 'dummy', 439 | 'response': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}', 440 | } 441 | ) 442 | 443 | client = Client(httpserver.url_for('/')) 444 | response = client.generate('dummy', 'Why is the sky blue?', format=ResponseFormat.model_json_schema()) 445 | assert response['model'] == 'dummy' 446 | assert response['response'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}' 447 | 448 | 449 | async def test_async_client_generate_format_json(httpserver: HTTPServer): 450 | httpserver.expect_ordered_request( 451 | '/api/generate', 452 | method='POST', 453 | json={ 454 | 'model': 'dummy', 455 | 'prompt': 'Why is the sky blue?', 456 | 'format': 'json', 457 | 'stream': False, 458 | }, 459 | ).respond_with_json( 460 | { 461 | 'model': 'dummy', 462 | 'response': '{"answer": "Because of Rayleigh scattering"}', 463 | } 464 | ) 465 | 466 | client = AsyncClient(httpserver.url_for('/')) 467 | response = await client.generate('dummy', 'Why is the sky blue?', format='json') 468 | assert response['model'] == 'dummy' 469 | assert response['response'] == '{"answer": "Because of Rayleigh scattering"}' 470 | 471 | 472 | async def test_async_client_generate_format_pydantic(httpserver: HTTPServer): 473 | class ResponseFormat(BaseModel): 474 | answer: str 475 | confidence: float 476 | 477 | httpserver.expect_ordered_request( 478 | '/api/generate', 479 | method='POST', 480 | json={ 481 | 'model': 'dummy', 482 | 'prompt': 'Why is the sky blue?', 483 | 'format': {'title': 'ResponseFormat', 'type': 'object', 'properties': {'answer': {'title': 'Answer', 'type': 'string'}, 'confidence': {'title': 'Confidence', 'type': 'number'}}, 'required': ['answer', 'confidence']}, 484 | 'stream': False, 485 | }, 486 | ).respond_with_json( 
487 | { 488 | 'model': 'dummy', 489 | 'response': '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}', 490 | } 491 | ) 492 | 493 | client = AsyncClient(httpserver.url_for('/')) 494 | response = await client.generate('dummy', 'Why is the sky blue?', format=ResponseFormat.model_json_schema()) 495 | assert response['model'] == 'dummy' 496 | assert response['response'] == '{"answer": "Because of Rayleigh scattering", "confidence": 0.95}' 497 | 498 | 499 | def test_client_pull(httpserver: HTTPServer): 500 | httpserver.expect_ordered_request( 501 | '/api/pull', 502 | method='POST', 503 | json={ 504 | 'model': 'dummy', 505 | 'insecure': False, 506 | 'stream': False, 507 | }, 508 | ).respond_with_json({'status': 'success'}) 509 | 510 | client = Client(httpserver.url_for('/')) 511 | response = client.pull('dummy') 512 | assert response['status'] == 'success' 513 | 514 | 515 | def test_client_pull_stream(httpserver: HTTPServer): 516 | def stream_handler(_: Request): 517 | def generate(): 518 | yield json.dumps({'status': 'pulling manifest'}) + '\n' 519 | yield json.dumps({'status': 'verifying sha256 digest'}) + '\n' 520 | yield json.dumps({'status': 'writing manifest'}) + '\n' 521 | yield json.dumps({'status': 'removing any unused layers'}) + '\n' 522 | yield json.dumps({'status': 'success'}) + '\n' 523 | 524 | return Response(generate()) 525 | 526 | httpserver.expect_ordered_request( 527 | '/api/pull', 528 | method='POST', 529 | json={ 530 | 'model': 'dummy', 531 | 'insecure': False, 532 | 'stream': True, 533 | }, 534 | ).respond_with_handler(stream_handler) 535 | 536 | client = Client(httpserver.url_for('/')) 537 | response = client.pull('dummy', stream=True) 538 | 539 | it = iter(['pulling manifest', 'verifying sha256 digest', 'writing manifest', 'removing any unused layers', 'success']) 540 | for part in response: 541 | assert part['status'] == next(it) 542 | 543 | 544 | def test_client_push(httpserver: HTTPServer): 545 | httpserver.expect_ordered_request( 
546 | '/api/push', 547 | method='POST', 548 | json={ 549 | 'model': 'dummy', 550 | 'insecure': False, 551 | 'stream': False, 552 | }, 553 | ).respond_with_json({'status': 'success'}) 554 | 555 | client = Client(httpserver.url_for('/')) 556 | response = client.push('dummy') 557 | assert response['status'] == 'success' 558 | 559 | 560 | def test_client_push_stream(httpserver: HTTPServer): 561 | def stream_handler(_: Request): 562 | def generate(): 563 | yield json.dumps({'status': 'retrieving manifest'}) + '\n' 564 | yield json.dumps({'status': 'pushing manifest'}) + '\n' 565 | yield json.dumps({'status': 'success'}) + '\n' 566 | 567 | return Response(generate()) 568 | 569 | httpserver.expect_ordered_request( 570 | '/api/push', 571 | method='POST', 572 | json={ 573 | 'model': 'dummy', 574 | 'insecure': False, 575 | 'stream': True, 576 | }, 577 | ).respond_with_handler(stream_handler) 578 | 579 | client = Client(httpserver.url_for('/')) 580 | response = client.push('dummy', stream=True) 581 | 582 | it = iter(['retrieving manifest', 'pushing manifest', 'success']) 583 | for part in response: 584 | assert part['status'] == next(it) 585 | 586 | 587 | @pytest.fixture 588 | def userhomedir(): 589 | with tempfile.TemporaryDirectory() as temp: 590 | home = os.getenv('HOME', '') 591 | os.environ['HOME'] = temp 592 | yield Path(temp) 593 | os.environ['HOME'] = home 594 | 595 | 596 | def test_client_create_with_blob(httpserver: HTTPServer): 597 | httpserver.expect_ordered_request( 598 | '/api/create', 599 | method='POST', 600 | json={ 601 | 'model': 'dummy', 602 | 'files': {'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 603 | 'stream': False, 604 | }, 605 | ).respond_with_json({'status': 'success'}) 606 | 607 | client = Client(httpserver.url_for('/')) 608 | 609 | with tempfile.NamedTemporaryFile(): 610 | response = client.create('dummy', files={'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}) 611 
| assert response['status'] == 'success' 612 | 613 | 614 | def test_client_create_with_parameters_roundtrip(httpserver: HTTPServer): 615 | httpserver.expect_ordered_request( 616 | '/api/create', 617 | method='POST', 618 | json={ 619 | 'model': 'dummy', 620 | 'quantize': 'q4_k_m', 621 | 'from': 'mymodel', 622 | 'adapters': {'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 623 | 'template': '[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', 624 | 'license': 'this is my license', 625 | 'system': '\nUse\nmultiline\nstrings.\n', 626 | 'parameters': {'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, 627 | 'messages': [{'role': 'user', 'content': 'Hello there!'}, {'role': 'assistant', 'content': 'Hello there yourself!'}], 628 | 'stream': False, 629 | }, 630 | ).respond_with_json({'status': 'success'}) 631 | 632 | client = Client(httpserver.url_for('/')) 633 | 634 | with tempfile.NamedTemporaryFile(): 635 | response = client.create( 636 | 'dummy', 637 | quantize='q4_k_m', 638 | from_='mymodel', 639 | adapters={'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 640 | template='[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', 641 | license='this is my license', 642 | system='\nUse\nmultiline\nstrings.\n', 643 | parameters={'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, 644 | messages=[{'role': 'user', 'content': 'Hello there!'}, {'role': 'assistant', 'content': 'Hello there yourself!'}], 645 | stream=False, 646 | ) 647 | assert response['status'] == 'success' 648 | 649 | 650 | def test_client_create_from_library(httpserver: HTTPServer): 651 | httpserver.expect_ordered_request( 652 | '/api/create', 653 | method='POST', 654 | json={ 655 | 'model': 'dummy', 656 | 'from': 'llama2', 657 | 'stream': False, 658 | }, 659 | ).respond_with_json({'status': 'success'}) 660 | 661 | client = Client(httpserver.url_for('/')) 662 | 663 | response = client.create('dummy', from_='llama2') 
664 | assert response['status'] == 'success' 665 | 666 | 667 | def test_client_create_blob(httpserver: HTTPServer): 668 | httpserver.expect_ordered_request(re.compile('^/api/blobs/sha256[:-][0-9a-fA-F]{64}$'), method='POST').respond_with_response(Response(status=201)) 669 | 670 | client = Client(httpserver.url_for('/')) 671 | 672 | with tempfile.NamedTemporaryFile() as blob: 673 | response = client.create_blob(blob.name) 674 | assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' 675 | 676 | 677 | def test_client_create_blob_exists(httpserver: HTTPServer): 678 | httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) 679 | 680 | client = Client(httpserver.url_for('/')) 681 | 682 | with tempfile.NamedTemporaryFile() as blob: 683 | response = client.create_blob(blob.name) 684 | assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' 685 | 686 | 687 | def test_client_delete(httpserver: HTTPServer): 688 | httpserver.expect_ordered_request(PrefixPattern('/api/delete'), method='DELETE').respond_with_response(Response(status=200)) 689 | client = Client(httpserver.url_for('/api/delete')) 690 | response = client.delete('dummy') 691 | assert response['status'] == 'success' 692 | 693 | 694 | def test_client_copy(httpserver: HTTPServer): 695 | httpserver.expect_ordered_request(PrefixPattern('/api/copy'), method='POST').respond_with_response(Response(status=200)) 696 | client = Client(httpserver.url_for('/api/copy')) 697 | response = client.copy('dum', 'dummer') 698 | assert response['status'] == 'success' 699 | 700 | 701 | async def test_async_client_chat(httpserver: HTTPServer): 702 | httpserver.expect_ordered_request( 703 | '/api/chat', 704 | method='POST', 705 | json={ 706 | 'model': 'dummy', 707 | 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 708 | 'tools': [], 709 | 'stream': False, 710 | }, 711 | 
).respond_with_json( 712 | { 713 | 'model': 'dummy', 714 | 'message': { 715 | 'role': 'assistant', 716 | 'content': "I don't know.", 717 | }, 718 | } 719 | ) 720 | 721 | client = AsyncClient(httpserver.url_for('/')) 722 | response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) 723 | assert response['model'] == 'dummy' 724 | assert response['message']['role'] == 'assistant' 725 | assert response['message']['content'] == "I don't know." 726 | 727 | 728 | async def test_async_client_chat_stream(httpserver: HTTPServer): 729 | def stream_handler(_: Request): 730 | def generate(): 731 | for message in ['I ', "don't ", 'know.']: 732 | yield ( 733 | json.dumps( 734 | { 735 | 'model': 'dummy', 736 | 'message': { 737 | 'role': 'assistant', 738 | 'content': message, 739 | }, 740 | } 741 | ) 742 | + '\n' 743 | ) 744 | 745 | return Response(generate()) 746 | 747 | httpserver.expect_ordered_request( 748 | '/api/chat', 749 | method='POST', 750 | json={ 751 | 'model': 'dummy', 752 | 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 753 | 'tools': [], 754 | 'stream': True, 755 | }, 756 | ).respond_with_handler(stream_handler) 757 | 758 | client = AsyncClient(httpserver.url_for('/')) 759 | response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], stream=True) 760 | 761 | it = iter(['I ', "don't ", 'know.']) 762 | async for part in response: 763 | assert part['message']['role'] == 'assistant' 764 | assert part['message']['content'] == next(it) 765 | 766 | 767 | async def test_async_client_chat_images(httpserver: HTTPServer): 768 | httpserver.expect_ordered_request( 769 | '/api/chat', 770 | method='POST', 771 | json={ 772 | 'model': 'dummy', 773 | 'messages': [ 774 | { 775 | 'role': 'user', 776 | 'content': 'Why is the sky blue?', 777 | 'images': [PNG_BASE64], 778 | }, 779 | ], 780 | 'tools': [], 781 | 'stream': False, 782 | }, 783 | ).respond_with_json( 784 | { 785 | 'model': 
'dummy', 786 | 'message': { 787 | 'role': 'assistant', 788 | 'content': "I don't know.", 789 | }, 790 | } 791 | ) 792 | 793 | client = AsyncClient(httpserver.url_for('/')) 794 | 795 | response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?', 'images': [PNG_BYTES]}]) 796 | assert response['model'] == 'dummy' 797 | assert response['message']['role'] == 'assistant' 798 | assert response['message']['content'] == "I don't know." 799 | 800 | 801 | async def test_async_client_generate(httpserver: HTTPServer): 802 | httpserver.expect_ordered_request( 803 | '/api/generate', 804 | method='POST', 805 | json={ 806 | 'model': 'dummy', 807 | 'prompt': 'Why is the sky blue?', 808 | 'stream': False, 809 | }, 810 | ).respond_with_json( 811 | { 812 | 'model': 'dummy', 813 | 'response': 'Because it is.', 814 | } 815 | ) 816 | 817 | client = AsyncClient(httpserver.url_for('/')) 818 | response = await client.generate('dummy', 'Why is the sky blue?') 819 | assert response['model'] == 'dummy' 820 | assert response['response'] == 'Because it is.' 
821 | 822 | 823 | async def test_async_client_generate_stream(httpserver: HTTPServer): 824 | def stream_handler(_: Request): 825 | def generate(): 826 | for message in ['Because ', 'it ', 'is.']: 827 | yield ( 828 | json.dumps( 829 | { 830 | 'model': 'dummy', 831 | 'response': message, 832 | } 833 | ) 834 | + '\n' 835 | ) 836 | 837 | return Response(generate()) 838 | 839 | httpserver.expect_ordered_request( 840 | '/api/generate', 841 | method='POST', 842 | json={ 843 | 'model': 'dummy', 844 | 'prompt': 'Why is the sky blue?', 845 | 'stream': True, 846 | }, 847 | ).respond_with_handler(stream_handler) 848 | 849 | client = AsyncClient(httpserver.url_for('/')) 850 | response = await client.generate('dummy', 'Why is the sky blue?', stream=True) 851 | 852 | it = iter(['Because ', 'it ', 'is.']) 853 | async for part in response: 854 | assert part['model'] == 'dummy' 855 | assert part['response'] == next(it) 856 | 857 | 858 | async def test_async_client_generate_images(httpserver: HTTPServer): 859 | httpserver.expect_ordered_request( 860 | '/api/generate', 861 | method='POST', 862 | json={ 863 | 'model': 'dummy', 864 | 'prompt': 'Why is the sky blue?', 865 | 'stream': False, 866 | 'images': [PNG_BASE64], 867 | }, 868 | ).respond_with_json( 869 | { 870 | 'model': 'dummy', 871 | 'response': 'Because it is.', 872 | } 873 | ) 874 | 875 | client = AsyncClient(httpserver.url_for('/')) 876 | 877 | with tempfile.NamedTemporaryFile() as temp: 878 | temp.write(PNG_BYTES) 879 | temp.flush() 880 | response = await client.generate('dummy', 'Why is the sky blue?', images=[temp.name]) 881 | assert response['model'] == 'dummy' 882 | assert response['response'] == 'Because it is.' 
883 | 884 | 885 | async def test_async_client_pull(httpserver: HTTPServer): 886 | httpserver.expect_ordered_request( 887 | '/api/pull', 888 | method='POST', 889 | json={ 890 | 'model': 'dummy', 891 | 'insecure': False, 892 | 'stream': False, 893 | }, 894 | ).respond_with_json({'status': 'success'}) 895 | 896 | client = AsyncClient(httpserver.url_for('/')) 897 | response = await client.pull('dummy') 898 | assert response['status'] == 'success' 899 | 900 | 901 | async def test_async_client_pull_stream(httpserver: HTTPServer): 902 | def stream_handler(_: Request): 903 | def generate(): 904 | yield json.dumps({'status': 'pulling manifest'}) + '\n' 905 | yield json.dumps({'status': 'verifying sha256 digest'}) + '\n' 906 | yield json.dumps({'status': 'writing manifest'}) + '\n' 907 | yield json.dumps({'status': 'removing any unused layers'}) + '\n' 908 | yield json.dumps({'status': 'success'}) + '\n' 909 | 910 | return Response(generate()) 911 | 912 | httpserver.expect_ordered_request( 913 | '/api/pull', 914 | method='POST', 915 | json={ 916 | 'model': 'dummy', 917 | 'insecure': False, 918 | 'stream': True, 919 | }, 920 | ).respond_with_handler(stream_handler) 921 | 922 | client = AsyncClient(httpserver.url_for('/')) 923 | response = await client.pull('dummy', stream=True) 924 | 925 | it = iter(['pulling manifest', 'verifying sha256 digest', 'writing manifest', 'removing any unused layers', 'success']) 926 | async for part in response: 927 | assert part['status'] == next(it) 928 | 929 | 930 | async def test_async_client_push(httpserver: HTTPServer): 931 | httpserver.expect_ordered_request( 932 | '/api/push', 933 | method='POST', 934 | json={ 935 | 'model': 'dummy', 936 | 'insecure': False, 937 | 'stream': False, 938 | }, 939 | ).respond_with_json({'status': 'success'}) 940 | 941 | client = AsyncClient(httpserver.url_for('/')) 942 | response = await client.push('dummy') 943 | assert response['status'] == 'success' 944 | 945 | 946 | async def 
test_async_client_push_stream(httpserver: HTTPServer): 947 | def stream_handler(_: Request): 948 | def generate(): 949 | yield json.dumps({'status': 'retrieving manifest'}) + '\n' 950 | yield json.dumps({'status': 'pushing manifest'}) + '\n' 951 | yield json.dumps({'status': 'success'}) + '\n' 952 | 953 | return Response(generate()) 954 | 955 | httpserver.expect_ordered_request( 956 | '/api/push', 957 | method='POST', 958 | json={ 959 | 'model': 'dummy', 960 | 'insecure': False, 961 | 'stream': True, 962 | }, 963 | ).respond_with_handler(stream_handler) 964 | 965 | client = AsyncClient(httpserver.url_for('/')) 966 | response = await client.push('dummy', stream=True) 967 | 968 | it = iter(['retrieving manifest', 'pushing manifest', 'success']) 969 | async for part in response: 970 | assert part['status'] == next(it) 971 | 972 | 973 | async def test_async_client_create_with_blob(httpserver: HTTPServer): 974 | httpserver.expect_ordered_request( 975 | '/api/create', 976 | method='POST', 977 | json={ 978 | 'model': 'dummy', 979 | 'files': {'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 980 | 'stream': False, 981 | }, 982 | ).respond_with_json({'status': 'success'}) 983 | 984 | client = AsyncClient(httpserver.url_for('/')) 985 | 986 | with tempfile.NamedTemporaryFile(): 987 | response = await client.create('dummy', files={'test.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}) 988 | assert response['status'] == 'success' 989 | 990 | 991 | async def test_async_client_create_with_parameters_roundtrip(httpserver: HTTPServer): 992 | httpserver.expect_ordered_request( 993 | '/api/create', 994 | method='POST', 995 | json={ 996 | 'model': 'dummy', 997 | 'quantize': 'q4_k_m', 998 | 'from': 'mymodel', 999 | 'adapters': {'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 1000 | 'template': '[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', 1001 | 'license': 'this is 
my license', 1002 | 'system': '\nUse\nmultiline\nstrings.\n', 1003 | 'parameters': {'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, 1004 | 'messages': [{'role': 'user', 'content': 'Hello there!'}, {'role': 'assistant', 'content': 'Hello there yourself!'}], 1005 | 'stream': False, 1006 | }, 1007 | ).respond_with_json({'status': 'success'}) 1008 | 1009 | client = AsyncClient(httpserver.url_for('/')) 1010 | 1011 | with tempfile.NamedTemporaryFile(): 1012 | response = await client.create( 1013 | 'dummy', 1014 | quantize='q4_k_m', 1015 | from_='mymodel', 1016 | adapters={'someadapter.gguf': 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 1017 | template='[INST] <>{{.System}}<>\n{{.Prompt}} [/INST]', 1018 | license='this is my license', 1019 | system='\nUse\nmultiline\nstrings.\n', 1020 | parameters={'stop': ['[INST]', '[/INST]', '<>', '<>'], 'pi': 3.14159}, 1021 | messages=[{'role': 'user', 'content': 'Hello there!'}, {'role': 'assistant', 'content': 'Hello there yourself!'}], 1022 | stream=False, 1023 | ) 1024 | assert response['status'] == 'success' 1025 | 1026 | 1027 | async def test_async_client_create_from_library(httpserver: HTTPServer): 1028 | httpserver.expect_ordered_request( 1029 | '/api/create', 1030 | method='POST', 1031 | json={ 1032 | 'model': 'dummy', 1033 | 'from': 'llama2', 1034 | 'stream': False, 1035 | }, 1036 | ).respond_with_json({'status': 'success'}) 1037 | 1038 | client = AsyncClient(httpserver.url_for('/')) 1039 | 1040 | response = await client.create('dummy', from_='llama2') 1041 | assert response['status'] == 'success' 1042 | 1043 | 1044 | async def test_async_client_create_blob(httpserver: HTTPServer): 1045 | httpserver.expect_ordered_request(re.compile('^/api/blobs/sha256[:-][0-9a-fA-F]{64}$'), method='POST').respond_with_response(Response(status=201)) 1046 | 1047 | client = AsyncClient(httpserver.url_for('/')) 1048 | 1049 | with tempfile.NamedTemporaryFile() as blob: 1050 | response = await 
client.create_blob(blob.name) 1051 | assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' 1052 | 1053 | 1054 | async def test_async_client_create_blob_exists(httpserver: HTTPServer): 1055 | httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) 1056 | 1057 | client = AsyncClient(httpserver.url_for('/')) 1058 | 1059 | with tempfile.NamedTemporaryFile() as blob: 1060 | response = await client.create_blob(blob.name) 1061 | assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' 1062 | 1063 | 1064 | async def test_async_client_delete(httpserver: HTTPServer): 1065 | httpserver.expect_ordered_request(PrefixPattern('/api/delete'), method='DELETE').respond_with_response(Response(status=200)) 1066 | client = AsyncClient(httpserver.url_for('/api/delete')) 1067 | response = await client.delete('dummy') 1068 | assert response['status'] == 'success' 1069 | 1070 | 1071 | async def test_async_client_copy(httpserver: HTTPServer): 1072 | httpserver.expect_ordered_request(PrefixPattern('/api/copy'), method='POST').respond_with_response(Response(status=200)) 1073 | client = AsyncClient(httpserver.url_for('/api/copy')) 1074 | response = await client.copy('dum', 'dummer') 1075 | assert response['status'] == 'success' 1076 | 1077 | 1078 | def test_headers(): 1079 | client = Client() 1080 | assert client._client.headers['content-type'] == 'application/json' 1081 | assert client._client.headers['accept'] == 'application/json' 1082 | assert client._client.headers['user-agent'].startswith('ollama-python/') 1083 | 1084 | client = Client( 1085 | headers={ 1086 | 'X-Custom': 'value', 1087 | 'Content-Type': 'text/plain', 1088 | } 1089 | ) 1090 | assert client._client.headers['x-custom'] == 'value' 1091 | assert client._client.headers['content-type'] == 'application/json' 1092 | 1093 | 1094 | def test_copy_tools(): 1095 | def func1(x: int) -> str: 1096 
| """Simple function 1. 1097 | Args: 1098 | x (integer): A number 1099 | """ 1100 | 1101 | def func2(y: str) -> int: 1102 | """Simple function 2. 1103 | Args: 1104 | y (string): A string 1105 | """ 1106 | 1107 | # Test with list of functions 1108 | tools = list(_copy_tools([func1, func2])) 1109 | assert len(tools) == 2 1110 | assert tools[0].function.name == 'func1' 1111 | assert tools[1].function.name == 'func2' 1112 | 1113 | # Test with empty input 1114 | assert list(_copy_tools()) == [] 1115 | assert list(_copy_tools(None)) == [] 1116 | assert list(_copy_tools([])) == [] 1117 | 1118 | # Test with mix of functions and tool dicts 1119 | tool_dict = { 1120 | 'type': 'function', 1121 | 'function': { 1122 | 'name': 'test', 1123 | 'description': 'Test function', 1124 | 'parameters': { 1125 | 'type': 'object', 1126 | 'properties': {'x': {'type': 'string', 'description': 'A string', 'enum': ['a', 'b', 'c']}, 'y': {'type': ['integer', 'number'], 'description': 'An integer'}}, 1127 | 'required': ['x'], 1128 | }, 1129 | }, 1130 | } 1131 | 1132 | tools = list(_copy_tools([func1, tool_dict])) 1133 | assert len(tools) == 2 1134 | assert tools[0].function.name == 'func1' 1135 | assert tools[1].function.name == 'test' 1136 | 1137 | 1138 | def test_tool_validation(): 1139 | # Raises ValidationError when used as it is a generator 1140 | with pytest.raises(ValidationError): 1141 | invalid_tool = {'type': 'invalid_type', 'function': {'name': 'test'}} 1142 | list(_copy_tools([invalid_tool])) 1143 | 1144 | 1145 | def test_client_connection_error(): 1146 | client = Client('http://localhost:1234') 1147 | 1148 | with pytest.raises(ConnectionError, match=CONNECTION_ERROR_MESSAGE): 1149 | client.chat('model', messages=[{'role': 'user', 'content': 'prompt'}]) 1150 | with pytest.raises(ConnectionError, match=CONNECTION_ERROR_MESSAGE): 1151 | client.chat('model', messages=[{'role': 'user', 'content': 'prompt'}]) 1152 | with pytest.raises(ConnectionError, match=CONNECTION_ERROR_MESSAGE): 1153 
| client.generate('model', 'prompt') 1154 | with pytest.raises(ConnectionError, match=CONNECTION_ERROR_MESSAGE): 1155 | client.show('model') 1156 | 1157 | 1158 | async def test_async_client_connection_error(): 1159 | client = AsyncClient('http://localhost:1234') 1160 | with pytest.raises(ConnectionError) as exc_info: 1161 | await client.chat('model', messages=[{'role': 'user', 'content': 'prompt'}]) 1162 | assert str(exc_info.value) == 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download' 1163 | with pytest.raises(ConnectionError) as exc_info: 1164 | await client.generate('model', 'prompt') 1165 | assert str(exc_info.value) == 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download' 1166 | with pytest.raises(ConnectionError) as exc_info: 1167 | await client.show('model') 1168 | assert str(exc_info.value) == 'Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. 
https://ollama.com/download' 1169 | 1170 | 1171 | def test_arbitrary_roles_accepted_in_message(): 1172 | _ = Message(role='somerandomrole', content="I'm ok with you adding any role message now!") 1173 | 1174 | 1175 | def _mock_request(*args: Any, **kwargs: Any) -> Response: 1176 | return httpxResponse(status_code=200, content="{'response': 'Hello world!'}") 1177 | 1178 | 1179 | def test_arbitrary_roles_accepted_in_message_request(monkeypatch: pytest.MonkeyPatch): 1180 | monkeypatch.setattr(Client, '_request', _mock_request) 1181 | 1182 | client = Client() 1183 | 1184 | client.chat(model='llama3.1', messages=[{'role': 'somerandomrole', 'content': "I'm ok with you adding any role message now!"}, {'role': 'user', 'content': 'Hello world!'}]) 1185 | 1186 | 1187 | async def _mock_request_async(*args: Any, **kwargs: Any) -> Response: 1188 | return httpxResponse(status_code=200, content="{'response': 'Hello world!'}") 1189 | 1190 | 1191 | async def test_arbitrary_roles_accepted_in_message_request_async(monkeypatch: pytest.MonkeyPatch): 1192 | monkeypatch.setattr(AsyncClient, '_request', _mock_request_async) 1193 | 1194 | client = AsyncClient() 1195 | 1196 | await client.chat(model='llama3.1', messages=[{'role': 'somerandomrole', 'content': "I'm ok with you adding any role message now!"}, {'role': 'user', 'content': 'Hello world!'}]) 1197 | -------------------------------------------------------------------------------- /tests/test_type_serialization.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | from base64 import b64encode 3 | from pathlib import Path 4 | 5 | import pytest 6 | 7 | from ollama._types import CreateRequest, Image 8 | 9 | 10 | def test_image_serialization_bytes(): 11 | image_bytes = b'test image bytes' 12 | encoded_string = b64encode(image_bytes).decode() 13 | img = Image(value=image_bytes) 14 | assert img.model_dump() == encoded_string 15 | 16 | 17 | def test_image_serialization_base64_string(): 
18 | b64_str = 'dGVzdCBiYXNlNjQgc3RyaW5n' 19 | img = Image(value=b64_str) 20 | assert img.model_dump() == b64_str # Should return as-is if valid base64 21 | 22 | 23 | def test_image_serialization_long_base64_string(): 24 | b64_str = 'dGVzdCBiYXNlNjQgc3RyaW5n' * 1000 25 | img = Image(value=b64_str) 26 | assert img.model_dump() == b64_str # Should return as-is if valid base64 27 | 28 | 29 | def test_image_serialization_plain_string(): 30 | img = Image(value='not a path or base64') 31 | assert img.model_dump() == 'not a path or base64' # Should return as-is 32 | 33 | 34 | def test_image_serialization_path(): 35 | with tempfile.NamedTemporaryFile() as temp_file: 36 | temp_file.write(b'test file content') 37 | temp_file.flush() 38 | img = Image(value=Path(temp_file.name)) 39 | assert img.model_dump() == b64encode(b'test file content').decode() 40 | 41 | 42 | def test_image_serialization_string_path(): 43 | with tempfile.NamedTemporaryFile() as temp_file: 44 | temp_file.write(b'test file content') 45 | temp_file.flush() 46 | img = Image(value=temp_file.name) 47 | assert img.model_dump() == b64encode(b'test file content').decode() 48 | 49 | with pytest.raises(ValueError): 50 | img = Image(value='some_path/that/does/not/exist.png') 51 | img.model_dump() 52 | 53 | with pytest.raises(ValueError): 54 | img = Image(value='not an image') 55 | img.model_dump() 56 | 57 | 58 | def test_create_request_serialization(): 59 | request = CreateRequest(model='test-model', from_='base-model', quantize='q4_0', files={'file1': 'content1'}, adapters={'adapter1': 'content1'}, template='test template', license='MIT', system='test system', parameters={'param1': 'value1'}) 60 | 61 | serialized = request.model_dump() 62 | assert serialized['from'] == 'base-model' 63 | assert 'from_' not in serialized 64 | assert serialized['quantize'] == 'q4_0' 65 | assert serialized['files'] == {'file1': 'content1'} 66 | assert serialized['adapters'] == {'adapter1': 'content1'} 67 | assert serialized['template'] 
== 'test template' 68 | assert serialized['license'] == 'MIT' 69 | assert serialized['system'] == 'test system' 70 | assert serialized['parameters'] == {'param1': 'value1'} 71 | 72 | 73 | def test_create_request_serialization_exclude_none_true(): 74 | request = CreateRequest(model='test-model', from_=None, quantize=None) 75 | serialized = request.model_dump(exclude_none=True) 76 | assert serialized == {'model': 'test-model'} 77 | assert 'from' not in serialized 78 | assert 'from_' not in serialized 79 | assert 'quantize' not in serialized 80 | 81 | 82 | def test_create_request_serialization_exclude_none_false(): 83 | request = CreateRequest(model='test-model', from_=None, quantize=None) 84 | serialized = request.model_dump(exclude_none=False) 85 | assert 'from' in serialized 86 | assert 'quantize' in serialized 87 | assert 'adapters' in serialized 88 | assert 'from_' not in serialized 89 | 90 | 91 | def test_create_request_serialization_license_list(): 92 | request = CreateRequest(model='test-model', license=['MIT', 'Apache-2.0']) 93 | serialized = request.model_dump() 94 | assert serialized['license'] == ['MIT', 'Apache-2.0'] 95 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | from typing import Dict, List, Mapping, Sequence, Set, Tuple, Union 4 | 5 | from ollama._utils import convert_function_to_tool 6 | 7 | 8 | def test_function_to_tool_conversion(): 9 | def add_numbers(x: int, y: Union[int, None] = None) -> int: 10 | """Add two numbers together. 
11 | args: 12 | x (integer): The first number 13 | y (integer, optional): The second number 14 | 15 | Returns: 16 | integer: The sum of x and y 17 | """ 18 | return x + y 19 | 20 | tool = convert_function_to_tool(add_numbers).model_dump() 21 | 22 | assert tool['type'] == 'function' 23 | assert tool['function']['name'] == 'add_numbers' 24 | assert tool['function']['description'] == 'Add two numbers together.' 25 | assert tool['function']['parameters']['type'] == 'object' 26 | assert tool['function']['parameters']['properties']['x']['type'] == 'integer' 27 | assert tool['function']['parameters']['properties']['x']['description'] == 'The first number' 28 | assert tool['function']['parameters']['required'] == ['x'] 29 | 30 | 31 | def test_function_with_no_args(): 32 | def simple_func(): 33 | """ 34 | A simple function with no arguments. 35 | Args: 36 | None 37 | Returns: 38 | None 39 | """ 40 | 41 | tool = convert_function_to_tool(simple_func).model_dump() 42 | assert tool['function']['name'] == 'simple_func' 43 | assert tool['function']['description'] == 'A simple function with no arguments.' 44 | assert tool['function']['parameters']['properties'] == {} 45 | 46 | 47 | def test_function_with_all_types(): 48 | if sys.version_info >= (3, 10): 49 | 50 | def all_types( 51 | x: int, 52 | y: str, 53 | z: list[int], 54 | w: dict[str, int], 55 | v: int | str | None, 56 | ) -> int | dict[str, int] | str | list[int] | None: 57 | """ 58 | A function with all types. 
59 | Args: 60 | x (integer): The first number 61 | y (string): The second number 62 | z (array): The third number 63 | w (object): The fourth number 64 | v (integer | string | None): The fifth number 65 | """ 66 | else: 67 | 68 | def all_types( 69 | x: int, 70 | y: str, 71 | z: Sequence, 72 | w: Mapping[str, int], 73 | d: Dict[str, int], 74 | s: Set[int], 75 | t: Tuple[int, str], 76 | l: List[int], # noqa: E741 77 | o: Union[int, None], 78 | ) -> Union[Mapping[str, int], str, None]: 79 | """ 80 | A function with all types. 81 | Args: 82 | x (integer): The first number 83 | y (string): The second number 84 | z (array): The third number 85 | w (object): The fourth number 86 | d (object): The fifth number 87 | s (array): The sixth number 88 | t (array): The seventh number 89 | l (array): The eighth number 90 | o (integer | None): The ninth number 91 | """ 92 | 93 | tool_json = convert_function_to_tool(all_types).model_dump_json() 94 | tool = json.loads(tool_json) 95 | assert tool['function']['parameters']['properties']['x']['type'] == 'integer' 96 | assert tool['function']['parameters']['properties']['y']['type'] == 'string' 97 | 98 | if sys.version_info >= (3, 10): 99 | assert tool['function']['parameters']['properties']['z']['type'] == 'array' 100 | assert tool['function']['parameters']['properties']['w']['type'] == 'object' 101 | assert {x.strip().strip("'") for x in tool['function']['parameters']['properties']['v']['type'].removeprefix('[').removesuffix(']').split(',')} == {'string', 'integer'} 102 | assert tool['function']['parameters']['properties']['v']['type'] != 'null' 103 | assert tool['function']['parameters']['required'] == ['x', 'y', 'z', 'w'] 104 | else: 105 | assert tool['function']['parameters']['properties']['z']['type'] == 'array' 106 | assert tool['function']['parameters']['properties']['w']['type'] == 'object' 107 | assert tool['function']['parameters']['properties']['d']['type'] == 'object' 108 | assert 
tool['function']['parameters']['properties']['s']['type'] == 'array' 109 | assert tool['function']['parameters']['properties']['t']['type'] == 'array' 110 | assert tool['function']['parameters']['properties']['l']['type'] == 'array' 111 | assert tool['function']['parameters']['properties']['o']['type'] == 'integer' 112 | assert tool['function']['parameters']['properties']['o']['type'] != 'null' 113 | assert tool['function']['parameters']['required'] == ['x', 'y', 'z', 'w', 'd', 's', 't', 'l'] 114 | 115 | 116 | def test_function_docstring_parsing(): 117 | from typing import Any, Dict, List 118 | 119 | def func_with_complex_docs(x: int, y: List[str]) -> Dict[str, Any]: 120 | """ 121 | Test function with complex docstring. 122 | 123 | Args: 124 | x (integer): A number 125 | with multiple lines 126 | y (array of string): A list 127 | with multiple lines 128 | 129 | Returns: 130 | object: A dictionary 131 | with multiple lines 132 | """ 133 | 134 | tool = convert_function_to_tool(func_with_complex_docs).model_dump() 135 | assert tool['function']['description'] == 'Test function with complex docstring.' 136 | assert tool['function']['parameters']['properties']['x']['description'] == 'A number with multiple lines' 137 | assert tool['function']['parameters']['properties']['y']['description'] == 'A list with multiple lines' 138 | 139 | 140 | def test_skewed_docstring_parsing(): 141 | def add_two_numbers(x: int, y: int) -> int: 142 | """ 143 | Add two numbers together. 
144 | Args: 145 | x (integer): : The first number 146 | 147 | 148 | 149 | 150 | y (integer ): The second number 151 | Returns: 152 | integer: The sum of x and y 153 | """ 154 | 155 | tool = convert_function_to_tool(add_two_numbers).model_dump() 156 | assert tool['function']['parameters']['properties']['x']['description'] == ': The first number' 157 | assert tool['function']['parameters']['properties']['y']['description'] == 'The second number' 158 | 159 | 160 | def test_function_with_no_docstring(): 161 | def no_docstring(): ... 162 | 163 | def no_docstring_with_args(x: int, y: int): ... 164 | 165 | tool = convert_function_to_tool(no_docstring).model_dump() 166 | assert tool['function']['description'] == '' 167 | 168 | tool = convert_function_to_tool(no_docstring_with_args).model_dump() 169 | assert tool['function']['description'] == '' 170 | assert tool['function']['parameters']['properties']['x']['description'] == '' 171 | assert tool['function']['parameters']['properties']['y']['description'] == '' 172 | 173 | 174 | def test_function_with_only_description(): 175 | def only_description(): 176 | """ 177 | A function with only a description. 178 | """ 179 | 180 | tool = convert_function_to_tool(only_description).model_dump() 181 | assert tool['function']['description'] == 'A function with only a description.' 182 | assert tool['function']['parameters'] == {'type': 'object', 'defs': None, 'items': None, 'required': None, 'properties': {}} 183 | 184 | def only_description_with_args(x: int, y: int): 185 | """ 186 | A function with only a description. 187 | """ 188 | 189 | tool = convert_function_to_tool(only_description_with_args).model_dump() 190 | assert tool['function']['description'] == 'A function with only a description.' 
191 | assert tool['function']['parameters'] == { 192 | 'type': 'object', 193 | 'defs': None, 194 | 'items': None, 195 | 'properties': { 196 | 'x': {'type': 'integer', 'description': '', 'enum': None, 'items': None}, 197 | 'y': {'type': 'integer', 'description': '', 'enum': None, 'items': None}, 198 | }, 199 | 'required': ['x', 'y'], 200 | } 201 | 202 | 203 | def test_function_with_yields(): 204 | def function_with_yields(x: int, y: int): 205 | """ 206 | A function with yields section. 207 | 208 | Args: 209 | x: the first number 210 | y: the second number 211 | 212 | Yields: 213 | The sum of x and y 214 | """ 215 | 216 | tool = convert_function_to_tool(function_with_yields).model_dump() 217 | assert tool['function']['description'] == 'A function with yields section.' 218 | assert tool['function']['parameters']['properties']['x']['description'] == 'the first number' 219 | assert tool['function']['parameters']['properties']['y']['description'] == 'the second number' 220 | 221 | 222 | def test_function_with_no_types(): 223 | def no_types(a, b): 224 | """ 225 | A function with no types. 226 | """ 227 | 228 | tool = convert_function_to_tool(no_types).model_dump() 229 | assert tool['function']['parameters']['properties']['a']['type'] == 'string' 230 | assert tool['function']['parameters']['properties']['b']['type'] == 'string' 231 | 232 | 233 | def test_function_with_parentheses(): 234 | def func_with_parentheses(a: int, b: int) -> int: 235 | """ 236 | A function with parentheses. 237 | Args: 238 | a: First (:thing) number to add 239 | b: Second number to add 240 | Returns: 241 | int: The sum of a and b 242 | """ 243 | 244 | def func_with_parentheses_and_args(a: int, b: int): 245 | """ 246 | A function with parentheses and args. 
247 | Args: 248 | a(integer) : First (:thing) number to add 249 | b(integer) :Second number to add 250 | """ 251 | 252 | tool = convert_function_to_tool(func_with_parentheses).model_dump() 253 | assert tool['function']['parameters']['properties']['a']['description'] == 'First (:thing) number to add' 254 | assert tool['function']['parameters']['properties']['b']['description'] == 'Second number to add' 255 | 256 | tool = convert_function_to_tool(func_with_parentheses_and_args).model_dump() 257 | assert tool['function']['parameters']['properties']['a']['description'] == 'First (:thing) number to add' 258 | assert tool['function']['parameters']['properties']['b']['description'] == 'Second number to add' 259 | -------------------------------------------------------------------------------- /uv.lock: -------------------------------------------------------------------------------- 1 | version = 1 2 | requires-python = ">=3.8" 3 | resolution-markers = [ 4 | "python_full_version >= '3.9'", 5 | "python_full_version < '3.9'", 6 | ] 7 | 8 | [[package]] 9 | name = "annotated-types" 10 | version = "0.7.0" 11 | source = { registry = "https://pypi.org/simple" } 12 | dependencies = [ 13 | { name = "typing-extensions", marker = "python_full_version < '3.9'" }, 14 | ] 15 | sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } 16 | wheels = [ 17 | { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, 18 | ] 19 | 20 | [[package]] 21 | name = "anyio" 22 | version = "4.5.2" 23 | source = { registry = "https://pypi.org/simple" } 24 | resolution-markers = [ 25 | "python_full_version < '3.9'", 26 | 
] 27 | dependencies = [ 28 | { name = "exceptiongroup", marker = "python_full_version < '3.9'" }, 29 | { name = "idna", marker = "python_full_version < '3.9'" }, 30 | { name = "sniffio", marker = "python_full_version < '3.9'" }, 31 | { name = "typing-extensions", marker = "python_full_version < '3.9'" }, 32 | ] 33 | sdist = { url = "https://files.pythonhosted.org/packages/4d/f9/9a7ce600ebe7804daf90d4d48b1c0510a4561ddce43a596be46676f82343/anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b", size = 171293 } 34 | wheels = [ 35 | { url = "https://files.pythonhosted.org/packages/1b/b4/f7e396030e3b11394436358ca258a81d6010106582422f23443c16ca1873/anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f", size = 89766 }, 36 | ] 37 | 38 | [[package]] 39 | name = "anyio" 40 | version = "4.8.0" 41 | source = { registry = "https://pypi.org/simple" } 42 | resolution-markers = [ 43 | "python_full_version >= '3.9'", 44 | ] 45 | dependencies = [ 46 | { name = "exceptiongroup", marker = "python_full_version >= '3.9' and python_full_version < '3.11'" }, 47 | { name = "idna", marker = "python_full_version >= '3.9'" }, 48 | { name = "sniffio", marker = "python_full_version >= '3.9'" }, 49 | { name = "typing-extensions", marker = "python_full_version >= '3.9' and python_full_version < '3.13'" }, 50 | ] 51 | sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126 } 52 | wheels = [ 53 | { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 }, 54 | ] 55 | 56 | [[package]] 57 | name = "certifi" 58 | version = "2025.1.31" 
59 | source = { registry = "https://pypi.org/simple" } 60 | sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } 61 | wheels = [ 62 | { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, 63 | ] 64 | 65 | [[package]] 66 | name = "exceptiongroup" 67 | version = "1.2.2" 68 | source = { registry = "https://pypi.org/simple" } 69 | sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 } 70 | wheels = [ 71 | { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 }, 72 | ] 73 | 74 | [[package]] 75 | name = "h11" 76 | version = "0.14.0" 77 | source = { registry = "https://pypi.org/simple" } 78 | sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } 79 | wheels = [ 80 | { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, 81 | ] 82 | 83 | [[package]] 84 | name = "httpcore" 85 | version = "1.0.7" 86 | source = { registry = 
"https://pypi.org/simple" } 87 | dependencies = [ 88 | { name = "certifi" }, 89 | { name = "h11" }, 90 | ] 91 | sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } 92 | wheels = [ 93 | { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, 94 | ] 95 | 96 | [[package]] 97 | name = "httpx" 98 | version = "0.28.1" 99 | source = { registry = "https://pypi.org/simple" } 100 | dependencies = [ 101 | { name = "anyio", version = "4.5.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" }, 102 | { name = "anyio", version = "4.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" }, 103 | { name = "certifi" }, 104 | { name = "httpcore" }, 105 | { name = "idna" }, 106 | ] 107 | sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 } 108 | wheels = [ 109 | { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, 110 | ] 111 | 112 | [[package]] 113 | name = "idna" 114 | version = "3.10" 115 | source = { registry = "https://pypi.org/simple" } 116 | sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = 
"sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } 117 | wheels = [ 118 | { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, 119 | ] 120 | 121 | [[package]] 122 | name = "ollama" 123 | source = { editable = "." } 124 | dependencies = [ 125 | { name = "httpx" }, 126 | { name = "pydantic" }, 127 | ] 128 | 129 | [package.metadata] 130 | requires-dist = [ 131 | { name = "httpx", specifier = ">=0.27" }, 132 | { name = "pydantic", specifier = ">=2.9" }, 133 | ] 134 | 135 | [[package]] 136 | name = "pydantic" 137 | version = "2.10.6" 138 | source = { registry = "https://pypi.org/simple" } 139 | dependencies = [ 140 | { name = "annotated-types" }, 141 | { name = "pydantic-core" }, 142 | { name = "typing-extensions" }, 143 | ] 144 | sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681 } 145 | wheels = [ 146 | { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696 }, 147 | ] 148 | 149 | [[package]] 150 | name = "pydantic-core" 151 | version = "2.27.2" 152 | source = { registry = "https://pypi.org/simple" } 153 | dependencies = [ 154 | { name = "typing-extensions" }, 155 | ] 156 | sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 } 157 | wheels = [ 158 | { url = 
"https://files.pythonhosted.org/packages/3a/bc/fed5f74b5d802cf9a03e83f60f18864e90e3aed7223adaca5ffb7a8d8d64/pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa", size = 1895938 }, 159 | { url = "https://files.pythonhosted.org/packages/71/2a/185aff24ce844e39abb8dd680f4e959f0006944f4a8a0ea372d9f9ae2e53/pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c", size = 1815684 }, 160 | { url = "https://files.pythonhosted.org/packages/c3/43/fafabd3d94d159d4f1ed62e383e264f146a17dd4d48453319fd782e7979e/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a", size = 1829169 }, 161 | { url = "https://files.pythonhosted.org/packages/a2/d1/f2dfe1a2a637ce6800b799aa086d079998959f6f1215eb4497966efd2274/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5", size = 1867227 }, 162 | { url = "https://files.pythonhosted.org/packages/7d/39/e06fcbcc1c785daa3160ccf6c1c38fea31f5754b756e34b65f74e99780b5/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c", size = 2037695 }, 163 | { url = "https://files.pythonhosted.org/packages/7a/67/61291ee98e07f0650eb756d44998214231f50751ba7e13f4f325d95249ab/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7", size = 2741662 }, 164 | { url = "https://files.pythonhosted.org/packages/32/90/3b15e31b88ca39e9e626630b4c4a1f5a0dfd09076366f4219429e6786076/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a", size = 1993370 }, 165 | { url = "https://files.pythonhosted.org/packages/ff/83/c06d333ee3a67e2e13e07794995c1535565132940715931c1c43bfc85b11/pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236", size = 1996813 }, 166 | { url = "https://files.pythonhosted.org/packages/7c/f7/89be1c8deb6e22618a74f0ca0d933fdcb8baa254753b26b25ad3acff8f74/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962", size = 2005287 }, 167 | { url = "https://files.pythonhosted.org/packages/b7/7d/8eb3e23206c00ef7feee17b83a4ffa0a623eb1a9d382e56e4aa46fd15ff2/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9", size = 2128414 }, 168 | { url = "https://files.pythonhosted.org/packages/4e/99/fe80f3ff8dd71a3ea15763878d464476e6cb0a2db95ff1c5c554133b6b83/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af", size = 2155301 }, 169 | { url = "https://files.pythonhosted.org/packages/2b/a3/e50460b9a5789ca1451b70d4f52546fa9e2b420ba3bfa6100105c0559238/pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4", size = 1816685 }, 170 | { url = "https://files.pythonhosted.org/packages/57/4c/a8838731cb0f2c2a39d3535376466de6049034d7b239c0202a64aaa05533/pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31", size = 1982876 }, 171 | { url = "https://files.pythonhosted.org/packages/c2/89/f3450af9d09d44eea1f2c369f49e8f181d742f28220f88cc4dfaae91ea6e/pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc", size = 1893421 }, 172 | { url = "https://files.pythonhosted.org/packages/9e/e3/71fe85af2021f3f386da42d291412e5baf6ce7716bd7101ea49c810eda90/pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7", size = 1814998 }, 173 | { url = "https://files.pythonhosted.org/packages/a6/3c/724039e0d848fd69dbf5806894e26479577316c6f0f112bacaf67aa889ac/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15", size = 1826167 }, 174 | { url = "https://files.pythonhosted.org/packages/2b/5b/1b29e8c1fb5f3199a9a57c1452004ff39f494bbe9bdbe9a81e18172e40d3/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306", size = 1865071 }, 175 | { url = "https://files.pythonhosted.org/packages/89/6c/3985203863d76bb7d7266e36970d7e3b6385148c18a68cc8915fd8c84d57/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99", size = 2036244 }, 176 | { url = "https://files.pythonhosted.org/packages/0e/41/f15316858a246b5d723f7d7f599f79e37493b2e84bfc789e58d88c209f8a/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459", size = 2737470 }, 177 | { url = "https://files.pythonhosted.org/packages/a8/7c/b860618c25678bbd6d1d99dbdfdf0510ccb50790099b963ff78a124b754f/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048", size = 1992291 }, 178 | { url = 
"https://files.pythonhosted.org/packages/bf/73/42c3742a391eccbeab39f15213ecda3104ae8682ba3c0c28069fbcb8c10d/pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d", size = 1994613 }, 179 | { url = "https://files.pythonhosted.org/packages/94/7a/941e89096d1175d56f59340f3a8ebaf20762fef222c298ea96d36a6328c5/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b", size = 2002355 }, 180 | { url = "https://files.pythonhosted.org/packages/6e/95/2359937a73d49e336a5a19848713555605d4d8d6940c3ec6c6c0ca4dcf25/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474", size = 2126661 }, 181 | { url = "https://files.pythonhosted.org/packages/2b/4c/ca02b7bdb6012a1adef21a50625b14f43ed4d11f1fc237f9d7490aa5078c/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6", size = 2153261 }, 182 | { url = "https://files.pythonhosted.org/packages/72/9d/a241db83f973049a1092a079272ffe2e3e82e98561ef6214ab53fe53b1c7/pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c", size = 1812361 }, 183 | { url = "https://files.pythonhosted.org/packages/e8/ef/013f07248041b74abd48a385e2110aa3a9bbfef0fbd97d4e6d07d2f5b89a/pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc", size = 1982484 }, 184 | { url = "https://files.pythonhosted.org/packages/10/1c/16b3a3e3398fd29dca77cea0a1d998d6bde3902fa2706985191e2313cc76/pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4", size = 1867102 }, 185 | { url = 
"https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 }, 186 | { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 }, 187 | { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 }, 188 | { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 }, 189 | { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 }, 190 | { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 }, 191 | { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 }, 192 | { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 }, 193 | { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 }, 194 | { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 }, 195 | { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 }, 196 | { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 }, 197 | { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 }, 198 | { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = 
"sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 }, 199 | { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 }, 200 | { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 }, 201 | { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 }, 202 | { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 }, 203 | { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 }, 204 | { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 }, 205 | { url = 
"https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 }, 206 | { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 }, 207 | { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 }, 208 | { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 }, 209 | { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 }, 210 | { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 }, 211 | { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 }, 212 | { url = 
"https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 }, 213 | { url = "https://files.pythonhosted.org/packages/43/53/13e9917fc69c0a4aea06fd63ed6a8d6cda9cf140ca9584d49c1650b0ef5e/pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506", size = 1899595 }, 214 | { url = "https://files.pythonhosted.org/packages/f4/20/26c549249769ed84877f862f7bb93f89a6ee08b4bee1ed8781616b7fbb5e/pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320", size = 1775010 }, 215 | { url = "https://files.pythonhosted.org/packages/35/eb/8234e05452d92d2b102ffa1b56d801c3567e628fdc63f02080fdfc68fd5e/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145", size = 1830727 }, 216 | { url = "https://files.pythonhosted.org/packages/8f/df/59f915c8b929d5f61e5a46accf748a87110ba145156f9326d1a7d28912b2/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1", size = 1868393 }, 217 | { url = "https://files.pythonhosted.org/packages/d5/52/81cf4071dca654d485c277c581db368b0c95b2b883f4d7b736ab54f72ddf/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228", size = 2040300 }, 218 | { url = "https://files.pythonhosted.org/packages/9c/00/05197ce1614f5c08d7a06e1d39d5d8e704dc81971b2719af134b844e2eaf/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046", size = 2738785 }, 219 | { url = "https://files.pythonhosted.org/packages/f7/a3/5f19bc495793546825ab160e530330c2afcee2281c02b5ffafd0b32ac05e/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5", size = 1996493 }, 220 | { url = "https://files.pythonhosted.org/packages/ed/e8/e0102c2ec153dc3eed88aea03990e1b06cfbca532916b8a48173245afe60/pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a", size = 1998544 }, 221 | { url = "https://files.pythonhosted.org/packages/fb/a3/4be70845b555bd80aaee9f9812a7cf3df81550bce6dadb3cfee9c5d8421d/pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d", size = 2007449 }, 222 | { url = "https://files.pythonhosted.org/packages/e3/9f/b779ed2480ba355c054e6d7ea77792467631d674b13d8257085a4bc7dcda/pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9", size = 2129460 }, 223 | { url = "https://files.pythonhosted.org/packages/a0/f0/a6ab0681f6e95260c7fbf552874af7302f2ea37b459f9b7f00698f875492/pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da", size = 2159609 }, 224 | { url = "https://files.pythonhosted.org/packages/8a/2b/e1059506795104349712fbca647b18b3f4a7fd541c099e6259717441e1e0/pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b", size = 1819886 }, 225 | { url = "https://files.pythonhosted.org/packages/aa/6d/df49c17f024dfc58db0bacc7b03610058018dd2ea2eaf748ccbada4c3d06/pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = 
"sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad", size = 1980773 }, 226 | { url = "https://files.pythonhosted.org/packages/27/97/3aef1ddb65c5ccd6eda9050036c956ff6ecbfe66cb7eb40f280f121a5bb0/pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993", size = 1896475 }, 227 | { url = "https://files.pythonhosted.org/packages/ad/d3/5668da70e373c9904ed2f372cb52c0b996426f302e0dee2e65634c92007d/pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308", size = 1772279 }, 228 | { url = "https://files.pythonhosted.org/packages/8a/9e/e44b8cb0edf04a2f0a1f6425a65ee089c1d6f9c4c2dcab0209127b6fdfc2/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4", size = 1829112 }, 229 | { url = "https://files.pythonhosted.org/packages/1c/90/1160d7ac700102effe11616e8119e268770f2a2aa5afb935f3ee6832987d/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf", size = 1866780 }, 230 | { url = "https://files.pythonhosted.org/packages/ee/33/13983426df09a36d22c15980008f8d9c77674fc319351813b5a2739b70f3/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76", size = 2037943 }, 231 | { url = "https://files.pythonhosted.org/packages/01/d7/ced164e376f6747e9158c89988c293cd524ab8d215ae4e185e9929655d5c/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118", size = 2740492 }, 232 | { url = 
"https://files.pythonhosted.org/packages/8b/1f/3dc6e769d5b7461040778816aab2b00422427bcaa4b56cc89e9c653b2605/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630", size = 1995714 }, 233 | { url = "https://files.pythonhosted.org/packages/07/d7/a0bd09bc39283530b3f7c27033a814ef254ba3bd0b5cfd040b7abf1fe5da/pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54", size = 1997163 }, 234 | { url = "https://files.pythonhosted.org/packages/2d/bb/2db4ad1762e1c5699d9b857eeb41959191980de6feb054e70f93085e1bcd/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f", size = 2005217 }, 235 | { url = "https://files.pythonhosted.org/packages/53/5f/23a5a3e7b8403f8dd8fc8a6f8b49f6b55c7d715b77dcf1f8ae919eeb5628/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362", size = 2127899 }, 236 | { url = "https://files.pythonhosted.org/packages/c2/ae/aa38bb8dd3d89c2f1d8362dd890ee8f3b967330821d03bbe08fa01ce3766/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96", size = 2155726 }, 237 | { url = "https://files.pythonhosted.org/packages/98/61/4f784608cc9e98f70839187117ce840480f768fed5d386f924074bf6213c/pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e", size = 1817219 }, 238 | { url = "https://files.pythonhosted.org/packages/57/82/bb16a68e4a1a858bb3768c2c8f1ff8d8978014e16598f001ea29a25bf1d1/pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67", size = 1985382 }, 239 | { url = 
"https://files.pythonhosted.org/packages/46/72/af70981a341500419e67d5cb45abe552a7c74b66326ac8877588488da1ac/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e", size = 1891159 }, 240 | { url = "https://files.pythonhosted.org/packages/ad/3d/c5913cccdef93e0a6a95c2d057d2c2cba347815c845cda79ddd3c0f5e17d/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8", size = 1768331 }, 241 | { url = "https://files.pythonhosted.org/packages/f6/f0/a3ae8fbee269e4934f14e2e0e00928f9346c5943174f2811193113e58252/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3", size = 1822467 }, 242 | { url = "https://files.pythonhosted.org/packages/d7/7a/7bbf241a04e9f9ea24cd5874354a83526d639b02674648af3f350554276c/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f", size = 1979797 }, 243 | { url = "https://files.pythonhosted.org/packages/4f/5f/4784c6107731f89e0005a92ecb8a2efeafdb55eb992b8e9d0a2be5199335/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133", size = 1987839 }, 244 | { url = "https://files.pythonhosted.org/packages/6d/a7/61246562b651dff00de86a5f01b6e4befb518df314c54dec187a78d81c84/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc", size = 1998861 }, 245 | { url = "https://files.pythonhosted.org/packages/86/aa/837821ecf0c022bbb74ca132e117c358321e72e7f9702d1b6a03758545e2/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = 
"sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50", size = 2116582 }, 246 | { url = "https://files.pythonhosted.org/packages/81/b0/5e74656e95623cbaa0a6278d16cf15e10a51f6002e3ec126541e95c29ea3/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9", size = 2151985 }, 247 | { url = "https://files.pythonhosted.org/packages/63/37/3e32eeb2a451fddaa3898e2163746b0cffbbdbb4740d38372db0490d67f3/pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151", size = 2004715 }, 248 | { url = "https://files.pythonhosted.org/packages/29/0e/dcaea00c9dbd0348b723cae82b0e0c122e0fa2b43fa933e1622fd237a3ee/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656", size = 1891733 }, 249 | { url = "https://files.pythonhosted.org/packages/86/d3/e797bba8860ce650272bda6383a9d8cad1d1c9a75a640c9d0e848076f85e/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278", size = 1768375 }, 250 | { url = "https://files.pythonhosted.org/packages/41/f7/f847b15fb14978ca2b30262548f5fc4872b2724e90f116393eb69008299d/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb", size = 1822307 }, 251 | { url = "https://files.pythonhosted.org/packages/9c/63/ed80ec8255b587b2f108e514dc03eed1546cd00f0af281e699797f373f38/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd", size = 1979971 }, 252 | { url = 
"https://files.pythonhosted.org/packages/a9/6d/6d18308a45454a0de0e975d70171cadaf454bc7a0bf86b9c7688e313f0bb/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc", size = 1987616 }, 253 | { url = "https://files.pythonhosted.org/packages/82/8a/05f8780f2c1081b800a7ca54c1971e291c2d07d1a50fb23c7e4aef4ed403/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b", size = 1998943 }, 254 | { url = "https://files.pythonhosted.org/packages/5e/3e/fe5b6613d9e4c0038434396b46c5303f5ade871166900b357ada4766c5b7/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b", size = 2116654 }, 255 | { url = "https://files.pythonhosted.org/packages/db/ad/28869f58938fad8cc84739c4e592989730bfb69b7c90a8fff138dff18e1e/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2", size = 2152292 }, 256 | { url = "https://files.pythonhosted.org/packages/a1/0c/c5c5cd3689c32ed1fe8c5d234b079c12c281c051759770c05b8bed6412b5/pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35", size = 2004961 }, 257 | ] 258 | 259 | [[package]] 260 | name = "sniffio" 261 | version = "1.3.1" 262 | source = { registry = "https://pypi.org/simple" } 263 | sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } 264 | wheels = [ 265 | { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = 
"sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, 266 | ] 267 | 268 | [[package]] 269 | name = "typing-extensions" 270 | version = "4.12.2" 271 | source = { registry = "https://pypi.org/simple" } 272 | sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } 273 | wheels = [ 274 | { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, 275 | ] 276 | --------------------------------------------------------------------------------