├── .editorconfig ├── .github └── workflows │ └── tests.yml ├── .php-cs-fixer.dist.php ├── Makefile ├── README.md ├── composer.json ├── docs └── ollama.yaml ├── phpstan.neon └── src ├── Client.php ├── Client ├── Message.php ├── Request.php ├── Request │ ├── ChatRequest.php │ ├── CreateRequest.php │ ├── PullRequest.php │ └── PushRequest.php └── Response │ ├── ChatResponse.php │ ├── ChatStreamFinalResponse.php │ ├── ChatStreamResponse.php │ ├── ListModelsResponse.php │ └── StreamStatusResponse.php ├── ClientBuilder.php ├── Http.php ├── OllamaException.php └── Resource ├── Model.php └── Model └── Details.php /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | insert_final_newline = true 7 | indent_style = space 8 | indent_size = 4 9 | trim_trailing_whitespace = true 10 | 11 | [Makefile] 12 | indent_style = tab 13 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: [push] 3 | 4 | jobs: 5 | linter: 6 | name: Code Style 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Checkout 10 | uses: actions/checkout@v4 11 | - name: Setup PHP 12 | uses: shivammathur/setup-php@v2 13 | with: 14 | php-version: 8.1 15 | coverage: xdebug 16 | - name: Get Composer Cache Directory 17 | id: composer-cache 18 | run: echo "::set-output name=dir::$(composer config cache-files-dir)" 19 | - name: Cache dependencies 20 | uses: actions/cache@v4 21 | with: 22 | path: ${{ steps.composer-cache.outputs.dir }} 23 | key: ${{ runner.os }}-composer-${{ hashFiles('**/composer.json') }} 24 | restore-keys: ${{ runner.os }}-composer- 25 | - name: Install Dependencies 26 | run: composer install --no-progress 27 | - name: Run php-cs-fixture 28 | run: vendor/bin/php-cs-fixer fix -v --dry-run 29 | 30 | phpstan: 31 | 
name: Static analysis 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: Checkout 35 | uses: actions/checkout@v4 36 | - name: Setup PHP 37 | uses: shivammathur/setup-php@v2 38 | with: 39 | php-version: 8.1 40 | coverage: xdebug 41 | - name: Get Composer Cache Directory 42 | id: composer-cache 43 | run: echo "::set-output name=dir::$(composer config cache-files-dir)" 44 | - name: Cache dependencies 45 | uses: actions/cache@v4 46 | with: 47 | path: ${{ steps.composer-cache.outputs.dir }} 48 | key: ${{ runner.os }}-composer-${{ hashFiles('**/composer.json') }} 49 | restore-keys: ${{ runner.os }}-composer- 50 | - name: Install Dependencies 51 | run: composer install --no-progress 52 | - name: Run phpstan 53 | run: vendor/bin/phpstan 54 | 55 | phpunit: 56 | name: Unit Tests 57 | runs-on: ubuntu-latest 58 | strategy: 59 | matrix: 60 | php: ['8.1', '8.2', '8.3'] 61 | flags: ['', '--prefer-lowest', '--prefer-stable'] 62 | steps: 63 | - name: Checkout 64 | uses: actions/checkout@v4 65 | - name: Setup PHP 66 | uses: shivammathur/setup-php@v2 67 | with: 68 | php-version: ${{ matrix.php }} 69 | coverage: xdebug 70 | - name: Get Composer Cache Directory 71 | id: composer-cache 72 | run: echo "::set-output name=dir::$(composer config cache-files-dir)" 73 | - name: Cache dependencies 74 | uses: actions/cache@v4 75 | with: 76 | path: ${{ steps.composer-cache.outputs.dir }} 77 | key: ${{ runner.os }}-composer-${{ hashFiles('**/composer.json') }} 78 | restore-keys: ${{ runner.os }}-composer- 79 | - name: Install Dependencies 80 | run: composer update --prefer-dist --no-interaction --optimize-autoloader --prefer-stable --no-progress $COMPOSER_FLAGS 81 | env: 82 | COMPOSER_FLAGS: ${{ matrix.flags }} 83 | - name: Run PHPUnit 84 | run: vendor/bin/phpunit 85 | 86 | openapi: 87 | name: Validate OpenAPI specification 88 | runs-on: ubuntu-latest 89 | steps: 90 | - name: Checkout 91 | uses: actions/checkout@v4 92 | - uses: swaggerexpert/apidom-validate@v1 93 | with: 94 | definition-file: 
docs/ollama.yaml 95 | fails-on: 1 # Fails if error messages exist in validation output 96 | -------------------------------------------------------------------------------- /.php-cs-fixer.dist.php: -------------------------------------------------------------------------------- 1 | in(__DIR__) 5 | ->exclude('vendor') 6 | ; 7 | 8 | $config = new PhpCsFixer\Config(); 9 | $config 10 | ->setRiskyAllowed(true) 11 | ->setRules([ 12 | '@PSR2' => true, 13 | 'array_syntax' => ['syntax' => 'short'], 14 | 'declare_strict_types' => true, 15 | 'function_declaration' => ['closure_function_spacing' => 'none'], 16 | 'single_import_per_statement' => false, 17 | ]) 18 | ->setFinder($finder) 19 | ; 20 | 21 | return $config; 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | define printSection 2 | @printf "\033[36m\n==================================================\n\033[0m" 3 | @printf "\033[36m $1 \033[0m" 4 | @printf "\033[36m\n==================================================\n\033[0m" 5 | endef 6 | 7 | .PHONY: cs 8 | cs: 9 | $(call printSection,CODE STYLE) 10 | vendor/bin/php-cs-fixer fix --dry-run --stop-on-violation --diff 11 | 12 | .PHONY: cs-fix 13 | cs-fix: 14 | vendor/bin/php-cs-fixer fix 15 | 16 | .PHONY: cs-ci 17 | cs-ci: 18 | $(call printSection,CODE STYLE) 19 | vendor/bin/php-cs-fixer fix --ansi --dry-run --using-cache=no --verbose 20 | 21 | .PHONY: phpstan 22 | phpstan: 23 | $(call printSection,PHPSTAN) 24 | vendor/bin/phpstan analyse -c phpstan.neon --ansi --no-progress --no-interaction 25 | 26 | .PHONY: phpunit 27 | phpunit: 28 | $(call printSection,PHPUNIT) 29 | vendor/bin/phpunit 30 | 31 | .PHONY: phpunit-coverage 32 | phpunit-coverage: 33 | $(call printSection,PHPUNIT COVERAGE) 34 | vendor/bin/phpunit --coverage-text 35 | 36 | .PHONY: clean-vendor 37 | clean-vendor: 38 | $(call printSection,CLEAN-VENDOR) 39 | rm -f composer.lock 40 
| rm -rf vendor 41 | 42 | .PHONY: composer-install 43 | composer-install: vendor/composer/installed.json 44 | 45 | vendor/composer/installed.json: 46 | $(call printSection,COMPOSER INSTALL) 47 | composer --no-interaction install --ansi --no-progress --prefer-dist 48 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Ollama PHP Client 2 | ================= 3 | 4 | This is a PHP client for the Ollama API. 5 | 6 | ## Getting Started 7 | 8 | ``` 9 | $ composer require jdecool/ollama-client 10 | ``` 11 | 12 | ## Usage 13 | 14 | ```php 15 | use JDecool\OllamaClient\ClientBuilder; 16 | 17 | $builder = new ClientBuilder(); 18 | $client = $builder->create(); 19 | ``` 20 | -------------------------------------------------------------------------------- /composer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "jdecool/ollama-client", 3 | "description": "Ollama PHP API client", 4 | "license": "MIT", 5 | "type": "library", 6 | "authors": [ 7 | { 8 | "name": "Jérémy DECOOL", 9 | "email": "contact@jdecool.fr" 10 | } 11 | ], 12 | "require": { 13 | "php": ">=8.1", 14 | "psr/log": "^3.0", 15 | "symfony/http-client": "^5.4 || ^6.0 || ^7.0" 16 | }, 17 | "require-dev": { 18 | "ergebnis/composer-normalize": "^2.41", 19 | "friendsofphp/php-cs-fixer": "^3.47", 20 | "nyholm/psr7": "^1.8", 21 | "php-http/guzzle7-adapter": "^1.0", 22 | "php-http/mock-client": "^1.6", 23 | "phpstan/phpstan": "^1.10", 24 | "phpunit/phpunit": "^10.5" 25 | }, 26 | "autoload": { 27 | "psr-4": { 28 | "JDecool\\OllamaClient\\": "src/" 29 | } 30 | }, 31 | "autoload-dev": { 32 | "psr-4": { 33 | "JDecool\\OllamaClient\\Tests\\": "tests/" 34 | } 35 | }, 36 | "config": { 37 | "allow-plugins": { 38 | "ergebnis/composer-normalize": true, 39 | "php-http/discovery": true 40 | } 41 | } 42 | } 43 | 
-------------------------------------------------------------------------------- /docs/ollama.yaml: -------------------------------------------------------------------------------- 1 | openapi: 3.1.1 2 | info: 3 | title: Ollama API 4 | description: API for interacting with Ollama to generate completions, manage models, and generate embeddings. 5 | version: '0.1.0' 6 | servers: 7 | - url: http://localhost:11434 8 | description: Local Ollama server 9 | paths: 10 | /api/generate: 11 | post: 12 | summary: Generate a completion 13 | description: Generate a response for a given prompt with a provided model. This is a streaming endpoint, so there will be a series of responses. 14 | requestBody: 15 | required: true 16 | content: 17 | application/json: 18 | schema: 19 | $ref: '#/components/schemas/GenerateRequest' 20 | responses: 21 | '200': 22 | description: A stream of JSON objects with the generated completion 23 | content: 24 | application/x-ndjson: 25 | schema: 26 | oneOf: 27 | - $ref: '#/components/schemas/GenerateResponse' 28 | - $ref: '#/components/schemas/GenerateFinalResponse' 29 | /api/chat: 30 | post: 31 | summary: Generate a chat completion 32 | description: Generate the next message in a chat with a provided model. This is a streaming endpoint. 33 | requestBody: 34 | required: true 35 | content: 36 | application/json: 37 | schema: 38 | $ref: '#/components/schemas/ChatRequest' 39 | responses: 40 | '200': 41 | description: A stream of JSON objects with the generated chat message 42 | content: 43 | application/x-ndjson: 44 | schema: 45 | oneOf: 46 | - $ref: '#/components/schemas/ChatResponse' 47 | - $ref: '#/components/schemas/ChatFinalResponse' 48 | /api/create: 49 | post: 50 | summary: Create a model 51 | description: Create a model from another model, a safetensors directory, or a GGUF file. 
52 | requestBody: 53 | required: true 54 | content: 55 | application/json: 56 | schema: 57 | $ref: '#/components/schemas/CreateRequest' 58 | responses: 59 | '200': 60 | description: A stream of JSON objects showing the creation progress 61 | content: 62 | application/x-ndjson: 63 | schema: 64 | type: object 65 | properties: 66 | status: 67 | type: string 68 | description: Current status of the model creation 69 | /api/blobs/{digest}: 70 | parameters: 71 | - in: path 72 | name: digest 73 | schema: 74 | type: string 75 | required: true 76 | description: The SHA256 digest of the blob 77 | head: 78 | summary: Check if a blob exists 79 | description: Ensures that the file blob used with create a model exists on the server. 80 | responses: 81 | '200': 82 | description: Blob exists 83 | '404': 84 | description: Blob not found 85 | post: 86 | summary: Push a blob 87 | description: Push a file to the Ollama server to create a "blob" (Binary Large Object). 88 | requestBody: 89 | required: true 90 | content: 91 | application/octet-stream: 92 | schema: 93 | type: string 94 | format: binary 95 | responses: 96 | '201': 97 | description: Blob was successfully created 98 | '400': 99 | description: Bad Request if the digest used is not expected 100 | /api/tags: 101 | get: 102 | summary: List local models 103 | description: List models that are available locally. 104 | responses: 105 | '200': 106 | description: A list of available models 107 | content: 108 | application/json: 109 | schema: 110 | $ref: '#/components/schemas/TagsResponse' 111 | /api/show: 112 | post: 113 | summary: Show model information 114 | description: Show information about a model including details, modelfile, template, parameters, license, system prompt. 
115 | requestBody: 116 | required: true 117 | content: 118 | application/json: 119 | schema: 120 | $ref: '#/components/schemas/ShowRequest' 121 | responses: 122 | '200': 123 | description: Model information 124 | content: 125 | application/json: 126 | schema: 127 | $ref: '#/components/schemas/ShowResponse' 128 | /api/copy: 129 | post: 130 | summary: Copy a model 131 | description: Copy a model. Creates a model with another name from an existing model. 132 | requestBody: 133 | required: true 134 | content: 135 | application/json: 136 | schema: 137 | $ref: '#/components/schemas/CopyRequest' 138 | responses: 139 | '200': 140 | description: Model copied successfully 141 | '404': 142 | description: Source model not found 143 | /api/delete: 144 | delete: 145 | summary: Delete a model 146 | description: Delete a model and its data. 147 | requestBody: 148 | required: true 149 | content: 150 | application/json: 151 | schema: 152 | $ref: '#/components/schemas/DeleteRequest' 153 | responses: 154 | '200': 155 | description: Model deleted successfully 156 | '404': 157 | description: Model not found 158 | /api/pull: 159 | post: 160 | summary: Pull a model 161 | description: Download a model from the ollama library. 
162 | requestBody: 163 | required: true 164 | content: 165 | application/json: 166 | schema: 167 | $ref: '#/components/schemas/PullRequest' 168 | responses: 169 | '200': 170 | description: A stream of JSON objects showing the pull progress 171 | content: 172 | application/x-ndjson: 173 | schema: 174 | oneOf: 175 | - type: object 176 | properties: 177 | status: 178 | type: string 179 | description: Current status of the pull operation 180 | - type: object 181 | properties: 182 | status: 183 | type: string 184 | enum: [downloading digestname] 185 | description: Download status 186 | digest: 187 | type: string 188 | description: Digest being downloaded 189 | total: 190 | type: integer 191 | description: Total size to download 192 | completed: 193 | type: integer 194 | description: Amount downloaded so far 195 | /api/push: 196 | post: 197 | summary: Push a model 198 | description: Upload a model to a model library. 199 | requestBody: 200 | required: true 201 | content: 202 | application/json: 203 | schema: 204 | $ref: '#/components/schemas/PushRequest' 205 | responses: 206 | '200': 207 | description: A stream of JSON objects showing the push progress 208 | content: 209 | application/x-ndjson: 210 | schema: 211 | oneOf: 212 | - type: object 213 | properties: 214 | status: 215 | type: string 216 | description: Current status of the push operation 217 | - type: object 218 | properties: 219 | status: 220 | type: string 221 | enum: [starting upload] 222 | description: Upload status 223 | digest: 224 | type: string 225 | description: Digest being uploaded 226 | total: 227 | type: integer 228 | description: Total size to upload 229 | /api/embed: 230 | post: 231 | summary: Generate embeddings 232 | description: Generate embeddings from a model 233 | requestBody: 234 | required: true 235 | content: 236 | application/json: 237 | schema: 238 | $ref: '#/components/schemas/EmbedRequest' 239 | responses: 240 | '200': 241 | description: Generated embeddings 242 | content: 243 | 
application/json: 244 | schema: 245 | $ref: '#/components/schemas/EmbedResponse' 246 | /api/ps: 247 | get: 248 | summary: List running models 249 | description: List models that are currently loaded into memory. 250 | responses: 251 | '200': 252 | description: A list of running models 253 | content: 254 | application/json: 255 | schema: 256 | $ref: '#/components/schemas/PSResponse' 257 | /api/embeddings: 258 | post: 259 | summary: Generate embedding (deprecated) 260 | description: Generate embeddings from a model (superseded by /api/embed) 261 | requestBody: 262 | required: true 263 | content: 264 | application/json: 265 | schema: 266 | $ref: '#/components/schemas/EmbeddingsRequest' 267 | responses: 268 | '200': 269 | description: Generated embedding 270 | content: 271 | application/json: 272 | schema: 273 | $ref: '#/components/schemas/EmbeddingsResponse' 274 | /api/version: 275 | get: 276 | summary: Get version 277 | description: Retrieve the Ollama version 278 | responses: 279 | '200': 280 | description: Version information 281 | content: 282 | application/json: 283 | schema: 284 | $ref: '#/components/schemas/VersionResponse' 285 | components: 286 | schemas: 287 | GenerateRequest: 288 | type: object 289 | required: 290 | - model 291 | properties: 292 | model: 293 | type: string 294 | description: The model name 295 | prompt: 296 | type: string 297 | description: The prompt to generate a response for 298 | suffix: 299 | type: string 300 | description: The text after the model response 301 | images: 302 | type: array 303 | items: 304 | type: string 305 | format: binary 306 | description: A list of base64-encoded images (for multimodal models such as llava) 307 | format: 308 | type: object 309 | description: The format to return a response in, can be json or a JSON schema 310 | options: 311 | type: object 312 | description: Additional model parameters such as temperature 313 | additionalProperties: true 314 | system: 315 | type: string 316 | description: System 
message to (overrides what is defined in the Modelfile) 317 | template: 318 | type: string 319 | description: The prompt template to use (overrides what is defined in the Modelfile) 320 | stream: 321 | type: boolean 322 | default: true 323 | description: If false the response will be returned as a single response object 324 | raw: 325 | type: boolean 326 | default: false 327 | description: If true no formatting will be applied to the prompt 328 | keep_alive: 329 | type: string 330 | default: 5m 331 | description: Controls how long the model will stay loaded into memory 332 | context: 333 | type: array 334 | items: 335 | type: integer 336 | description: The context parameter returned from a previous request 337 | example: 338 | model: llama3.2 339 | prompt: Why is the sky blue? 340 | options: 341 | temperature: 0.8 342 | GenerateResponse: 343 | type: object 344 | properties: 345 | model: 346 | type: string 347 | description: The model name 348 | created_at: 349 | type: string 350 | format: date-time 351 | description: Timestamp of when the response was created 352 | response: 353 | type: string 354 | description: The generated text 355 | done: 356 | type: boolean 357 | description: Whether this is the final response 358 | example: 359 | model: llama3.2 360 | created_at: 2023-08-04T08:52:19.385406455-07:00 361 | response: The 362 | done: false 363 | GenerateFinalResponse: 364 | type: object 365 | properties: 366 | model: 367 | type: string 368 | description: The model name 369 | created_at: 370 | type: string 371 | format: date-time 372 | description: Timestamp of when the response was created 373 | response: 374 | type: string 375 | description: The generated text (empty if streamed) 376 | done: 377 | type: boolean 378 | description: Always true for the final response 379 | done_reason: 380 | type: string 381 | description: Reason for completion (stop, length, etc.) 
382 | context: 383 | type: array 384 | items: 385 | type: integer 386 | description: An encoding of the conversation context 387 | total_duration: 388 | type: integer 389 | description: Time spent generating the response (in nanoseconds) 390 | load_duration: 391 | type: integer 392 | description: Time spent loading the model (in nanoseconds) 393 | prompt_eval_count: 394 | type: integer 395 | description: Number of tokens in the prompt 396 | prompt_eval_duration: 397 | type: integer 398 | description: Time spent evaluating the prompt (in nanoseconds) 399 | eval_count: 400 | type: integer 401 | description: Number of tokens in the response 402 | eval_duration: 403 | type: integer 404 | description: Time spent generating the response (in nanoseconds) 405 | example: 406 | model: llama3.2 407 | created_at: 2023-08-04T19:22:45.499127Z 408 | response: "" 409 | done: true 410 | context: [1, 2, 3] 411 | total_duration: 10706818083 412 | load_duration: 6338219291 413 | prompt_eval_count: 26 414 | prompt_eval_duration: 130079000 415 | eval_count: 259 416 | eval_duration: 4232710000 417 | ChatRequest: 418 | type: object 419 | required: 420 | - model 421 | properties: 422 | model: 423 | type: string 424 | description: The model name 425 | messages: 426 | type: array 427 | description: The messages of the chat 428 | items: 429 | $ref: '#/components/schemas/Message' 430 | tools: 431 | type: array 432 | description: List of tools for the model to use if supported 433 | items: 434 | $ref: '#/components/schemas/Tool' 435 | format: 436 | type: object 437 | description: The format to return a response in, can be json or a JSON schema 438 | options: 439 | type: object 440 | description: Additional model parameters such as temperature 441 | additionalProperties: true 442 | stream: 443 | type: boolean 444 | default: true 445 | description: If false the response will be returned as a single response object 446 | keep_alive: 447 | type: string 448 | default: 5m 449 | description: Controls 
how long the model will stay loaded into memory 450 | example: 451 | model: llama3.2 452 | messages: 453 | - role: user 454 | content: Why is the sky blue? 455 | Message: 456 | type: object 457 | required: 458 | - role 459 | - content 460 | properties: 461 | role: 462 | type: string 463 | enum: [system, user, assistant, tool] 464 | description: The role of the message 465 | content: 466 | type: string 467 | description: The content of the message 468 | images: 469 | type: array 470 | items: 471 | type: string 472 | format: binary 473 | description: A list of images to include in the message 474 | tool_calls: 475 | type: array 476 | description: A list of tools that the model wants to use 477 | items: 478 | $ref: '#/components/schemas/ToolCall' 479 | Tool: 480 | type: object 481 | required: 482 | - type 483 | - function 484 | properties: 485 | type: 486 | type: string 487 | enum: [function] 488 | description: The type of tool 489 | function: 490 | $ref: '#/components/schemas/Function' 491 | Function: 492 | type: object 493 | required: 494 | - name 495 | - description 496 | - parameters 497 | properties: 498 | name: 499 | type: string 500 | description: The name of the function 501 | description: 502 | type: string 503 | description: A description of what the function does 504 | parameters: 505 | type: object 506 | description: The parameters the function accepts 507 | ToolCall: 508 | type: object 509 | properties: 510 | function: 511 | type: object 512 | properties: 513 | name: 514 | type: string 515 | description: The name of the function to call 516 | arguments: 517 | type: object 518 | description: The arguments to pass to the function 519 | additionalProperties: true 520 | ChatResponse: 521 | type: object 522 | properties: 523 | model: 524 | type: string 525 | description: The model name 526 | created_at: 527 | type: string 528 | format: date-time 529 | description: Timestamp of when the response was created 530 | message: 531 | $ref: 
'#/components/schemas/Message' 532 | done: 533 | type: boolean 534 | description: Whether this is the final response 535 | example: 536 | model: llama3.2 537 | created_at: 2023-08-04T08:52:19.385406455-07:00 538 | message: 539 | role: assistant 540 | content: The 541 | done: false 542 | ChatFinalResponse: 543 | type: object 544 | properties: 545 | model: 546 | type: string 547 | description: The model name 548 | created_at: 549 | type: string 550 | format: date-time 551 | description: Timestamp of when the response was created 552 | message: 553 | $ref: '#/components/schemas/Message' 554 | done: 555 | type: boolean 556 | description: Always true for the final response 557 | done_reason: 558 | type: string 559 | description: Reason for completion (stop, length, etc.) 560 | total_duration: 561 | type: integer 562 | description: Time spent generating the response (in nanoseconds) 563 | load_duration: 564 | type: integer 565 | description: Time spent loading the model (in nanoseconds) 566 | prompt_eval_count: 567 | type: integer 568 | description: Number of tokens in the prompt 569 | prompt_eval_duration: 570 | type: integer 571 | description: Time spent evaluating the prompt (in nanoseconds) 572 | eval_count: 573 | type: integer 574 | description: Number of tokens in the response 575 | eval_duration: 576 | type: integer 577 | description: Time spent generating the response (in nanoseconds) 578 | example: 579 | model: llama3.2 580 | created_at: 2023-08-04T19:22:45.499127Z 581 | message: 582 | role: assistant 583 | content: "" 584 | done: true 585 | total_duration: 4883583458 586 | load_duration: 1334875 587 | prompt_eval_count: 26 588 | prompt_eval_duration: 342546000 589 | eval_count: 282 590 | eval_duration: 4535599000 591 | CreateRequest: 592 | type: object 593 | required: 594 | - model 595 | properties: 596 | model: 597 | type: string 598 | description: Name of the model to create 599 | from: 600 | type: string 601 | description: Name of an existing model to create 
the new model from 602 | files: 603 | type: object 604 | additionalProperties: 605 | type: string 606 | description: A dictionary of file names to SHA256 digests of blobs 607 | adapters: 608 | type: object 609 | additionalProperties: 610 | type: string 611 | description: A dictionary of file names to SHA256 digests of blobs for LORA adapters 612 | template: 613 | type: string 614 | description: The prompt template for the model 615 | license: 616 | oneOf: 617 | - type: string 618 | - type: array 619 | items: 620 | type: string 621 | description: A string or list of strings containing the license or licenses for the model 622 | system: 623 | type: string 624 | description: A string containing the system prompt for the model 625 | parameters: 626 | type: object 627 | additionalProperties: true 628 | description: A dictionary of parameters for the model 629 | messages: 630 | type: array 631 | items: 632 | $ref: '#/components/schemas/Message' 633 | description: A list of message objects used to create a conversation 634 | stream: 635 | type: boolean 636 | default: true 637 | description: If false the response will be returned as a single response object 638 | quantize: 639 | type: string 640 | enum: [q2_K, q3_K_L, q3_K_M, q3_K_S, q4_0, q4_1, q4_K_M, q4_K_S, q5_0, q5_1, q5_K_M, q5_K_S, q6_K, q8_0] 641 | description: Quantize a non-quantized (e.g. float16) model 642 | example: 643 | model: mario 644 | from: llama3.2 645 | system: You are Mario from Super Mario Bros. 
646 | ShowRequest: 647 | type: object 648 | required: 649 | - model 650 | properties: 651 | model: 652 | type: string 653 | description: Name of the model to show information for 654 | verbose: 655 | type: boolean 656 | default: false 657 | description: If set to true, returns full data for verbose response fields 658 | example: 659 | model: llava 660 | ShowResponse: 661 | type: object 662 | properties: 663 | modelfile: 664 | type: string 665 | description: The model's Modelfile content 666 | parameters: 667 | type: string 668 | description: Parameters for the model 669 | template: 670 | type: string 671 | description: The prompt template 672 | details: 673 | type: object 674 | properties: 675 | parent_model: 676 | type: string 677 | format: 678 | type: string 679 | family: 680 | type: string 681 | families: 682 | type: array 683 | items: 684 | type: string 685 | parameter_size: 686 | type: string 687 | quantization_level: 688 | type: string 689 | model_info: 690 | type: object 691 | additionalProperties: true 692 | description: Detailed information about the model architecture 693 | capabilities: 694 | type: array 695 | items: 696 | type: string 697 | description: Model capabilities (e.g., completion, vision) 698 | CopyRequest: 699 | type: object 700 | required: 701 | - source 702 | - destination 703 | properties: 704 | source: 705 | type: string 706 | description: Source model name 707 | destination: 708 | type: string 709 | description: Destination model name 710 | example: 711 | source: llama3.2 712 | destination: llama3-backup 713 | DeleteRequest: 714 | type: object 715 | required: 716 | - model 717 | properties: 718 | model: 719 | type: string 720 | description: Model name to delete 721 | example: 722 | model: llama3:13b 723 | PullRequest: 724 | type: object 725 | required: 726 | - model 727 | properties: 728 | model: 729 | type: string 730 | description: Name of the model to pull 731 | insecure: 732 | type: boolean 733 | default: false 734 | description: 
Allow insecure connections to the library 735 | stream: 736 | type: boolean 737 | default: true 738 | description: If false the response will be returned as a single response object 739 | example: 740 | model: llama3.2 741 | PushRequest: 742 | type: object 743 | required: 744 | - model 745 | properties: 746 | model: 747 | type: string 748 | description: Name of the model to push in the form of /: 749 | insecure: 750 | type: boolean 751 | default: false 752 | description: Allow insecure connections to the library 753 | stream: 754 | type: boolean 755 | default: true 756 | description: If false the response will be returned as a single response object 757 | example: 758 | model: mattw/pygmalion:latest 759 | EmbedRequest: 760 | type: object 761 | required: 762 | - model 763 | - input 764 | properties: 765 | model: 766 | type: string 767 | description: Name of model to generate embeddings from 768 | input: 769 | oneOf: 770 | - type: string 771 | - type: array 772 | items: 773 | type: string 774 | description: Text or list of text to generate embeddings for 775 | truncate: 776 | type: boolean 777 | default: true 778 | description: Truncates the end of each input to fit within context length 779 | options: 780 | type: object 781 | additionalProperties: true 782 | description: Additional model parameters 783 | keep_alive: 784 | type: string 785 | default: 5m 786 | description: Controls how long the model will stay loaded into memory 787 | example: 788 | model: all-minilm 789 | input: Why is the sky blue? 
790 | EmbedResponse: 791 | type: object 792 | properties: 793 | model: 794 | type: string 795 | description: The model name 796 | embeddings: 797 | type: array 798 | items: 799 | type: array 800 | items: 801 | type: number 802 | description: The generated embeddings 803 | total_duration: 804 | type: integer 805 | description: Time spent generating the embeddings (in nanoseconds) 806 | load_duration: 807 | type: integer 808 | description: Time spent loading the model (in nanoseconds) 809 | prompt_eval_count: 810 | type: integer 811 | description: Number of tokens in the prompt 812 | example: 813 | model: all-minilm 814 | embeddings: [[0.010071029, -0.0017594862, 0.05007221, 0.04692972, 0.054916814]] 815 | total_duration: 14143917 816 | load_duration: 1019500 817 | prompt_eval_count: 8 818 | EmbeddingsRequest: 819 | type: object 820 | required: 821 | - model 822 | - prompt 823 | properties: 824 | model: 825 | type: string 826 | description: Name of model to generate embeddings from 827 | prompt: 828 | type: string 829 | description: Text to generate embeddings for 830 | options: 831 | type: object 832 | additionalProperties: true 833 | description: Additional model parameters 834 | keep_alive: 835 | type: string 836 | default: 5m 837 | description: Controls how long the model will stay loaded into memory 838 | example: 839 | model: all-minilm 840 | prompt: Here is an article about llamas... 
841 | EmbeddingsResponse: 842 | type: object 843 | properties: 844 | embedding: 845 | type: array 846 | items: 847 | type: number 848 | description: The generated embedding 849 | example: 850 | embedding: [0.5670403838157654, 0.009260174818336964, 0.23178744316101074] 851 | TagsResponse: 852 | type: object 853 | properties: 854 | models: 855 | type: array 856 | items: 857 | type: object 858 | properties: 859 | name: 860 | type: string 861 | description: The model name 862 | modified_at: 863 | type: string 864 | format: date-time 865 | description: When the model was last modified 866 | size: 867 | type: integer 868 | description: Size of the model in bytes 869 | digest: 870 | type: string 871 | description: The SHA256 digest of the model 872 | details: 873 | type: object 874 | properties: 875 | format: 876 | type: string 877 | description: The model format (e.g., gguf) 878 | family: 879 | type: string 880 | description: The model family (e.g., llama) 881 | families: 882 | type: array 883 | items: 884 | type: string 885 | nullable: true 886 | description: List of model families 887 | parameter_size: 888 | type: string 889 | description: The parameter size (e.g., 13B) 890 | quantization_level: 891 | type: string 892 | description: The quantization level (e.g., Q4_0) 893 | PSResponse: 894 | type: object 895 | properties: 896 | models: 897 | type: array 898 | items: 899 | type: object 900 | properties: 901 | name: 902 | type: string 903 | description: The model name 904 | model: 905 | type: string 906 | description: The model name 907 | size: 908 | type: integer 909 | description: Size of the model in bytes 910 | digest: 911 | type: string 912 | description: The SHA256 digest of the model 913 | details: 914 | type: object 915 | properties: 916 | parent_model: 917 | type: string 918 | description: Parent model name 919 | format: 920 | type: string 921 | description: The model format (e.g., gguf) 922 | family: 923 | type: string 924 | description: The model family (e.g., 
llama) 925 | families: 926 | type: array 927 | items: 928 | type: string 929 | description: List of model families 930 | parameter_size: 931 | type: string 932 | description: The parameter size (e.g., 7.2B) 933 | quantization_level: 934 | type: string 935 | description: The quantization level (e.g., Q4_0) 936 | expires_at: 937 | type: string 938 | format: date-time 939 | description: When the model will be unloaded from memory 940 | size_vram: 941 | type: integer 942 | description: Size of the model in VRAM 943 | VersionResponse: 944 | type: object 945 | properties: 946 | version: 947 | type: string 948 | description: Ollama version 949 | example: 950 | version: 0.5.1 951 | -------------------------------------------------------------------------------- /phpstan.neon: -------------------------------------------------------------------------------- 1 | parameters: 2 | level: max 3 | paths: 4 | - examples 5 | - src 6 | - tests 7 | 8 | checkMissingIterableValueType: false 9 | -------------------------------------------------------------------------------- /src/Client.php: -------------------------------------------------------------------------------- 1 | processRequest('POST', 'chat', $request); 24 | 25 | return Response\ChatResponse::fromArray($data); 26 | } 27 | 28 | /** 29 | * @return Generator 30 | * 31 | * @throws OllamaException 32 | */ 33 | public function chatStream(Request\ChatRequest $request): Generator 34 | { 35 | foreach ($this->processStream('POST', 'chat', $request) as $chunk) { 36 | /** @phpstan-ignore-next-line */ 37 | yield match ($chunk['done'] ?? 
false) {
                true => Response\ChatStreamFinalResponse::fromArray($chunk),
                false => Response\ChatStreamResponse::fromArray($chunk),
            };
        }
    }

    /**
     * Creates a model, blocking until the server reports completion.
     *
     * @throws OllamaException
     */
    public function create(Request\CreateRequest $request): void
    {
        $data = $this->processRequest('POST', 'create', $request);

        $this->processResponseWithoutContent($data);
    }

    /**
     * Creates a model, yielding one status update per progress chunk.
     *
     * @return Generator<Response\StreamStatusResponse>
     *
     * @throws OllamaException
     */
    public function createStream(Request\CreateRequest $request): Generator
    {
        foreach ($this->processStream('POST', 'create', $request) as $chunk) {
            yield Response\StreamStatusResponse::fromArray($chunk);
        }
    }

    /**
     * Lists the models available locally (GET /api/tags).
     *
     * @throws OllamaException
     */
    public function list(): Response\ListModelsResponse
    {
        $data = $this->processRequest('GET', 'tags');

        return Response\ListModelsResponse::fromArray($data);
    }

    /**
     * Pulls a model from the registry, blocking until done.
     *
     * @throws OllamaException
     */
    public function pull(Request\PullRequest $request): void
    {
        $data = $this->processRequest('POST', 'pull', $request);

        $this->processResponseWithoutContent($data);
    }

    /**
     * Pulls a model, yielding one status update per progress chunk.
     *
     * @return Generator<Response\StreamStatusResponse>
     *
     * @throws OllamaException
     */
    public function pullStream(Request\PullRequest $request): Generator
    {
        foreach ($this->processStream('POST', 'pull', $request) as $chunk) {
            yield Response\StreamStatusResponse::fromArray($chunk);
        }
    }

    /**
     * Pushes a model to the registry, blocking until done.
     *
     * @throws OllamaException
     */
    public function push(Request\PushRequest $request): void
    {
        $data = $this->processRequest('POST', 'push', $request);

        $this->processResponseWithoutContent($data);
    }

    /**
     * Pushes a model, yielding one status update per progress chunk.
     *
     * @return Generator<Response\StreamStatusResponse>
     *
     * @throws OllamaException
     */
    public function pushStream(Request\PushRequest $request): Generator
    {
        foreach ($this->processStream('POST', 'push', $request) as $chunk) {
            yield Response\StreamStatusResponse::fromArray($chunk);
        }
    }

    /**
     * Sends a non-streaming request and decodes the JSON response body.
     *
     * The "stream" flag is forced to false so the server replies with a
     * single JSON object instead of newline-delimited chunks.
     *
     * @throws OllamaException when the response is not valid JSON or not a JSON object/array
     */
    private function processRequest(string $method, string $endpoint, ?Client\Request $request = null): array
    {
        $body = $request?->toArray();
        if ($body !== null) {
            $body['stream'] = false;

            $body = json_encode($body, JSON_THROW_ON_ERROR);
        }

        $response = $this->http->request($method, "/api/$endpoint", body: $body);

        try {
            $data = json_decode($response, true, flags: JSON_THROW_ON_ERROR);
        } catch (JsonException $e) {
            throw new OllamaException([], 'Invalid JSON.', previous: $e);
        }

        if (!is_array($data)) {
            throw new OllamaException([], 'Invalid response content.');
        }

        return $data;
    }

    /**
     * Sends a streaming request and yields each decoded JSON chunk.
     *
     * The "stream" flag is forced to true so the server replies with
     * newline-delimited JSON objects.
     *
     * @return Generator<array>
     *
     * @throws OllamaException when a chunk is not valid JSON or not a JSON object/array
     */
    private function processStream(string $method, string $endpoint, Client\Request $request): Generator
    {
        $body = $request->toArray();
        $body['stream'] = true;

        $chunks = $this->http->stream($method, "/api/$endpoint", body: json_encode($body, JSON_THROW_ON_ERROR));
        foreach ($chunks as $chunk) {
            try {
                $content = json_decode($chunk, true, flags: JSON_THROW_ON_ERROR);
            } catch (JsonException $e) {
                throw new OllamaException([], 'Invalid JSON.', previous: $e);
            }

            if (!is_array($content)) {
                throw new OllamaException([], 'Invalid response content.');
            }

            yield $content;
        }
    }

    /**
     * Validates a response that carries only a status, no payload.
     *
     * @throws OllamaException when the server reported an error, or the
     *                         status is absent or anything but "success"
     */
    private function processResponseWithoutContent(array $data): void
    {
        if (isset($data['error'])) {
            throw new OllamaException($data, $data['error']);
        }

        // Guard with ?? null: a malformed response without a "status" key must
        // raise OllamaException, not an "Undefined array key" warning first.
        if (($data['status'] ?? null) !== 'success') {
            throw new OllamaException($data, 'Unexpected response.');
        }
    }
}
--------------------------------------------------------------------------------
/src/Client/Message.php:
--------------------------------------------------------------------------------
toArray();
    }

    public function toArray(): array
    {
        return get_object_vars($this);
    }
}
--------------------------------------------------------------------------------
/src/Client/Request/ChatRequest.php:
--------------------------------------------------------------------------------
logger,
        );

        return new Client($http);
    }
}
--------------------------------------------------------------------------------
/src/Http.php:
--------------------------------------------------------------------------------
executeRequest($method, $uri, $body);

        $this->logger->debug('HTTP Response: {statusCode} {content}', [
            'statusCode' => $response->getStatusCode(),
            'content' => $response->getContent(),
        ]);

        return $response->getContent();
    }

    /**
     * Streams the response body of an HTTP request, yielding each non-empty chunk.
     *
     * @return Generator<string>
     */
    public function stream(string $method, string $uri, ?string $body = null): Generator
    {
        $response = $this->executeRequest($method, $uri, $body);

        foreach ($this->http->stream($response) as $chunk) {
            $content = $chunk->getContent();

            $this->logger->debug('HTTP Response chunk: {content}', [
                'content' => $content,
            ]);

            // Keep-alive / empty chunks carry no data; skip them silently.
            if (empty($content)) {
                continue;
            }

            yield $content;
        }
    }

    /**
     * Logs and dispatches the HTTP request, returning the raw response handle.
     */
    private function executeRequest(string $method, string $uri, ?string $body = null): ResponseInterface
    {
        $this->logger->debug('HTTP Request: {method} {uri}', [
            'method' => $method,
            'uri' => $uri,
            'body' => $body,
        ]);

        return $this->http->request($method, $uri, [
            'body' => $body,
        ]);
    }
}
--------------------------------------------------------------------------------
/src/OllamaException.php: -------------------------------------------------------------------------------- 1 |