├── src
│ ├── Config
│ │ ├── ReasoningConfig.php
│ │ ├── ReasoningEffort.php
│ │ └── ReasoningBudget.php
│ ├── Client
│ │ ├── LLMClientException.php
│ │ ├── LLMChainClient.php
│ │ ├── Gemini
│ │ │ ├── Model
│ │ │ │ ├── GeminiModel.php
│ │ │ │ ├── GeminiImageModel.php
│ │ │ │ ├── Gemini25FlashImage.php
│ │ │ │ ├── Gemini25Pro.php
│ │ │ │ ├── Gemini20Flash.php
│ │ │ │ ├── Gemini25Flash.php
│ │ │ │ ├── Gemini25FlashLite.php
│ │ │ │ ├── Gemini20FlashLite.php
│ │ │ │ ├── Gemini3ProPreview.php
│ │ │ │ ├── Gemini25ProPreview.php
│ │ │ │ ├── Gemini25FlashImagePreview.php
│ │ │ │ └── Gemini3ProImagePreview.php
│ │ │ └── GeminiClient.php
│ │ ├── OpenAI
│ │ │ ├── Model
│ │ │ │ ├── OpenAIModel.php
│ │ │ │ ├── GPTo3.php
│ │ │ │ ├── GPT41.php
│ │ │ │ ├── GPT4o.php
│ │ │ │ ├── GPT5.php
│ │ │ │ ├── GPT41Mini.php
│ │ │ │ ├── GPT5Mini.php
│ │ │ │ ├── GPT5Nano.php
│ │ │ │ ├── GPTo4Mini.php
│ │ │ │ ├── GPT41Nano.php
│ │ │ │ └── GPT4oMini.php
│ │ │ ├── OpenAICompatibleClient.php
│ │ │ ├── OpenAIClient.php
│ │ │ └── AbstractOpenAIClient.php
│ │ ├── Anthropic
│ │ │ ├── Model
│ │ │ │ ├── AnthropicModel.php
│ │ │ │ ├── AnthropicClaude45Opus.php
│ │ │ │ ├── AnthropicClaude4Opus.php
│ │ │ │ ├── AnthropicClaude35Haiku.php
│ │ │ │ ├── AnthropicClaude35Sonnet.php
│ │ │ │ ├── AnthropicClaude37Sonnet.php
│ │ │ │ ├── AnthropicClaude41Opus.php
│ │ │ │ ├── AnthropicClaude45Sonnet.php
│ │ │ │ └── AnthropicClaude4Sonnet.php
│ │ │ ├── Tool
│ │ │ │ ├── AnthropicNativeTool.php
│ │ │ │ └── AnthropicToolTypeResolver.php
│ │ │ └── AnthropicClient.php
│ │ ├── StopReason.php
│ │ ├── ModelEncoder.php
│ │ ├── LLMClient.php
│ │ ├── ModelResponse.php
│ │ ├── LLMBatchClient.php
│ │ ├── Universal
│ │ │ └── LocalModel.php
│ │ ├── ModelInterface.php
│ │ └── LLMAgentClient.php
│ ├── JsonDeserializable.php
│ ├── Message
│ │ ├── LLMMessageContent.php
│ │ ├── LLMMessageText.php
│ │ ├── LLMMessageArrayData.php
│ │ ├── LLMMessagePdf.php
│ │ ├── LLMMessageToolResult.php
│ │ ├── LLMMessageReasoning.php
│ │ ├── LLMMessageToolUse.php
│ │ ├── LLMMessageImage.php
│ │ ├── LLMMessage.php
│ │ └── LLMMessageContents.php
│ ├── Log
│ │ └── LLMLogger.php
│ ├── Tool
│ │ ├── ToolDefinition.php
│ │ ├── TextEditor
│ │ │ └── TextEditorStorage.php
│ │ └── CallbackToolDefinition.php
│ ├── Cache
│ │ ├── CacheInterface.php
│ │ ├── AbstractCache.php
│ │ └── FileCache.php
│ ├── Http
│ │ ├── GuzzleHttpHandler.php
│ │ └── HttpClientFactory.php
│ ├── LLMConversation.php
│ ├── LLMResponse.php
│ ├── LLMRequest.php
│ └── MarkdownFormatter.php
├── phpstan.neon
├── .gitignore
├── .env.example
├── phpunit.xml
├── composer.json
├── .github
│ ├── workflows
│ │ ├── README.md
│ │ ├── docs.yml
│ │ ├── php-tests.yml
│ │ └── integration-tests.yml
│ └── scripts
│   └── fix-php-highlighting.py
├── docs
│ ├── examples
│ │ ├── index.md
│ │ ├── best-practices.md
│ │ └── quick-start.md
│ └── guides
│   ├── multimodal.md
│   └── reasoning.md
├── tests
│ ├── Client
│ │ ├── Anthropic
│ │ │ ├── AnthropicEncoderTest.php
│ │ │ ├── Tool
│ │ │ │ └── AnthropicToolTypeResolverTest.php
│ │ │ ├── AnthropicEncoderTextTest.php
│ │ │ ├── AnthropicEncoderErrorsTest.php
│ │ │ ├── AnthropicEncoderMediaTest.php
│ │ │ └── AnthropicEncoderToolsTest.php
│ │ ├── Gemini
│ │ │ ├── GeminiClientTest.php
│ │ │ ├── GeminiEncoderTextTest.php
│ │ │ ├── GeminiEncoderMediaTest.php
│ │ │ └── GeminiEncoderToolsTest.php
│ │ └── OpenAI
│ │   ├── OpenAICompatibleClientTest.php
│ │   ├── OpenAIEncoderErrorsTest.php
│ │   ├── OpenAIEncoderTextTest.php
│ │   └── OpenAIEncoderMediaTest.php
│ ├── LLMConversationTest.php
│ └── Integration
│   └── IntegrationTestBase.php
├── mkdocs.yml
└── .php-cs-fixer.dist.php
/src/Config/ReasoningConfig.php:
--------------------------------------------------------------------------------
1 | maxTokens;
11 | }
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini25FlashImage.php:
--------------------------------------------------------------------------------
1 |
12 | */
13 | public function sendRequestAsync(LLMRequest $request): PromiseInterface;
14 | }
15 |
--------------------------------------------------------------------------------
/src/Client/ModelResponse.php:
--------------------------------------------------------------------------------
1 | data;
12 | }
13 |
14 | public function getResponseTimeMs(): int {
15 | return $this->responseTimeMs;
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/src/Tool/ToolDefinition.php:
--------------------------------------------------------------------------------
1 | client->send($request, $options);
16 | }
17 |
18 | public function async(RequestInterface $request, array $options = []): PromiseInterface {
19 | return $this->client->sendAsync($request, $options);
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini25Pro.php:
--------------------------------------------------------------------------------
1 | getInputPricePerMillionTokens();
20 | }
21 |
22 | public function getCachedOutputPricePerMillionTokens(): float {
23 | return $this->getOutputPricePerMillionTokens();
24 | }
25 | }
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini20Flash.php:
--------------------------------------------------------------------------------
1 | getInputPricePerMillionTokens();
20 | }
21 |
22 | public function getCachedOutputPricePerMillionTokens(): float {
23 | return $this->getOutputPricePerMillionTokens();
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini25Flash.php:
--------------------------------------------------------------------------------
1 | getInputPricePerMillionTokens();
20 | }
21 |
22 | public function getCachedOutputPricePerMillionTokens(): float {
23 | return $this->getOutputPricePerMillionTokens();
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini25FlashLite.php:
--------------------------------------------------------------------------------
1 | getInputPricePerMillionTokens();
20 | }
21 |
22 | public function getCachedOutputPricePerMillionTokens(): float {
23 | return $this->getOutputPricePerMillionTokens();
24 | }
25 | }
--------------------------------------------------------------------------------
/src/Message/LLMMessageText.php:
--------------------------------------------------------------------------------
1 | text;
11 | }
12 |
13 |
14 | public function isCached(): bool {
15 | return $this->cached;
16 | }
17 |
18 | public function jsonSerialize(): array {
19 | return [
20 | 'text' => $this->text,
21 | 'cached' => $this->cached,
22 | ];
23 | }
24 |
25 | public static function fromJson(array $data): self {
26 | return new self($data['text'], $data['cached']);
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini20FlashLite.php:
--------------------------------------------------------------------------------
1 | getInputPricePerMillionTokens();
20 | }
21 |
22 | public function getCachedOutputPricePerMillionTokens(): float {
23 | return $this->getOutputPricePerMillionTokens();
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/src/Message/LLMMessageArrayData.php:
--------------------------------------------------------------------------------
1 | data;
11 | }
12 |
13 | public function isCached(): bool {
14 | return $this->cached;
15 | }
16 |
17 | public function jsonSerialize(): array {
18 | return [
19 | 'data' => $this->data,
20 | 'cached' => $this->cached,
21 | ];
22 | }
23 |
24 | public static function fromJson(array $data): self {
25 | return new self($data['data'], $data['cached']);
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/Client/Universal/LocalModel.php:
--------------------------------------------------------------------------------
1 | code;
16 | }
17 |
18 | public function getInputPricePerMillionTokens(): float {
19 | return 0.0;
20 | }
21 |
22 | public function getOutputPricePerMillionTokens(): float {
23 | return 0.0;
24 | }
25 |
26 | public function getCachedInputPricePerMillionTokens(): float {
27 | return 0.0;
28 | }
29 |
30 | public function getCachedOutputPricePerMillionTokens(): float {
31 | return 0.0;
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPTo3.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 10;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 40.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 2.5;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini3ProPreview.php:
--------------------------------------------------------------------------------
1 | getInputPricePerMillionTokens();
23 | }
24 |
25 | public function getCachedOutputPricePerMillionTokens(): float {
26 | return $this->getOutputPricePerMillionTokens();
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT41.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 2.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 8.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.5;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT4o.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 2.5;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 10.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 1.25;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT5.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 1.25;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 10.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.125;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT41Mini.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 0.4;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 1.6;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.1;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT5Mini.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 0.25;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 2.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.025;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT5Nano.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 0.05;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 0.4;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.005;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPTo4Mini.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 1.1;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 4.4;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.275;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/phpunit.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | tests
6 | tests/Integration
7 |
8 |
9 | tests/Integration
10 |
11 |
12 |
13 |
14 | src
15 |
16 |
17 |
18 |
19 | integration
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT41Nano.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 0.1;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 0.4;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.025;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/Model/GPT4oMini.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 0.15;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 0.6;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 0.075;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.0;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/OpenAICompatibleClient.php:
--------------------------------------------------------------------------------
1 | 'Bearer ' . $this->apiKey,
21 | ];
22 | }
23 |
24 | protected function getBaseUrl(): string {
25 | return $this->baseUrl;
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/Client/ModelInterface.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 5.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 25.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 6.25;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.5;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Model/AnthropicClaude4Opus.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 15.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 75.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 18.75;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 1.5;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Model/AnthropicClaude35Haiku.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 0.8;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 4.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 1.0;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.08;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Model/AnthropicClaude35Sonnet.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 3.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 15.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 3.75;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.3;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Model/AnthropicClaude37Sonnet.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 3.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 15.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 3.75;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.3;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Model/AnthropicClaude41Opus.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 15.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 75.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 18.75;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 1.5;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Model/AnthropicClaude45Sonnet.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 3.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 15.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 3.75;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.3;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Model/AnthropicClaude4Sonnet.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 3.0;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 15.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return 3.75;
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return 0.3;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/OpenAIClient.php:
--------------------------------------------------------------------------------
1 | 'Bearer ' . $this->apiKey,
17 | 'OpenAI-Organization' => $this->apiOrganization,
18 | ];
19 | }
20 |
21 | protected function getBaseUrl(): string {
22 | return 'https://api.openai.com/v1';
23 | }
24 |
25 | public function getCode(): string {
26 | return self::CODE;
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini25ProPreview.php:
--------------------------------------------------------------------------------
1 | version;
15 | }
16 |
17 | public function getInputPricePerMillionTokens(): float {
18 | return 1.25;
19 | }
20 |
21 | public function getOutputPricePerMillionTokens(): float {
22 | return 10.0;
23 | }
24 |
25 | public function getCachedInputPricePerMillionTokens(): float {
26 | return $this->getInputPricePerMillionTokens();
27 | }
28 |
29 | public function getCachedOutputPricePerMillionTokens(): float {
30 | return $this->getOutputPricePerMillionTokens();
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/Tool/TextEditor/TextEditorStorage.php:
--------------------------------------------------------------------------------
1 | encoding;
12 | }
13 |
14 | public function getData(): string {
15 | return $this->data;
16 | }
17 |
18 | public function isCached(): bool {
19 | return $this->cached;
20 | }
21 |
22 | public function jsonSerialize(): array {
23 | return [
24 | 'encoding' => $this->encoding,
25 | 'data' => $this->data,
26 | 'cached' => $this->cached,
27 | ];
28 | }
29 |
30 | public static function fromJson(array $data): self {
31 | return new self($data['encoding'], $data['data'], $data['cached']);
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/src/Message/LLMMessageToolResult.php:
--------------------------------------------------------------------------------
1 | id;
11 | }
12 |
13 | public function getContent(): LLMMessageContents {
14 | return $this->content;
15 | }
16 |
17 | public function isCached(): bool {
18 | return $this->cached;
19 | }
20 |
21 | public function jsonSerialize(): array {
22 | return [
23 | 'id' => $this->id,
24 | 'content' => $this->content,
25 | 'cached' => $this->cached,
26 | ];
27 | }
28 |
29 | public static function fromJson(array $data): self {
30 | return new self($data['id'], LLMMessageContents::fromJson($data['content']), $data['cached']);
31 | }
32 |
33 | }
34 |
--------------------------------------------------------------------------------
/src/Message/LLMMessageReasoning.php:
--------------------------------------------------------------------------------
1 | text;
11 | }
12 |
13 | public function getSignature(): ?string {
14 | return $this->signature;
15 | }
16 |
17 |
18 | public function isCached(): bool {
19 | return $this->cached;
20 | }
21 |
22 | public function jsonSerialize(): array {
23 | return [
24 | 'text' => $this->text,
25 | 'signature' => $this->signature,
26 | 'cached' => $this->cached,
27 | ];
28 | }
29 |
30 | public static function fromJson(array $data): self {
31 | return new self(
32 | $data['text'],
33 | $data['signature'] ?? null,
34 | $data['cached'] ?? false,
35 | );
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/composer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "soukicz/llm",
3 | "description": "LLM client with support for cache, tools and async requests",
4 | "keywords": ["llm", "ai", "openai", "anthropic"],
5 | "type": "library",
6 | "license": ["BSD-3-Clause"],
7 | "autoload": {
8 | "psr-4": {
9 | "Soukicz\\Llm\\": "src/"
10 | }
11 | },
12 | "autoload-dev": {
13 | "psr-4": {
14 | "Soukicz\\Llm\\Tests\\": "tests/"
15 | }
16 | },
17 | "authors": [
18 | {
19 | "name": "Petr Soukup",
20 | "email": "soukup@simplia.cz"
21 | }
22 | ],
23 | "require": {
24 | "php": ">=8.3",
25 | "psr/http-message": "^1.1 || ^2.0",
26 | "guzzlehttp/guzzle": "^7.9",
27 | "guzzlehttp/promises": "^2.0",
28 | "guzzlehttp/psr7": "^2.7",
29 | "ext-zlib": "*",
30 | "swaggest/json-schema": "^0.12.43",
31 | "ramsey/uuid": "^4.8"
32 | },
33 | "require-dev": {
34 | "phpstan/phpstan": "^2.1",
35 | "phpunit/phpunit": "^10.5"
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/Message/LLMMessageToolUse.php:
--------------------------------------------------------------------------------
1 | id;
12 | }
13 |
14 | public function getName(): string {
15 | return $this->name;
16 | }
17 |
18 | public function getInput(): array {
19 | return $this->input;
20 | }
21 |
22 | public function isCached(): bool {
23 | return $this->cached;
24 | }
25 |
26 | public function jsonSerialize(): array {
27 | return [
28 | 'id' => $this->id,
29 | 'name' => $this->name,
30 | 'input' => $this->input,
31 | 'cached' => $this->cached,
32 | ];
33 | }
34 |
35 | public static function fromJson(array $data): self {
36 | return new self($data['id'], $data['name'], $data['input'], $data['cached']);
37 | }
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/.github/workflows/README.md:
--------------------------------------------------------------------------------
1 | # GitHub Actions Workflows
2 |
3 | This directory contains GitHub Actions workflows for the PHP LLM library.
4 |
5 | ## Workflows
6 |
7 | ### php-tests.yml
8 | Runs on every push and pull request to main/master branches.
9 | - **PHPStan Analysis**: Static analysis on src/ and tests/ (excluding Integration tests)
10 | - **Unit Tests**: Runs unit tests on PHP 8.1, 8.2, and 8.3 (excludes integration tests)
11 | - **Lowest Dependencies Test**: Tests with minimum dependency versions (excludes integration tests)
12 |
13 | ### integration-tests.yml
14 | Runs only when:
15 | - Manually triggered via workflow_dispatch
16 | - Push to master branch (requires environment approval)
17 | - Pull requests with 'integration-tests' label
18 |
19 | Requires GitHub secrets:
20 | - `ANTHROPIC_API_KEY`
21 | - `OPENAI_API_KEY`
22 | - `GEMINI_API_KEY`
23 |
24 | ## Important Notes
25 |
26 | 1. Integration tests are explicitly excluded from regular test runs using `--exclude-group integration`
27 | 2. The `integration-tests` environment should have protection rules requiring manual approval
28 | 3. Integration tests have a 10-minute timeout and cost tracking ($5 limit per run)
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini25FlashImagePreview.php:
--------------------------------------------------------------------------------
1 | imageAspectRatio;
14 | }
15 |
16 | public function getImageSize(): ?string {
17 | return $this->imageSize;
18 | }
19 |
20 | public function getCode(): string {
21 | return 'gemini-2.5-flash-image-preview';
22 | }
23 |
24 | public function getInputPricePerMillionTokens(): float {
25 | return 0.30;
26 | }
27 |
28 | public function getOutputPricePerMillionTokens(): float {
29 | return 30.0;
30 | }
31 |
32 | public function getCachedInputPricePerMillionTokens(): float {
33 | return $this->getInputPricePerMillionTokens();
34 | }
35 |
36 | public function getCachedOutputPricePerMillionTokens(): float {
37 | return $this->getOutputPricePerMillionTokens();
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/src/Cache/AbstractCache.php:
--------------------------------------------------------------------------------
1 | (string) $request->getUri(),
13 | 'method' => $request->getMethod(),
14 | 'body' => (string) $request->getBody(),
15 | ], JSON_THROW_ON_ERROR));
16 | }
17 |
18 | protected function responseFromJson(string $jsonString): ResponseInterface {
19 | $data = json_decode($jsonString, true, 512, JSON_THROW_ON_ERROR);
20 |
21 | return new Response($data['status'], $data['headers'], $data['body']);
22 | }
23 |
24 | protected function responseToJson(ResponseInterface $response): string {
25 | return json_encode([
26 | 'body' => (string) $response->getBody(),
27 | 'status' => $response->getStatusCode(),
28 | 'headers' => $response->getHeaders(),
29 | ], JSON_THROW_ON_ERROR);
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/src/Message/LLMMessageImage.php:
--------------------------------------------------------------------------------
1 | encoding;
12 | }
13 |
14 | public function getMediaType(): string {
15 | return $this->mediaType;
16 | }
17 |
18 | public function getData(): string {
19 | return $this->data;
20 | }
21 |
22 | public function isCached(): bool {
23 | return $this->cached;
24 | }
25 |
26 | public function jsonSerialize(): array {
27 | return [
28 | 'encoding' => $this->encoding,
29 | 'mediaType' => $this->mediaType,
30 | 'data' => $this->data,
31 | 'cached' => $this->cached,
32 | ];
33 | }
34 |
35 | public static function fromJson(array $data): self {
36 | return new self(
37 | $data['encoding'],
38 | $data['mediaType'],
39 | $data['data'],
40 | $data['cached'],
41 | );
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/src/Client/Gemini/Model/Gemini3ProImagePreview.php:
--------------------------------------------------------------------------------
1 | imageAspectRatio;
17 | }
18 |
19 | public function getImageSize(): ?string {
20 | return $this->imageSize;
21 | }
22 |
23 | public function getCode(): string {
24 | return 'gemini-3-pro-image-preview';
25 | }
26 |
27 | public function getInputPricePerMillionTokens(): float {
28 | return 2.0;
29 | }
30 |
31 | public function getOutputPricePerMillionTokens(): float {
32 | return 120.0;
33 | }
34 |
35 | public function getCachedInputPricePerMillionTokens(): float {
36 | return $this->getInputPricePerMillionTokens();
37 | }
38 |
39 | public function getCachedOutputPricePerMillionTokens(): float {
40 | return $this->getOutputPricePerMillionTokens();
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/src/Cache/FileCache.php:
--------------------------------------------------------------------------------
1 | cacheDir)) {
11 | throw new \RuntimeException('Cache directory does not exist: ' . $this->cacheDir);
12 | }
13 | }
14 |
15 | private function getPath(string $key): string {
16 | return $this->cacheDir . '/' . md5($key) . '.json';
17 | }
18 |
19 | public function fetch(RequestInterface $request): ?ResponseInterface {
20 | $path = $this->getPath($this->getCacheKey($request));
21 | if (!file_exists($path)) {
22 | return null;
23 | }
24 |
25 | return $this->responseFromJson(file_get_contents($path));
26 | }
27 |
28 | public function store(RequestInterface $request, ResponseInterface $response): void {
29 | $key = $this->getCacheKey($request);
30 | file_put_contents($this->getPath($key), $this->responseToJson($response));
31 | }
32 |
33 | public function invalidate(RequestInterface $request): void {
34 | @unlink($this->getPath($this->getCacheKey($request)));
35 | }
36 |
37 | }
38 |
--------------------------------------------------------------------------------
/src/Tool/CallbackToolDefinition.php:
--------------------------------------------------------------------------------
1 | name = $name;
20 | $this->description = $description;
21 | $this->inputSchema = $inputSchema;
22 | $this->handler = $handler;
23 | }
24 |
    /** Tool name exposed to the model. */
    public function getName(): string {
        return $this->name;
    }

    /** Description of what the tool does, shown to the model. */
    public function getDescription(): string {
        return $this->description;
    }

    /** Schema array describing the tool's expected input (as provided at construction). */
    public function getInputSchema(): array {
        return $this->inputSchema;
    }
36 |
37 | public function handle(array $input): PromiseInterface|LLMMessageContents {
38 | $result = ($this->handler)($input);
39 |
40 | if ($result instanceof PromiseInterface) {
41 | return $result->then(static function (LLMMessageContents $response) {
42 | return $response;
43 | });
44 | }
45 |
46 | return $result;
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/src/LLMConversation.php:
--------------------------------------------------------------------------------
1 | threadId = $threadId ?? Uuid::uuid7()->toString();
13 | foreach ($messages as $message) {
14 | if (!$message instanceof LLMMessage) {
15 | throw new InvalidArgumentException('Only LLMMessage instances are allowed');
16 | }
17 | }
18 | }
19 |
    /** UUID (v7 unless supplied by the caller) identifying this conversation thread. */
    public function getThreadId(): string {
        return $this->threadId;
    }
23 |
    /**
     * Messages in the order they were added.
     *
     * @return LLMMessage[]
     */
    public function getMessages(): array {
        return $this->messages;
    }
30 |
31 | public function withMessage(LLMMessage $message): self {
32 | $messages = $this->messages;
33 | $messages[] = $message;
34 |
35 | return new self($messages, $this->threadId);
36 | }
37 |
    /**
     * Serializes to the shape consumed by fromJson(): thread id plus message list.
     */
    public function jsonSerialize(): array {
        return [
            'threadId' => $this->threadId,
            'messages' => $this->messages,
        ];
    }
44 |
    /**
     * Reconstructs a conversation from jsonSerialize() output.
     * A missing 'threadId' (older payloads) results in a freshly generated id.
     */
    public static function fromJson(array $data): self {
        return new self(array_map(static fn(array $message) => LLMMessage::fromJson($message), $data['messages']), $data['threadId'] ?? null);
    }
48 |
49 | public function getLastMessage(): LLMMessage {
50 | return $this->messages[array_key_last($this->messages)];
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: Deploy Documentation
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | workflow_dispatch:
8 |
9 | permissions:
10 | contents: read
11 | pages: write
12 | id-token: write
13 |
14 | concurrency:
15 | group: "pages"
16 | cancel-in-progress: false
17 |
18 | jobs:
19 | build:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - name: Checkout code
23 | uses: actions/checkout@v4
24 |
25 | - name: Setup Pages
26 | uses: actions/configure-pages@v5
27 |
28 | - name: Set up Python
29 | uses: actions/setup-python@v5
30 | with:
31 | python-version: 3.x
32 |
33 | - name: Cache pip dependencies
34 | uses: actions/cache@v4
35 | with:
36 | key: ${{ github.ref }}
37 | path: .cache
38 |
39 | - name: Install dependencies
40 | run: pip install mkdocs-material
41 |
42 | - name: Prepare documentation
43 | run: |
44 | echo "Copying Readme.md to docs/index.md..."
45 | cp Readme.md docs/index.md
46 |
47 | echo "Fixing links in docs/index.md..."
48 | sed -i 's|](docs/|](|g' docs/index.md
49 |
50 | echo "Fixing PHP code blocks..."
51 | python3 .github/scripts/fix-php-highlighting.py
52 |
53 | - name: Build site
54 | run: mkdocs build --strict --site-dir ./_site
55 | env:
56 | CI: true
57 |
58 | - name: Upload artifact
59 | uses: actions/upload-pages-artifact@v3
60 |
61 | deploy:
62 | environment:
63 | name: github-pages
64 | url: ${{ steps.deployment.outputs.page_url }}
65 | runs-on: ubuntu-latest
66 | needs: build
67 | steps:
68 | - name: Deploy to GitHub Pages
69 | id: deployment
70 | uses: actions/deploy-pages@v4
71 |
--------------------------------------------------------------------------------
/docs/examples/index.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | Practical, copy-paste ready examples to help you get started with PHP LLM and build powerful AI applications.
4 |
5 | ## Getting Started
6 |
7 | - **[Quick Start](quick-start.md)** - Get up and running in minutes with basic examples for simple synchronous requests, conversation management, and streaming responses.
8 |
9 | ## Core Functionality
10 |
11 | - **[Tools & Function Calling](tools-and-function-calling.md)** - Enable AI agents to interact with external systems, databases, APIs, and custom code. Learn how to create tools, handle parallel execution, and build intelligent agents.
12 |
13 | - **[Multimodal](multimodal.md)** - Process images and PDFs with AI models. Perfect for document analysis, visual understanding, and data extraction tasks.
14 |
15 | - **[State Management](state-management.md)** - Manage conversation history, implement persistent sessions, and build stateful AI applications.
16 |
17 | ## Production Patterns
18 |
19 | - **[Logging & Debugging](logging-debugging.md)** - Monitor your AI applications with comprehensive logging, debugging tools, and performance tracking.
20 |
21 | - **[Best Practices](best-practices.md)** - Security, performance optimization, cost management, and architectural patterns for building production-ready AI applications.
22 |
23 | ## What You'll Learn
24 |
25 | These examples cover:
26 | - **Basic usage**: Simple requests, conversations, streaming
27 | - **Advanced features**: Tools, multimodal, caching, reasoning models
28 | - **Production patterns**: Error handling, logging, retries, resilience
29 | - **Best practices**: Security, performance, cost optimization
30 | - **Real-world scenarios**: Practical code you can adapt to your needs
31 |
32 | All examples are tested and production-ready. Start with [Quick Start](quick-start.md) if you're new to PHP LLM.
33 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/Tool/AnthropicToolTypeResolver.php:
--------------------------------------------------------------------------------
1 | request->getConversation();
14 | }
15 |
    /** The original request this response answers. */
    public function getRequest(): LLMRequest {
        return $this->request;
    }
19 |
20 | public function getLastText(): string {
21 | $lastMessage = $this->getConversation()->getMessages()[count($this->getConversation()->getMessages()) - 1];
22 | foreach (array_reverse(iterator_to_array($lastMessage->getContents())) as $content) {
23 | if ($content instanceof LLMMessageText) {
24 | return $content->getText();
25 | }
26 | }
27 |
28 | throw new \RuntimeException('No text message found');
29 | }
30 |
    /** Why generation stopped (end of turn, token limit, tool use, ...). */
    public function getStopReason(): StopReason {
        return $this->stopReason;
    }

    /** Number of input (prompt) tokens consumed. */
    public function getInputTokens(): int {
        return $this->inputTokens;
    }

    /** Number of output (completion) tokens produced. */
    public function getOutputTokens(): int {
        return $this->outputTokens;
    }

    /** Output-token limit that applied to this request. */
    public function getMaximumOutputTokens(): int {
        return $this->maximumOutputTokens;
    }

    /** Input cost in USD, or null when pricing is unavailable. */
    public function getInputPriceUsd(): ?float {
        return $this->inputPriceUsd;
    }

    /** Output cost in USD, or null when pricing is unavailable. */
    public function getOutputPriceUsd(): ?float {
        return $this->outputPriceUsd;
    }

    /** Total wall-clock request duration in milliseconds. */
    public function getTotalTimeMs(): int {
        return $this->totalTimeMs;
    }
58 |
59 | }
60 |
--------------------------------------------------------------------------------
/tests/Client/Anthropic/AnthropicEncoderTest.php:
--------------------------------------------------------------------------------
1 | encoder = new AnthropicEncoder();
20 | }
21 |
    /**
     * Encoding a minimal system+user conversation must yield the Anthropic
     * Messages API payload shape: model code, max_tokens, temperature, a
     * top-level "system" string, stop_sequences, and a single user message
     * containing one text content block.
     */
    public function testBasicTextRequestEncoding(): void {
        // Create a simple request with text only
        $conversation = new LLMConversation([
            LLMMessage::createFromSystemString('System instruction'),
            LLMMessage::createFromUserString('User message'),
        ]);

        $request = new LLMRequest(
            model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
            conversation: $conversation,
            temperature: 0.5,
            maxTokens: 500,
            stopSequences: ['STOP']
        );

        $encoded = $this->encoder->encodeRequest($request);

        // Basic structure checks
        $this->assertEquals('claude-3-5-sonnet-20241022', $encoded['model']);
        $this->assertEquals(500, $encoded['max_tokens']);
        $this->assertEquals(0.5, $encoded['temperature']);
        $this->assertEquals('System instruction', $encoded['system']);
        $this->assertEquals(['STOP'], $encoded['stop_sequences']);

        // Message structure: system message is lifted out, leaving one user message
        $this->assertCount(1, $encoded['messages']);
        $this->assertEquals('user', $encoded['messages'][0]['role']);
        $this->assertCount(1, $encoded['messages'][0]['content']);
        $this->assertEquals('text', $encoded['messages'][0]['content'][0]['type']);
        $this->assertEquals('User message', $encoded['messages'][0]['content'][0]['text']);
    }
53 | }
54 |
--------------------------------------------------------------------------------
/src/Message/LLMMessage.php:
--------------------------------------------------------------------------------
1 | content;
17 | }
18 |
    /** True when this message was authored by the user. */
    public function isUser(): bool {
        return $this->type === self::TYPE_USER;
    }

    /** True when this message was authored by the assistant/model. */
    public function isAssistant(): bool {
        return $this->type === self::TYPE_ASSISTANT;
    }

    /** True when this is a system/instruction message. */
    public function isSystem(): bool {
        return $this->type === self::TYPE_SYSTEM;
    }
30 |
31 |
    /** Creates a user message from pre-built content blocks. */
    public static function createFromUser(LLMMessageContents $content): LLMMessage {
        return new self(self::TYPE_USER, $content);
    }

    /** Convenience: creates a user message holding a single text block. */
    public static function createFromUserString(string $content): LLMMessage {
        return new self(self::TYPE_USER, new LLMMessageContents([new LLMMessageText($content)]));
    }

    /** Creates an assistant message from pre-built content blocks. */
    public static function createFromAssistant(LLMMessageContents $content): LLMMessage {
        return new self(self::TYPE_ASSISTANT, $content);
    }

    /** Convenience: creates an assistant message holding a single text block. */
    public static function createFromAssistantString(string $content): LLMMessage {
        return new self(self::TYPE_ASSISTANT, new LLMMessageContents([new LLMMessageText($content)]));
    }

    /** Creates a system message from pre-built content blocks. */
    public static function createFromSystem(LLMMessageContents $content): LLMMessage {
        return new self(self::TYPE_SYSTEM, $content);
    }

    /** Convenience: creates a system message holding a single text block. */
    public static function createFromSystemString(string $content): LLMMessage {
        return new self(self::TYPE_SYSTEM, new LLMMessageContents([new LLMMessageText($content)]));
    }
56 |
57 |
    /**
     * Serializes to the shape consumed by fromJson(): type plus content blocks.
     */
    public function jsonSerialize(): array {
        return [
            'type' => $this->type,
            'content' => $this->content,
        ];
    }

    /**
     * Reconstructs a message from jsonSerialize() output.
     */
    public static function fromJson(array $data): self {
        return new self($data['type'], LLMMessageContents::fromJson($data['content']));
    }
}
69 |
--------------------------------------------------------------------------------
/tests/Client/Anthropic/Tool/AnthropicToolTypeResolverTest.php:
--------------------------------------------------------------------------------
1 | assertEquals(
26 | 'text_editor_20250728',
27 | AnthropicToolTypeResolver::getTextEditorType($model),
28 | 'Failed for model: ' . $model->getCode()
29 | );
30 | }
31 | }
32 |
    /**
     * Claude 3.7 Sonnet maps to the dated text_editor_20250124 tool type.
     */
    public function testGetTextEditorTypeForClaude37Sonnet(): void {
        $model = new AnthropicClaude37Sonnet(AnthropicClaude37Sonnet::VERSION_20250219);
        $this->assertEquals('text_editor_20250124', AnthropicToolTypeResolver::getTextEditorType($model));
    }
37 |
    /**
     * Claude 3.5 Sonnet and Haiku map to text_editor_20250429.
     * NOTE(review): whether this is the resolver's default branch or an
     * explicit mapping is not visible here — confirm in AnthropicToolTypeResolver.
     */
    public function testGetTextEditorTypeForOtherModels(): void {
        $models = [
            new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
            new AnthropicClaude35Haiku(AnthropicClaude35Haiku::VERSION_20241022),
        ];

        foreach ($models as $model) {
            $this->assertEquals(
                'text_editor_20250429',
                AnthropicToolTypeResolver::getTextEditorType($model),
                'Failed for model: ' . $model->getCode()
            );
        }
    }
52 | }
53 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: PHP LLM
2 | site_description: Agentic AI Framework for PHP - Build powerful AI agents with Large Language Models
3 | site_url: https://soukicz.github.io/php-llm/
4 | repo_url: https://github.com/soukicz/php-llm
5 | repo_name: soukicz/php-llm
6 |
7 | theme:
8 | name: material
9 | palette:
10 | # Palette toggle for light mode
11 | - scheme: default
12 | primary: indigo
13 | accent: indigo
14 | toggle:
15 | icon: material/brightness-7
16 | name: Switch to dark mode
17 | # Palette toggle for dark mode
18 | - scheme: slate
19 | primary: indigo
20 | accent: indigo
21 | toggle:
22 | icon: material/brightness-4
23 | name: Switch to light mode
24 | features:
25 | - navigation.tabs
26 | - navigation.sections
27 | - navigation.expand
28 | - navigation.top
29 | - search.suggest
30 | - search.highlight
31 | - content.code.copy
32 | - content.code.annotate
33 | icon:
34 | repo: fontawesome/brands/github
35 |
36 | nav:
37 | - Home: index.md
38 | - Getting Started:
39 | - Quick Start: examples/quick-start.md
40 | - Examples:
41 | - Quick Start: examples/quick-start.md
42 | - Best Practices: examples/best-practices.md
43 | - Tools & Function Calling: examples/tools-and-function-calling.md
44 | - Multimodal: examples/multimodal.md
45 | - State Management: examples/state-management.md
46 | - Logging & Debugging: examples/logging-debugging.md
47 | - Guides:
48 | - Configuration: guides/configuration.md
49 | - Tools: guides/tools.md
50 | - Caching: guides/caching.md
51 | - Batch Processing: guides/batch-processing.md
52 | - Multimodal: guides/multimodal.md
53 | - Reasoning: guides/reasoning.md
54 | - Feedback Loops: guides/feedback-loops.md
55 | - Providers:
56 | - Overview: providers/README.md
57 |
58 | markdown_extensions:
59 | - pymdownx.highlight:
60 | anchor_linenums: true
61 | line_spans: __span
62 | pygments_lang_class: true
63 | - pymdownx.inlinehilite
64 | - pymdownx.snippets
65 | - pymdownx.superfences
66 | - pymdownx.details
67 | - admonition
68 | - tables
69 | - attr_list
70 | - md_in_html
71 | - toc:
72 | permalink: true
73 |
74 | plugins:
75 | - search
76 |
77 | extra:
78 | social:
79 | - icon: fontawesome/brands/github
80 | link: https://github.com/soukicz/php-llm
81 |
--------------------------------------------------------------------------------
/.github/workflows/php-tests.yml:
--------------------------------------------------------------------------------
1 | name: PHP Tests
2 |
3 | on:
4 | push:
5 | branches: [ main, master ]
6 | pull_request:
7 | branches: [ main, master ]
8 | workflow_dispatch:
9 |
10 | jobs:
11 | phpstan:
12 | name: PHPStan Analysis
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: Checkout code
17 | uses: actions/checkout@v3
18 |
19 | - name: Setup PHP
20 | uses: shivammathur/setup-php@v2
21 | with:
22 | php-version: '8.3'
23 | extensions: mbstring, xml, json, zlib
24 | coverage: none
25 |
26 | - name: Install dependencies
27 | run: composer install --prefer-dist --no-progress
28 |
29 | - name: Run PHPStan
30 | run: vendor/bin/phpstan analyse src
31 |
32 | unit-tests:
33 | name: Unit Tests (PHP ${{ matrix.php-version }})
34 | runs-on: ubuntu-latest
35 | strategy:
36 | fail-fast: false
37 | matrix:
38 | php-version: ['8.3', '8.4']
39 |
40 | steps:
41 | - name: Checkout code
42 | uses: actions/checkout@v3
43 |
44 | - name: Setup PHP
45 | uses: shivammathur/setup-php@v2
46 | with:
47 | php-version: ${{ matrix.php-version }}
48 | extensions: mbstring, xml, json, zlib
49 | coverage: xdebug
50 |
51 | - name: Cache Composer packages
52 | id: composer-cache
53 | uses: actions/cache@v3
54 | with:
55 | path: vendor
56 | key: ${{ runner.os }}-php-${{ matrix.php-version }}-${{ hashFiles('**/composer.lock') }}
57 | restore-keys: |
58 | ${{ runner.os }}-php-${{ matrix.php-version }}-
59 |
60 | - name: Install dependencies
61 | run: composer install --prefer-dist --no-progress
62 |
63 | - name: Run tests
64 | run: XDEBUG_MODE=coverage vendor/bin/phpunit --coverage-text --testsuite default
65 |
66 | test-lowest-dependencies:
67 | name: Test with lowest dependencies (PHP 8.3)
68 | runs-on: ubuntu-latest
69 |
70 | steps:
71 | - name: Checkout code
72 | uses: actions/checkout@v3
73 |
74 | - name: Setup PHP
75 | uses: shivammathur/setup-php@v2
76 | with:
77 | php-version: '8.3'
78 | extensions: mbstring, xml, json, zlib
79 | coverage: none
80 |
81 | - name: Install dependencies
82 | run: composer update --prefer-lowest --prefer-dist --no-progress
83 |
84 | - name: Run tests
85 | run: vendor/bin/phpunit --testsuite default
86 |
--------------------------------------------------------------------------------
/.github/workflows/integration-tests.yml:
--------------------------------------------------------------------------------
1 | name: Integration Tests
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | verbose:
7 | description: 'Enable verbose output'
8 | required: false
9 | default: 'false'
10 | type: choice
11 | options:
12 | - 'true'
13 | - 'false'
14 | push:
15 | branches: [ master ]
16 | paths:
17 | - 'src/**'
18 | - 'tests/Integration/**'
19 | - '.github/workflows/integration-tests.yml'
20 | pull_request:
21 | branches: [ master ]
22 | types: [ labeled ]
23 |
24 | jobs:
25 | integration-tests:
26 | name: Run Integration Tests
27 | runs-on: ubuntu-latest
28 | # Only run on manual trigger or push to master
29 | if: |
30 | github.event_name == 'workflow_dispatch' ||
31 | github.event_name == 'push'
32 |
33 | environment: integration-tests # Use GitHub environment for protection rules
34 |
35 | steps:
36 | - name: Checkout code
37 | uses: actions/checkout@v4
38 |
39 | - name: Setup PHP
40 | uses: shivammathur/setup-php@v2
41 | with:
42 | php-version: '8.3'
43 | extensions: mbstring, xml, json, zlib
44 | coverage: none
45 |
46 | - name: Validate composer.json and composer.lock
47 | run: composer validate --strict
48 |
49 | - name: Cache Composer packages
50 | id: composer-cache
51 | uses: actions/cache@v3
52 | with:
53 | path: vendor
54 | key: ${{ runner.os }}-php-8.3-${{ hashFiles('**/composer.lock') }}
55 | restore-keys: |
56 | ${{ runner.os }}-php-8.3-
57 |
58 | - name: Install dependencies
59 | run: composer install --prefer-dist --no-progress
60 |
61 | - name: Setup environment
62 | env:
63 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
64 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
65 | GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
66 | OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
67 | run: |
68 | # Create .env file from secrets
69 | cat > .env << EOF
70 | ANTHROPIC_API_KEY=$ANTHROPIC_API_KEY
71 | OPENAI_API_KEY=$OPENAI_API_KEY
72 | GEMINI_API_KEY=$GEMINI_API_KEY
73 | OPENROUTER_API_KEY=$OPENROUTER_API_KEY
74 | INTEGRATION_TEST_VERBOSE=${{ github.event.inputs.verbose || 'false' }}
75 | INTEGRATION_TEST_MAX_COST=5.00
76 | EOF
77 |
78 | - name: Run integration tests
79 | run: |
80 | vendor/bin/phpunit --testsuite integration --display-warnings --display-notices
81 | timeout-minutes: 10
82 |
83 | - name: Upload test results
84 | if: always()
85 | uses: actions/upload-artifact@v4
86 | with:
87 | path: .phpunit.result.cache
88 |
--------------------------------------------------------------------------------
/tests/Client/Gemini/GeminiClientTest.php:
--------------------------------------------------------------------------------
1 | mockHandler = new MockHandler();
27 | $this->requestHistory = [];
28 |
29 | $handlerStack = HandlerStack::create($this->mockHandler);
30 | $handlerStack->push(Middleware::history($this->requestHistory));
31 |
32 | $httpClient = new Client(['handler' => $handlerStack]);
33 |
34 | // Monkey patch the GeminiClient to use our mock HTTP client
35 | GeminiClient::$testHttpClient = $httpClient;
36 | }
37 |
    /**
     * End-to-end decode test against a mocked HTTP layer: a single Gemini
     * candidate with one text part plus usageMetadata must surface as the last
     * text, the prompt/candidate token counts, and a duration taken from the
     * X-Request-Duration-ms response header.
     *
     * NOTE(review): setUp() assigns the static GeminiClient::$testHttpClient and
     * no reset is visible in this file — confirm a tearDown exists elsewhere so
     * the mock does not leak into other tests.
     */
    public function testSendRequestAsync(): void {
        // Mock response data
        $responseBody = json_encode([
            'candidates' => [
                [
                    'content' => [
                        'parts' => [
                            ['text' => 'This is a response from Gemini AI.'],
                        ],
                    ],
                    'finishReason' => 'STOP',
                ],
            ],
            'usageMetadata' => [
                'promptTokenCount' => 10,
                'candidatesTokenCount' => 8,
            ],
        ]);

        // Queue the mock response
        $this->mockHandler->append(
            new Response(200, ['Content-Type' => 'application/json', 'X-Request-Duration-ms' => '150'], $responseBody)
        );

        $geminiClient = new GeminiClient('fake-api-key');

        // Create a conversation
        $conversation = new LLMConversation([
            LLMMessage::createFromUserString('Tell me a joke'),
        ]);

        $request = new LLMRequest(
            model: new Gemini20Flash(),
            conversation: $conversation
        );

        // Send the request
        $responsePromise = $geminiClient->sendRequestAsync($request);
        $response = $responsePromise->wait();

        // Assert that the response was properly decoded
        $this->assertEquals('This is a response from Gemini AI.', $response->getLastText());
        $this->assertEquals(10, $response->getInputTokens());
        $this->assertEquals(8, $response->getOutputTokens());
        $this->assertEquals(150, $response->getTotalTimeMs());
    }
84 |
85 | }
86 |
--------------------------------------------------------------------------------
/tests/Client/Anthropic/AnthropicEncoderTextTest.php:
--------------------------------------------------------------------------------
1 | encodeRequest($request);
32 |
33 | // Verify basic structure
34 | $this->assertEquals('claude-3-5-sonnet-20241022', $encoded['model']);
35 | $this->assertEquals(100, $encoded['max_tokens']);
36 | $this->assertEquals('You are a helpful assistant.', $encoded['system']);
37 |
38 | // Verify message format
39 | $this->assertCount(1, $encoded['messages']);
40 | $this->assertEquals('user', $encoded['messages'][0]['role']);
41 |
42 | // Verify message content
43 | $this->assertCount(1, $encoded['messages'][0]['content']);
44 | $this->assertEquals('text', $encoded['messages'][0]['content'][0]['type']);
45 | $this->assertEquals('Hello', $encoded['messages'][0]['content'][0]['text']);
46 | }
47 |
    /**
     * A user/assistant/user exchange must encode to three messages with
     * alternating roles and the original text preserved in order.
     */
    public function testMultipleMessagesInConversation(): void {
        $encoder = new AnthropicEncoder();

        // Create a conversation with multiple messages
        $conversation = new LLMConversation([
            LLMMessage::createFromUserString('First message'),
            LLMMessage::createFromAssistantString('First response'),
            LLMMessage::createFromUserString('Second message'),
        ]);

        $request = new LLMRequest(
            model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
            conversation: $conversation
        );

        $encoded = $encoder->encodeRequest($request);

        // Verify message count
        $this->assertCount(3, $encoded['messages']);

        // Verify message roles
        $this->assertEquals('user', $encoded['messages'][0]['role']);
        $this->assertEquals('assistant', $encoded['messages'][1]['role']);
        $this->assertEquals('user', $encoded['messages'][2]['role']);

        // Verify messages content
        $this->assertEquals('First message', $encoded['messages'][0]['content'][0]['text']);
        $this->assertEquals('First response', $encoded['messages'][1]['content'][0]['text']);
        $this->assertEquals('Second message', $encoded['messages'][2]['content'][0]['text']);
    }
78 | }
79 |
--------------------------------------------------------------------------------
/tests/Client/Anthropic/AnthropicEncoderErrorsTest.php:
--------------------------------------------------------------------------------
1 | expectException(InvalidArgumentException::class);
35 | $this->expectExceptionMessage('Multiple system messages');
36 |
37 | $encoder->encodeRequest($request);
38 | }
39 |
    /**
     * A system message whose only content block is an image must be rejected
     * with an "Unsupported system message type" error.
     */
    public function testNonTextSystemMessageThrowsException(): void {
        $encoder = new AnthropicEncoder();

        // Create a conversation with a non-text system message
        $conversation = new LLMConversation([
            LLMMessage::createFromSystem(new LLMMessageContents([new LLMMessageImage('base64', 'image/jpeg', 'data', false)])),
            LLMMessage::createFromUserString('Hello'),
        ]);

        $request = new LLMRequest(
            model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
            conversation: $conversation
        );

        $this->expectException(InvalidArgumentException::class);
        $this->expectExceptionMessage('Unsupported system message type');

        $encoder->encodeRequest($request);
    }
59 |
    /**
     * A system message containing more than one content block must be rejected.
     * NOTE(review): the method name says "InvalidSystemMessageType" but the
     * scenario exercised is multiple content blocks — consider renaming for clarity.
     */
    public function testInvalidSystemMessageTypeThrowsException(): void {
        $encoder = new AnthropicEncoder();

        // Create a conversation with multiple content blocks in system message
        $conversation = new LLMConversation([
            LLMMessage::createFromSystem(new LLMMessageContents([
                new LLMMessageText('System message'),
                new LLMMessageText('Another system message'),
            ])),
            LLMMessage::createFromUserString('Hello'),
        ]);

        $request = new LLMRequest(
            model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
            conversation: $conversation
        );

        $this->expectException(InvalidArgumentException::class);
        $this->expectExceptionMessage('System message supports only one content block');

        $encoder->encodeRequest($request);
    }
82 | }
83 |
--------------------------------------------------------------------------------
/tests/Client/OpenAI/OpenAICompatibleClientTest.php:
--------------------------------------------------------------------------------
1 | mockHandler = new MockHandler();
26 | $this->requestHistory = [];
27 | }
28 |
    /**
     * The compatible client must POST to {baseUrl}/chat/completions and inject
     * the LocalModel's code into the JSON body's "model" field; the mocked
     * choice text must round-trip into getLastText().
     */
    public function testSendRequestAsyncUsesCustomBaseUrlAndModel(): void {
        $this->mockHandler->append(
            new Response(200, ['Content-Type' => 'application/json'], json_encode([
                'choices' => [
                    [
                        'message' => [
                            'content' => 'Hello from custom API!',
                            'role' => 'assistant',
                        ],
                        'finish_reason' => 'stop',
                    ],
                ],
                'usage' => [
                    'prompt_tokens' => 5,
                    'completion_tokens' => 4,
                    'total_tokens' => 9,
                ],
            ]))
        );

        // Create a custom middleware that uses our mock handler
        $handlerStack = HandlerStack::create($this->mockHandler);
        $handlerStack->push(Middleware::history($this->requestHistory));

        $customMiddleware = function (callable $handler) use ($handlerStack) {
            return function ($request, array $options) use ($handlerStack) {
                return $handlerStack($request, $options);
            };
        };

        $client = new OpenAICompatibleClient(
            apiKey: 'test-api-key',
            baseUrl: 'https://custom.api.com/v1',
            cache: null,
            customHttpMiddleware: $customMiddleware
        );
        $conversation = new LLMConversation([LLMMessage::createFromUserString('Hello')]);
        $request = new LLMRequest(
            model: new LocalModel('custom-model'),
            conversation: $conversation
        );
        $response = $client->sendRequestAsync($request)->wait();

        // Verify the response
        $this->assertEquals('Hello from custom API!', $response->getLastText());
        $this->assertCount(1, $this->requestHistory);
        $httpRequest = $this->requestHistory[0]['request'];
        $this->assertEquals('https://custom.api.com/v1/chat/completions', (string) $httpRequest->getUri());

        // Verify the model was injected into the request body
        $body = json_decode((string) $httpRequest->getBody(), true);
        $this->assertArrayHasKey('model', $body);
        $this->assertEquals('custom-model', $body['model']);
    }
83 | }
84 |
--------------------------------------------------------------------------------
/.php-cs-fixer.dist.php:
--------------------------------------------------------------------------------
1 | in(__DIR__)
5 | ->exclude(['vendor', 'node_modules'])
6 | ->name('*.php');
7 |
// PHP-CS-Fixer configuration: PSR-12 base with project overrides
// (same-line braces, 4-space indent, LF line endings).
// NOTE(review): 'braces', 'curly_braces_position', 'function_typehint_space'
// and 'no_spaces_inside_parenthesis' are deprecated/renamed in recent
// PHP-CS-Fixer releases — confirm against the fixer version pinned in composer.
return (new PhpCsFixer\Config())
    ->setRules([
        '@PSR12' => true,
        'array_syntax' => ['syntax' => 'short'],
        'binary_operator_spaces' => true,
        'blank_line_after_opening_tag' => false,
        'blank_line_before_statement' => [
            'statements' => ['return', 'throw', 'try']
        ],
        'blank_line_after_namespace' => true,
        'blank_lines_before_namespace' => true,
        'braces' => [
            'allow_single_line_anonymous_class_with_empty_body' => false,
            'allow_single_line_closure' => false,
            'position_after_anonymous_constructs' => 'same',
            'position_after_control_structures' => 'same',
            'position_after_functions_and_oop_constructs' => 'same',
        ],
        'class_attributes_separation' => [
            'elements' => [
                'method' => 'one',
                'property' => 'one',
                'const' => 'one',
            ]
        ],
        'concat_space' => ['spacing' => 'one'],
        'control_structure_continuation_position' => ['position' => 'same_line'],
        'curly_braces_position' => [
            'classes_opening_brace' => 'same_line',
            'functions_opening_brace' => 'same_line',
            'anonymous_functions_opening_brace' => 'same_line',
            'anonymous_classes_opening_brace' => 'same_line',
        ],
        'declare_parentheses' => true,
        'function_declaration' => [
            'closure_function_spacing' => 'one',
        ],
        'function_typehint_space' => true,
        'include' => true,
        'indentation_type' => true,
        'method_argument_space' => [
            'on_multiline' => 'ensure_fully_multiline',
            'keep_multiple_spaces_after_comma' => false
        ],
        'method_chaining_indentation' => false,
        'multiline_whitespace_before_semicolons' => [
            'strategy' => 'no_multi_line',
        ],
        'no_extra_blank_lines' => [
            'tokens' => [
                'extra',
                'throw',
                'use',
                'use_trait',
            ]
        ],
        'no_spaces_after_function_name' => true,
        'no_spaces_around_offset' => true,
        'no_spaces_inside_parenthesis' => true,
        'no_trailing_whitespace' => true,
        'no_trailing_whitespace_in_comment' => true,
        'operator_linebreak' => [
            'only_booleans' => true,
            'position' => 'beginning',
        ],
        'single_line_comment_style' => [
            'comment_types' => ['hash']
        ],
        'single_space_after_construct' => true,
        'space_after_semicolon' => true,
        'switch_case_semicolon_to_colon' => true,
        'switch_case_space' => true,
        'ternary_operator_spaces' => true,
        'whitespace_after_comma_in_array' => true,
        'trailing_comma_in_multiline' => true,
    ])
    ->setIndent("    ")
    ->setLineEnding("\n")
    ->setFinder($finder);
87 |
--------------------------------------------------------------------------------
/src/Http/HttpClientFactory.php:
--------------------------------------------------------------------------------
1 | array_merge($headers, [
21 | 'Accept-encoding' => 'gzip',
22 | ]),
23 | ];
24 |
25 | $handler = HandlerStack::create();
26 | if ($customMiddleware) {
27 | $handler->push($customMiddleware);
28 | }
29 |
30 | self::addCacheMiddleware($handler, $cache);
31 |
32 | self::addRetryMiddleware($handler);
33 |
34 | $options['handler'] = $handler;
35 |
36 | return new Client($options);
37 | }
38 |
/**
 * Pushes a read-through cache layer onto the Guzzle handler stack.
 *
 * On a cache hit the stored response is returned as an already-fulfilled
 * promise and the inner handler (network) is never invoked. On a miss the
 * request is forwarded; 2xx responses are stored on the way back. Every
 * network response is annotated with an X-Request-Duration-ms header
 * measured around the inner handler call.
 *
 * No-op when $cache is null.
 */
private static function addCacheMiddleware(HandlerStack $handler, ?CacheInterface $cache): void {
    if ($cache) {
        $handler->push(function (callable $handler) use ($cache): callable {
            // NOTE(review): $handler (the inner handler) is captured by reference
            // although it is never reassigned; a by-value capture would behave
            // identically — confirm before simplifying.
            return static function (RequestInterface $request, array $options) use (&$handler, $cache) {
                $response = $cache->fetch($request);

                if ($response) {
                    // Cache hit: short-circuit with a fulfilled promise.
                    return Create::promiseFor($response);
                }

                $requestStart = microtime(true);
                /** @var PromiseInterface $promise */
                $promise = $handler($request, $options);

                return $promise->then(
                    function (ResponseInterface $response) use ($request, $cache, $requestStart) {
                        // Record the wall-clock duration of the network round trip.
                        $response = $response->withHeader('X-Request-Duration-ms', (string) round((microtime(true) - $requestStart) * 1000));
                        // Only successful responses are cached.
                        if ($response->getStatusCode() >= 200 && $response->getStatusCode() < 300) {
                            $cache->store($request, $response);
                        }

                        return $response;
                    }
                );
            };
        });
    }
}
67 |
/**
 * Pushes a retry layer onto the Guzzle handler stack.
 *
 * Retries up to self::MAX_RETRIES times on transient HTTP statuses
 * (429, 529, 500, 502, 503, 504). The delay honours the server's
 * Retry-After header when present — either delta-seconds or an HTTP
 * date per RFC 9110 — and falls back to Guzzle's exponential backoff
 * otherwise.
 *
 * Fixes over the previous version:
 *  - a malformed Retry-After date no longer throws out of the delay
 *    callback (falls back to exponential backoff instead);
 *  - a Retry-After date in the past can no longer produce a negative
 *    delay (clamped to zero).
 */
private static function addRetryMiddleware(HandlerStack $handler): void {
    $decider = static function (int $retries, RequestInterface $request, ?ResponseInterface $response = null): bool {
        return
            $retries < self::MAX_RETRIES
            && null !== $response
            && in_array($response->getStatusCode(), [429, 529, 500, 502, 503, 504], true);
    };

    $delay = static function (int $retries, ResponseInterface $response): int {
        if (!$response->hasHeader('Retry-After')) {
            return RetryMiddleware::exponentialDelay($retries);
        }

        $retryAfter = $response->getHeaderLine('Retry-After');

        if (!is_numeric($retryAfter)) {
            // Retry-After may also be an HTTP date; convert to delta seconds.
            try {
                $retryAfter = (new \DateTimeImmutable($retryAfter))->getTimestamp() - time();
            } catch (\Exception) {
                // Malformed header value: degrade gracefully instead of crashing.
                return RetryMiddleware::exponentialDelay($retries);
            }
        }

        // Clamp so a past date or negative value never yields a negative delay;
        // Guzzle expects the delay in milliseconds.
        return max(0, (int) $retryAfter) * 1000;
    };

    $handler->push(Middleware::retry($decider, $delay));
}
93 | }
94 |
--------------------------------------------------------------------------------
/src/Client/Gemini/GeminiClient.php:
--------------------------------------------------------------------------------
1 | 'application/json',
35 | ];
36 | }
37 |
/**
 * Returns the shared (uncached) HTTP client, creating it lazily on first
 * use. A test double installed via self::$testHttpClient always wins.
 */
private function getHttpClient(): Client {
    if (self::$testHttpClient !== null) {
        return self::$testHttpClient;
    }

    // Lazy initialisation: build the real client only once.
    return $this->httpClient ??= HttpClientFactory::createClient($this->customHttpMiddleware, null, $this->getHeaders());
}
49 |
/**
 * Returns the cache-enabled HTTP client, created lazily on first use.
 * Falls back to the plain client when no cache is configured; a test
 * double installed via self::$testHttpClient always wins.
 */
private function getCachedHttpClient(): Client {
    if (self::$testHttpClient !== null) {
        return self::$testHttpClient;
    }

    if (!$this->cache) {
        // No cache configured — reuse the plain client.
        return $this->getHttpClient();
    }

    // Lazy initialisation: build the caching client only once.
    return $this->cachedHttpClient ??= HttpClientFactory::createClient($this->customHttpMiddleware, $this->cache, $this->getHeaders());
}
64 |
/**
 * Sends the HTTP request through the cache-enabled client and wraps the
 * raw response in a ModelResponse.
 *
 * The JSON body is decoded strictly (JSON_THROW_ON_ERROR). The request
 * duration is read from the X-Request-Duration-ms header injected by the
 * HTTP middleware; the (int) cast yields 0 when the header is absent
 * (e.g. on a cache hit).
 */
private function sendCachedRequestAsync(RequestInterface $httpRequest): PromiseInterface {
    return $this->getCachedHttpClient()->sendAsync($httpRequest)->then(function (ResponseInterface $response) {
        return new ModelResponse(json_decode((string) $response->getBody(), true, 512, JSON_THROW_ON_ERROR), (int) $response->getHeaderLine('X-Request-Duration-ms'));
    });
}
70 |
/**
 * Sends an LLM request and resolves with the decoded LLMResponse.
 *
 * When decodeResponse() yields a follow-up LLMRequest instead of a final
 * response, that request is dispatched again recursively until a final
 * LLMResponse is produced.
 */
public function sendRequestAsync(LLMRequest $request): PromiseInterface {
    $httpRequest = $this->getGenerateContentRequest($request);

    return $this->sendCachedRequestAsync($httpRequest)->then(function (ModelResponse $modelResponse) use ($request) {
        $decoded = $this->decodeResponse($request, $modelResponse);

        return $decoded instanceof LLMResponse
            ? $decoded
            : $this->sendRequestAsync($decoded);
    });
}
81 |
/**
 * Builds the HTTP request for Gemini's generateContent endpoint.
 *
 * The payload is the encoder's wire representation of the LLMRequest,
 * serialised strictly (JSON_THROW_ON_ERROR).
 *
 * NOTE(review): the API key is passed as a URL query parameter, so it can
 * leak into access logs and cache keys; Gemini also accepts the key via
 * the x-goog-api-key request header — consider switching. Also note the
 * lowercase 'accept-encoding' here vs 'Accept-encoding' elsewhere in the
 * codebase (HTTP header names are case-insensitive, so cosmetic only).
 */
private function getGenerateContentRequest(LLMRequest $request): RequestInterface {
    $url = "{$this->apiEndpoint}/models/{$request->getModel()->getCode()}:generateContent?key={$this->apiKey}";

    return new Request('POST', $url, [
        'Content-Type' => 'application/json',
        'accept-encoding' => 'gzip',
    ], json_encode($this->encodeRequest($request), JSON_THROW_ON_ERROR));
}
90 |
/**
 * Returns the stable identifier of this client implementation (self::CODE).
 */
public function getCode(): string {
    return self::CODE;
}
94 | }
95 |
--------------------------------------------------------------------------------
/tests/Client/Anthropic/AnthropicEncoderMediaTest.php:
--------------------------------------------------------------------------------
1 | encodeRequest($request);
36 |
37 | // Verify content types
38 | $this->assertCount(2, $encoded['messages'][0]['content']);
39 |
40 | // Check text content
41 | $this->assertEquals('text', $encoded['messages'][0]['content'][0]['type']);
42 | $this->assertEquals('Look at this image:', $encoded['messages'][0]['content'][0]['text']);
43 |
44 | // Check image content
45 | $this->assertEquals('image', $encoded['messages'][0]['content'][1]['type']);
46 | $this->assertEquals('base64', $encoded['messages'][0]['content'][1]['source']['type']);
47 | $this->assertEquals('image/jpeg', $encoded['messages'][0]['content'][1]['source']['media_type']);
48 | $this->assertEquals('imagedata123==', $encoded['messages'][0]['content'][1]['source']['data']);
49 |
50 | // Check cache control
51 | $this->assertArrayHasKey('cache_control', $encoded['messages'][0]['content'][1]);
52 | $this->assertEquals(['type' => 'ephemeral'], $encoded['messages'][0]['content'][1]['cache_control']);
53 | }
54 |
/**
 * A PDF attachment must be encoded as a base64 "document" source block,
 * and no cache_control marker may appear when caching is disabled.
 */
public function testPdfContent(): void {
    $encoder = new AnthropicEncoder();

    // Build a user message combining text with an (uncached) PDF attachment.
    $message = LLMMessage::createFromUser(new LLMMessageContents([
        new LLMMessageText('Read this PDF:'),
        new LLMMessagePdf('base64', 'pdfdata123==', false),
    ]));

    $request = new LLMRequest(
        model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
        conversation: new LLMConversation([$message])
    );

    $encoded = $encoder->encodeRequest($request);

    // Both content parts must survive encoding.
    $this->assertCount(2, $encoded['messages'][0]['content']);

    // The PDF becomes a base64-sourced "document" part.
    $pdfPart = $encoded['messages'][0]['content'][1];
    $this->assertEquals('document', $pdfPart['type']);
    $this->assertEquals('base64', $pdfPart['source']['type']);
    $this->assertEquals('application/pdf', $pdfPart['source']['media_type']);
    $this->assertEquals('pdfdata123==', $pdfPart['source']['data']);

    // Caching was disabled, so no cache_control marker may be emitted.
    $this->assertArrayNotHasKey('cache_control', $pdfPart);
}
85 | }
86 |
--------------------------------------------------------------------------------
/src/Message/LLMMessageContents.php:
--------------------------------------------------------------------------------
1 |
13 | * @implements ArrayAccess
14 | */
class LLMMessageContents implements JsonSerializable, Iterator, ArrayAccess, Countable {
    /**
     * @param LLMMessageContent[] $messages ordered content parts of one message
     * @param bool $isError whether this content represents an error payload
     *
     * @throws InvalidArgumentException when any element is not an LLMMessageContent
     */
    public function __construct(private array $messages, private bool $isError = false) {
        foreach ($this->messages as $message) {
            if (!$message instanceof LLMMessageContent) {
                throw new InvalidArgumentException('All messages must implement LLMMessageContent interface - ' . get_class($message) . ' does not.');
            }
        }
    }

    /**
     * @return LLMMessageContent[]
     */
    public function getMessages(): array {
        return $this->messages;
    }

    /**
     * Rebuilds a collection from its jsonSerialize() representation.
     *
     * Each item carries the concrete class name and that class's own JSON
     * data; the class must implement LLMMessageContent and its fromJson()
     * must return an LLMMessageContent instance.
     *
     * NOTE(review): the isError flag is not round-tripped here — a collection
     * serialised from fromErrorString() deserialises with isError=false.
     *
     * @throws InvalidArgumentException on an unknown or non-conforming class
     */
    public static function fromJson(array $data): self {
        /** @var LLMMessageContent[] $content */
        $content = [];
        foreach ($data as $item) {
            $class = $item['class'];
            if (!is_subclass_of($class, LLMMessageContent::class)) {
                throw new InvalidArgumentException("Class $class does not implement LLMMessageContent");
            }
            $result = $class::fromJson($item['data']);

            // Ensure the result implements LLMMessageContent
            if (!($result instanceof LLMMessageContent)) {
                throw new InvalidArgumentException("Class $class::fromJson() does not return LLMMessageContent");
            }

            $content[] = $result;
        }

        return new self($content);
    }

    /**
     * Serialises each part together with its concrete class name so that
     * fromJson() can reconstruct the right subtypes.
     */
    public function jsonSerialize(): array {
        return array_map(static fn(LLMMessageContent $content) => ['class' => $content::class, 'data' => $content], $this->messages);
    }

    /** Convenience factory: a single plain-text part. */
    public static function fromString(string $content): self {
        return new self([new LLMMessageText($content)]);
    }

    /** Convenience factory: a single plain-text part flagged as an error. */
    public static function fromErrorString(string $content): self {
        return new self([new LLMMessageText($content)], true);
    }

    /** Convenience factory: a single structured-data part. */
    public static function fromArrayData(array $content): self {
        return new self([new LLMMessageArrayData($content)]);
    }

    // --- Iterator implementation (delegates to the array's internal pointer) ---

    public function current(): LLMMessageContent {
        return current($this->messages);
    }

    public function next(): void {
        next($this->messages);
    }

    // NOTE(review): key() returns null once iteration is exhausted, which
    // violates this int return type under strict_types; foreach never hits
    // that case because valid() guards it, but direct callers could.
    public function key(): int {
        return key($this->messages);
    }

    public function valid(): bool {
        return key($this->messages) !== null;
    }

    public function rewind(): void {
        reset($this->messages);
    }

    // --- ArrayAccess implementation (read-only view) ---

    public function offsetExists(mixed $offset): bool {
        return array_key_exists($offset, $this->messages);
    }

    public function offsetGet(mixed $offset): LLMMessageContent {
        return $this->messages[$offset];
    }

    /** Mutation is not supported — the collection is immutable. */
    public function offsetSet(mixed $offset, mixed $value): void {
        throw new InvalidArgumentException('Messages are readonly.');
    }

    /** Mutation is not supported — the collection is immutable. */
    public function offsetUnset(mixed $offset): void {
        throw new InvalidArgumentException('Messages are readonly.');
    }

    public function count(): int {
        return count($this->messages);
    }

    /** Whether this content was constructed as an error payload. */
    public function isError(): bool {
        return $this->isError;
    }
}
112 |
--------------------------------------------------------------------------------
/tests/Client/OpenAI/OpenAIEncoderErrorsTest.php:
--------------------------------------------------------------------------------
1 | encoder = new OpenAIEncoder();
25 | }
26 |
/**
 * Reasoning content has no wire representation in the OpenAI chat API,
 * so encoding a conversation containing it must fail loudly.
 */
public function testUnsupportedMessageTypeThrowsException(): void {
    $request = new LLMRequest(
        model: new GPT41(GPT41::VERSION_2025_04_14),
        conversation: new LLMConversation([
            LLMMessage::createFromUser(new LLMMessageContents([
                new LLMMessageReasoning('This is reasoning', 'sig123', false),
            ])),
        ])
    );

    $this->expectException(InvalidArgumentException::class);
    $this->expectExceptionMessage('Unsupported message content type');

    $this->encoder->encodeRequest($request);
}
45 |
/**
 * OpenAI only understands ReasoningEffort; a token-budget reasoning
 * config must be rejected by the encoder.
 */
public function testUnsupportedReasoningConfigThrowsException(): void {
    $request = new LLMRequest(
        model: new GPT41(GPT41::VERSION_2025_04_14),
        conversation: new LLMConversation([
            LLMMessage::createFromUserString('Hello'),
        ]),
        reasoningConfig: new ReasoningBudget(1000) // OpenAI only supports ReasoningEffort
    );

    $this->expectException(InvalidArgumentException::class);
    $this->expectExceptionMessage('Unsupported reasoning config type');

    $this->encoder->encodeRequest($request);
}
63 |
/**
 * Every ReasoningEffort level must be encoded as its lowercase wire
 * value under the 'reasoning_effort' key.
 */
public function testReasoningEffortValues(): void {
    $conversation = new LLMConversation([
        LLMMessage::createFromUserString('Hello'),
    ]);

    // Pairs of expected wire value and the effort enum producing it.
    $cases = [
        ['low', ReasoningEffort::LOW],
        ['medium', ReasoningEffort::MEDIUM],
        ['high', ReasoningEffort::HIGH],
    ];

    foreach ($cases as [$expected, $effort]) {
        $request = new LLMRequest(
            model: new GPT41(GPT41::VERSION_2025_04_14),
            conversation: $conversation,
            reasoningConfig: $effort
        );

        $encoded = $this->encoder->encodeRequest($request);
        $this->assertEquals($expected, $encoded['reasoning_effort']);
    }
}
100 | }
101 |
--------------------------------------------------------------------------------
/src/LLMRequest.php:
--------------------------------------------------------------------------------
1 | conversation;
38 | }
39 |
/** Returns the target model for this request. */
public function getModel(): ModelInterface {
    return $this->model;
}
43 |
/** Returns the sampling temperature requested for this call. */
public function getTemperature(): float {
    return $this->temperature;
}
47 |
/** Returns the maximum number of output tokens requested. */
public function getMaxTokens(): int {
    return $this->maxTokens;
}
51 |
/**
 * Returns the stop sequences for this request.
 *
 * @return string[] presumably a list of strings — confirm against callers
 */
public function getStopSequences(): array {
    return $this->stopSequences;
}
55 |
/**
 * Returns the tool definitions offered to the model for this request.
 *
 * @return ToolDefinition[]
 */
public function getTools(): array {
    return $this->tools;
}
62 |
/**
 * Returns a copy of this request with $message appended to the
 * conversation. The receiver is left untouched (immutable wither style).
 */
public function withMessage(LLMMessage $message): self {
    $next = clone $this;
    $next->conversation = $this->conversation->withMessage($message);

    return $next;
}
69 |
/** Running total of input tokens consumed by earlier round trips. */
public function getPreviousInputTokens(): int {
    return $this->previousInputTokens;
}
73 |
/** Running total of output tokens produced by earlier round trips. */
public function getPreviousOutputTokens(): int {
    return $this->previousOutputTokens;
}
77 |
/** Largest single-response output token count seen so far (see withCost()). */
public function getPreviousMaximumOutputTokens(): int {
    return $this->previousMaximumOutputTokens;
}
81 |
/** Accumulated input cost in USD from earlier round trips. */
public function getPreviousInputCostUSD(): float {
    return $this->previousInputCostUSD;
}
85 |
/** Accumulated output cost in USD from earlier round trips. */
public function getPreviousOutputCostUSD(): float {
    return $this->previousOutputCostUSD;
}
89 |
/** Accumulated wall-clock time in milliseconds from earlier round trips. */
public function getPreviousTimeMs(): int {
    return $this->previousTimeMs;
}
93 |
/**
 * Returns a copy with one round trip's token counts and USD costs added
 * to the running totals. Also tracks the largest single-response output
 * token count observed so far.
 */
public function withCost(int $inputTokens, int $outputTokens, float $previousInputCostUSD, float $previousOutputCostUSD): self {
    $next = clone $this;

    $next->previousInputTokens += $inputTokens;
    $next->previousOutputTokens += $outputTokens;
    // Keep the high-water mark of output tokens across round trips.
    $next->previousMaximumOutputTokens = max($this->previousMaximumOutputTokens, $outputTokens);
    $next->previousInputCostUSD += $previousInputCostUSD;
    $next->previousOutputCostUSD += $previousOutputCostUSD;

    return $next;
}
107 |
/**
 * Returns a copy with $timeMs added to the accumulated wall-clock time.
 */
public function withTime(int $timeMs): self {
    $clone = clone $this;
    $clone->previousTimeMs += $timeMs;

    return $clone;
}
114 |
115 |
116 |
/**
 * Returns the most recent message of the conversation.
 * Assumes the conversation is non-empty (errors otherwise, as before).
 */
public function getLastMessage(): LLMMessage {
    $messages = $this->getConversation()->getMessages();

    return $messages[array_key_last($messages)];
}
120 |
/**
 * Returns the reasoning configuration, if any: either a provider-style
 * ReasoningConfig, a ReasoningEffort level, or null when unset.
 */
public function getReasoningConfig(): ReasoningConfig|ReasoningEffort|null {
    return $this->reasoningConfig;
}
124 |
125 | }
126 |
--------------------------------------------------------------------------------
/tests/Client/Gemini/GeminiEncoderTextTest.php:
--------------------------------------------------------------------------------
1 | encoder = new GeminiEncoder();
20 | }
21 |
/**
 * A text-only request must map temperature/maxTokens into
 * generationConfig, lift the system message into systemInstruction,
 * and emit only the user message under contents.
 */
public function testSimpleTextRequest(): void {
    // Create a simple request with text only
    $conversation = new LLMConversation([
        LLMMessage::createFromSystemString('You are a helpful assistant.'),
        LLMMessage::createFromUserString('Hello, how are you?'),
    ]);

    $request = new LLMRequest(
        model: new Gemini20Flash(),
        conversation: $conversation,
        temperature: 0.7,
        maxTokens: 1000
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify encoded structure
    $this->assertEquals(0.7, $encoded['generationConfig']['temperature']);
    $this->assertEquals(1000, $encoded['generationConfig']['maxOutputTokens']);

    // Verify system instruction
    $this->assertArrayHasKey('systemInstruction', $encoded);
    $this->assertEquals('You are a helpful assistant.', $encoded['systemInstruction']['parts'][0]['text']);

    // Verify messages structure (should only include user message, not system which is handled differently)
    $this->assertCount(1, $encoded['contents']);

    // Check user message
    $this->assertEquals('user', $encoded['contents'][0]['role']);
    $this->assertIsArray($encoded['contents'][0]['parts']);
    $this->assertCount(1, $encoded['contents'][0]['parts']);
    $this->assertEquals('Hello, how are you?', $encoded['contents'][0]['parts'][0]['text']);
}
55 |
/**
 * A multi-turn conversation must keep message order and translate the
 * assistant role to Gemini's "model" role.
 */
public function testMultipleMessagesInConversation(): void {
    // Create a conversation with multiple messages
    $conversation = new LLMConversation([
        LLMMessage::createFromUserString('What is machine learning?'),
        LLMMessage::createFromAssistantString('Machine learning is a field of AI...'),
        LLMMessage::createFromUserString('Can you provide some examples?'),
    ]);

    $request = new LLMRequest(
        model: new Gemini20Flash(),
        conversation: $conversation
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify message count
    $this->assertCount(3, $encoded['contents']);

    // Verify message roles (in Gemini, assistant is "model")
    $this->assertEquals('user', $encoded['contents'][0]['role']);
    $this->assertEquals('model', $encoded['contents'][1]['role']);
    $this->assertEquals('user', $encoded['contents'][2]['role']);

    // Verify messages content
    $this->assertEquals('What is machine learning?', $encoded['contents'][0]['parts'][0]['text']);
    $this->assertEquals('Machine learning is a field of AI...', $encoded['contents'][1]['parts'][0]['text']);
    $this->assertEquals('Can you provide some examples?', $encoded['contents'][2]['parts'][0]['text']);
}
84 |
/**
 * Stop sequences must be emitted verbatim under
 * generationConfig.stopSequences.
 */
public function testRequestWithStopSequences(): void {
    // Create a request with stop sequences
    $conversation = new LLMConversation([
        LLMMessage::createFromUserString('Tell me a story'),
    ]);

    $request = new LLMRequest(
        model: new Gemini20Flash(),
        conversation: $conversation,
        stopSequences: ['END', 'FINISH']
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify stop sequences
    $this->assertArrayHasKey('generationConfig', $encoded);
    $this->assertArrayHasKey('stopSequences', $encoded['generationConfig']);
    $this->assertCount(2, $encoded['generationConfig']['stopSequences']);
    $this->assertEquals(['END', 'FINISH'], $encoded['generationConfig']['stopSequences']);
}
105 | }
106 |
--------------------------------------------------------------------------------
/tests/Client/Gemini/GeminiEncoderMediaTest.php:
--------------------------------------------------------------------------------
1 | encoder = new GeminiEncoder();
24 | }
25 |
/**
 * An image attachment must become an inline_data part carrying the
 * mime type and the raw base64 payload.
 */
public function testImageRequest(): void {
    // Create a request with an image
    $conversation = new LLMConversation([
        LLMMessage::createFromUser(new LLMMessageContents([
            new LLMMessageText('What is in this image?'),
            new LLMMessageImage('base64', 'image/jpeg', 'base64encodeddata'),
        ])),
    ]);

    $request = new LLMRequest(
        model: new Gemini20Flash(),
        conversation: $conversation
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify message structure
    $this->assertCount(1, $encoded['contents']);

    // Verify user message with image
    $this->assertEquals('user', $encoded['contents'][0]['role']);
    $this->assertCount(2, $encoded['contents'][0]['parts']);

    // Verify text part
    $this->assertEquals('What is in this image?', $encoded['contents'][0]['parts'][0]['text']);

    // Verify image part
    $this->assertArrayHasKey('inline_data', $encoded['contents'][0]['parts'][1]);
    $this->assertEquals('image/jpeg', $encoded['contents'][0]['parts'][1]['inline_data']['mime_type']);
    $this->assertEquals('base64encodeddata', $encoded['contents'][0]['parts'][1]['inline_data']['data']);
}
57 |
/**
 * Interleaved text/image/text content must keep its order as three
 * parts within one user message.
 */
public function testMixedMediaRequest(): void {
    // Create a request with text, then an image, then more text
    $conversation = new LLMConversation([
        LLMMessage::createFromUser(new LLMMessageContents([
            new LLMMessageText('Here is a picture of a cat:'),
            new LLMMessageImage('base64', 'image/jpeg', 'base64encodedcatimage'),
            new LLMMessageText('What breed is it?'),
        ])),
    ]);

    $request = new LLMRequest(
        model: new Gemini20Flash(),
        conversation: $conversation
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify message structure
    $this->assertCount(1, $encoded['contents']);

    // Verify user message parts
    $this->assertEquals('user', $encoded['contents'][0]['role']);
    $this->assertCount(3, $encoded['contents'][0]['parts']);

    // Verify first text part
    $this->assertEquals('Here is a picture of a cat:', $encoded['contents'][0]['parts'][0]['text']);

    // Verify image part
    $this->assertArrayHasKey('inline_data', $encoded['contents'][0]['parts'][1]);
    $this->assertEquals('image/jpeg', $encoded['contents'][0]['parts'][1]['inline_data']['mime_type']);
    $this->assertEquals('base64encodedcatimage', $encoded['contents'][0]['parts'][1]['inline_data']['data']);

    // Verify second text part
    $this->assertEquals('What breed is it?', $encoded['contents'][0]['parts'][2]['text']);
}
93 |
/**
 * PDF attachments are rejected by this encoder with an explicit
 * InvalidArgumentException.
 */
public function testPdfRequestShouldThrowException(): void {
    // PDF is not supported by Gemini directly
    $conversation = new LLMConversation([
        LLMMessage::createFromUser(new LLMMessageContents([
            new LLMMessageText('Analyze this PDF:'),
            new LLMMessagePdf('base64', 'base64encodedpdf'),
        ])),
    ]);

    $request = new LLMRequest(
        model: new Gemini20Flash(),
        conversation: $conversation
    );

    $this->expectException(InvalidArgumentException::class);
    $this->expectExceptionMessage('PDF content type not supported for Gemini');

    $this->encoder->encodeRequest($request);
}
113 | }
114 |
--------------------------------------------------------------------------------
/tests/Client/OpenAI/OpenAIEncoderTextTest.php:
--------------------------------------------------------------------------------
1 | encoder = new OpenAIEncoder();
20 | }
21 |
/**
 * A text-only request must encode the model code, token limit,
 * temperature and both messages as typed text content parts.
 */
public function testSimpleTextRequest(): void {
    // Create a simple request with text only
    $conversation = new LLMConversation([
        LLMMessage::createFromSystemString('You are a helpful assistant.'),
        LLMMessage::createFromUserString('Hello, how are you?'),
    ]);

    $request = new LLMRequest(
        model: new GPT41(GPT41::VERSION_2025_04_14),
        conversation: $conversation,
        temperature: 0.7,
        maxTokens: 1000
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify encoded structure
    $this->assertEquals('gpt-4.1-2025-04-14', $encoded['model']);
    $this->assertEquals(1000, $encoded['max_completion_tokens']);
    $this->assertEquals(0.7, $encoded['temperature']);

    // Verify messages structure
    $this->assertCount(2, $encoded['messages']);

    // Check system message
    $this->assertEquals('system', $encoded['messages'][0]['role']);
    $this->assertIsArray($encoded['messages'][0]['content']);
    $this->assertCount(1, $encoded['messages'][0]['content']);
    $this->assertEquals('text', $encoded['messages'][0]['content'][0]['type']);
    $this->assertEquals('You are a helpful assistant.', $encoded['messages'][0]['content'][0]['text']);

    // Check user message
    $this->assertEquals('user', $encoded['messages'][1]['role']);
    $this->assertIsArray($encoded['messages'][1]['content']);
    $this->assertCount(1, $encoded['messages'][1]['content']);
    $this->assertEquals('text', $encoded['messages'][1]['content'][0]['type']);
    $this->assertEquals('Hello, how are you?', $encoded['messages'][1]['content'][0]['text']);
}
60 |
/**
 * A multi-turn conversation must keep message order and the
 * user/assistant role names as-is.
 */
public function testMultipleMessagesInConversation(): void {
    // Create a conversation with multiple messages
    $conversation = new LLMConversation([
        LLMMessage::createFromUserString('What is machine learning?'),
        LLMMessage::createFromAssistantString('Machine learning is a field of AI...'),
        LLMMessage::createFromUserString('Can you provide some examples?'),
    ]);

    $request = new LLMRequest(
        model: new GPT41(GPT41::VERSION_2025_04_14),
        conversation: $conversation
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify message count
    $this->assertCount(3, $encoded['messages']);

    // Verify message roles
    $this->assertEquals('user', $encoded['messages'][0]['role']);
    $this->assertEquals('assistant', $encoded['messages'][1]['role']);
    $this->assertEquals('user', $encoded['messages'][2]['role']);

    // Verify messages content
    $this->assertEquals('What is machine learning?', $encoded['messages'][0]['content'][0]['text']);
    $this->assertEquals('Machine learning is a field of AI...', $encoded['messages'][1]['content'][0]['text']);
    $this->assertEquals('Can you provide some examples?', $encoded['messages'][2]['content'][0]['text']);
}
89 |
/**
 * Stop sequences must be emitted verbatim under the 'stop' key.
 */
public function testRequestWithStopSequences(): void {
    // Create a request with stop sequences
    $conversation = new LLMConversation([
        LLMMessage::createFromUserString('Tell me a story'),
    ]);

    $request = new LLMRequest(
        model: new GPT41(GPT41::VERSION_2025_04_14),
        conversation: $conversation,
        stopSequences: ['END', 'FINISH']
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify stop sequences
    $this->assertArrayHasKey('stop', $encoded);
    $this->assertCount(2, $encoded['stop']);
    $this->assertEquals(['END', 'FINISH'], $encoded['stop']);
}
109 | }
110 |
--------------------------------------------------------------------------------
/tests/Client/OpenAI/OpenAIEncoderMediaTest.php:
--------------------------------------------------------------------------------
1 | encoder = new OpenAIEncoder();
22 | }
23 |
/**
 * An image attachment must be encoded as an image_url part whose URL is
 * a base64 data URI with the correct mime type and payload.
 */
public function testImageContent(): void {
    // Create a message with image content
    $userMessage = LLMMessage::createFromUser(new LLMMessageContents([
        new LLMMessageText('Look at this image:'),
        new LLMMessageImage('base64', 'image/jpeg', 'imagedata123==', false),
    ]));

    $conversation = new LLMConversation([$userMessage]);

    $request = new LLMRequest(
        model: new GPT41(GPT41::VERSION_2025_04_14),
        conversation: $conversation
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify content types
    $this->assertCount(1, $encoded['messages']);
    $this->assertEquals('user', $encoded['messages'][0]['role']);
    $this->assertCount(2, $encoded['messages'][0]['content']);

    // Check text content
    $this->assertEquals('text', $encoded['messages'][0]['content'][0]['type']);
    $this->assertEquals('Look at this image:', $encoded['messages'][0]['content'][0]['text']);

    // Check image content
    $this->assertEquals('image_url', $encoded['messages'][0]['content'][1]['type']);
    $this->assertStringStartsWith('data:image/jpeg;base64,', $encoded['messages'][0]['content'][1]['image_url']['url']);
    $this->assertStringContainsString('imagedata123==', $encoded['messages'][0]['content'][1]['image_url']['url']);
}
54 |
/**
 * A four-message conversation mixing system text, user text+image,
 * assistant text and a follow-up must keep order, roles and the data-URI
 * encoding of the image.
 */
public function testMixedTextAndImageContent(): void {
    // Create a conversation with mixed content types across multiple messages
    $systemMessage = LLMMessage::createFromSystemString('You are a helpful image analyzer.');

    $userFirstMessage = LLMMessage::createFromUser(new LLMMessageContents([
        new LLMMessageText('Analyze this image:'),
        new LLMMessageImage('base64', 'image/png', 'pngdata123==', false),
    ]));

    $assistantResponse = LLMMessage::createFromAssistantString('This image appears to be a diagram of a process.');

    $userFollowUp = LLMMessage::createFromUserString('Can you explain in more detail?');

    $conversation = new LLMConversation([
        $systemMessage,
        $userFirstMessage,
        $assistantResponse,
        $userFollowUp,
    ]);

    $request = new LLMRequest(
        model: new GPT41(GPT41::VERSION_2025_04_14),
        conversation: $conversation
    );

    $encoded = $this->encoder->encodeRequest($request);

    // Verify message count
    $this->assertCount(4, $encoded['messages']);

    // Check system message
    $this->assertEquals('system', $encoded['messages'][0]['role']);
    $this->assertEquals('You are a helpful image analyzer.', $encoded['messages'][0]['content'][0]['text']);

    // Check first user message with image
    $this->assertEquals('user', $encoded['messages'][1]['role']);
    $this->assertCount(2, $encoded['messages'][1]['content']);
    $this->assertEquals('text', $encoded['messages'][1]['content'][0]['type']);
    $this->assertEquals('image_url', $encoded['messages'][1]['content'][1]['type']);
    $this->assertStringStartsWith('data:image/png;base64,', $encoded['messages'][1]['content'][1]['image_url']['url']);
    $this->assertStringContainsString('pngdata123==', $encoded['messages'][1]['content'][1]['image_url']['url']);

    // Check assistant response
    $this->assertEquals('assistant', $encoded['messages'][2]['role']);
    $this->assertEquals('This image appears to be a diagram of a process.', $encoded['messages'][2]['content'][0]['text']);

    // Check follow-up
    $this->assertEquals('user', $encoded['messages'][3]['role']);
    $this->assertEquals('Can you explain in more detail?', $encoded['messages'][3]['content'][0]['text']);
}
105 | }
106 |
--------------------------------------------------------------------------------
/src/MarkdownFormatter.php:
--------------------------------------------------------------------------------
1 | ', '<'], ['>', '<'], $content->getText());
19 | }
20 |
21 | if ($content instanceof LLMMessageArrayData) {
22 | return "```json\n" . json_encode($content->getData(), JSON_THROW_ON_ERROR | JSON_PRETTY_PRINT | JSON_UNESCAPED_UNICODE | JSON_UNESCAPED_SLASHES) . "\n" . "```\n";
23 | }
24 |
25 | if ($content instanceof LLMMessageReasoning) {
26 | return "**Reasoning:**\n\n" . $content->getText();
27 | }
28 |
29 | if ($content instanceof LLMMessageImage) {
30 | $text = '**Image** (' . $content->getMediaType() . ' ' . $this->formatByteSize(strlen(base64_decode($content->getData()))) . ')';
31 | $text .= "\n\n";
32 | $text .= ' . ';' . $content->getEncoding() . ',' . $content->getData() . ')';
33 |
34 | return $text;
35 | }
36 |
37 | if ($content instanceof LLMMessagePdf) {
38 | return '**PDF** (' . $this->formatByteSize(strlen(base64_decode($content->getData()))) . ')';
39 | }
40 |
41 | throw new RuntimeException('Unknown message content type');
42 | }
43 |
/**
 * Renders a request (and optionally its response) as human-readable Markdown.
 *
 * Emits the request parameters, then each conversation message under a
 * role heading (tool uses/results get dedicated formatting), and — when a
 * response is given — a trailing stats section with timing, token counts
 * and price.
 *
 * Bug fix: system messages were previously mislabelled "## User:".
 */
public function responseToMarkdown(LLMRequest|LLMResponse $requestOrResponse): string {
    if ($requestOrResponse instanceof LLMRequest) {
        $request = $requestOrResponse;
        $response = null;
    } else {
        $request = $requestOrResponse->getRequest();
        $response = $requestOrResponse;
    }

    $markdown = ' - **Model:** ' . $request->getModel()->getCode() . "\n";
    $markdown .= ' - **Temperature:** ' . $request->getTemperature() . "\n";
    $markdown .= ' - **Max tokens:** ' . $request->getMaxTokens() . "\n";

    foreach ($request->getConversation()->getMessages() as $message) {
        if ($message->isUser()) {
            $markdown .= '## User:' . "\n";
        } elseif ($message->isSystem()) {
            // Fixed: this branch used to emit '## User:' (copy-paste defect).
            $markdown .= '## System:' . "\n";
        } elseif ($message->isAssistant()) {
            $markdown .= '## Assistant:' . "\n";
        } else {
            throw new RuntimeException('Unknown message role');
        }
        foreach ($message->getContents() as $content) {
            if ($content instanceof LLMMessageToolUse) {
                $markdown .= '**Tool use:** ' . $content->getName() . ' (' . $content->getId() . ')' . "\n";
                $markdown .= "```json\n";
                $markdown .= json_encode($content->getInput(), JSON_THROW_ON_ERROR | JSON_PRETTY_PRINT) . "\n";
                $markdown .= "```";
            } elseif ($content instanceof LLMMessageToolResult) {
                $markdown .= "**Tool result:** " . $content->getId() . "\n\n";
                // A tool result wraps its own content collection; render each part.
                foreach ($content->getContent()->getMessages() as $toolContent) {
                    $markdown .= $this->messageContentToString($toolContent);
                }
            } else {
                $markdown .= $this->messageContentToString($content);
            }
            $markdown .= "\n\n";
        }
    }

    $markdown .= "\n\n";

    if (isset($response)) {
        $markdown .= '----------------------';
        $markdown .= "\n\n";

        $price = $response->getInputPriceUsd() + $response->getOutputPriceUsd();
        $markdown .= "##### Total stats\n\n";
        $markdown .= 'Finished in ' . number_format($response->getTotalTimeMs() / 1000, 3, '.') . 's' .
            ', prompt tokens: ' . $response->getInputTokens() .
            ', completion tokens: ' . $response->getOutputTokens() .
            ', maximum completion tokens: ' . $response->getMaximumOutputTokens() .
            ', total tokens: ' . ($response->getInputTokens() + $response->getOutputTokens()) .
            ', price: ' . $this->formatPrice($price) .
            "\n\n";
    }

    return $markdown;
}
104 |
/**
 * Formats a USD amount with exactly three decimal places, e.g. 0.5 -> "$0.500".
 */
private function formatPrice(float $price): string {
    $rounded = round($price, 3);

    return sprintf('$%s', number_format($rounded, 3));
}
108 |
/**
 * Converts a raw byte count into a short human-readable string (e.g. 2048 -> "2 KB").
 * Scales by 1024 per step, capped at TB; the value is rounded to two decimals.
 */
private function formatByteSize(int $size): string {
    $labels = ['B', 'KB', 'MB', 'GB', 'TB'];
    $maxIndex = count($labels) - 1;

    $value = (float) $size;
    $index = 0;
    while ($value >= 1024 && $index < $maxIndex) {
        $value /= 1024;
        $index++;
    }

    return round($value, 2) . ' ' . $labels[$index];
}
119 | }
120 |
--------------------------------------------------------------------------------
/src/Client/LLMAgentClient.php:
--------------------------------------------------------------------------------
1 | runAsync($client, $request, $feedbackCallback)->wait();
26 | }
27 |
/**
 * Starts the agent loop without blocking; the returned promise resolves with the
 * final LLMResponse once tool calls and feedback rounds (if any) have completed.
 *
 * @return PromiseInterface
 */
public function runAsync(LLMClient $client, LLMRequest $request, ?callable $feedbackCallback = null): PromiseInterface {
    // Log the initial request before entering the request/response loop.
    $this->logger?->requestStarted($request);

    return $this->sendAndProcessRequest($client, $request, $feedbackCallback);
}
36 |
/**
 * Sends a request and routes the response: tool-use responses are dispatched to
 * the tool-execution path (which recurses back here), everything else goes to
 * feedback post-processing.
 *
 * @return PromiseInterface
 */
private function sendAndProcessRequest(LLMClient $client, LLMRequest $request, ?callable $feedbackCallback): PromiseInterface {
    $handleResponse = function (LLMResponse $llmResponse) use ($client, $request, $feedbackCallback) {
        $this->logger?->requestFinished($llmResponse);

        // Tool use takes precedence over feedback handling.
        if ($llmResponse->getStopReason() !== StopReason::TOOL_USE) {
            return $this->postProcessResponse($llmResponse, $client, $feedbackCallback);
        }

        return $this->processToolUseResponse($llmResponse, $client, $request, $feedbackCallback);
    };

    return $client->sendRequestAsync($request)->then($handleResponse);
}
55 |
/**
 * Executes every tool call requested by the model and re-sends the conversation
 * with the collected tool results appended as a single user message.
 *
 * For each LLMMessageToolUse block in the model's last message, the matching tool
 * (by name) is looked up in the request's tool list. Unless both the input and the
 * schema's "required" list are empty, the input is validated against the tool's
 * JSON schema; validation failures are converted into an error-string tool result
 * instead of aborting the run. Tool handlers may return either LLMMessageContents
 * directly or a promise resolving to one.
 *
 * NOTE(review): if no registered tool matches the requested name, no tool result
 * is produced for that tool-use id — verify the providers tolerate a missing
 * tool_result block.
 *
 * @return PromiseInterface resolves with the final LLMResponse of the continued run
 */
private function processToolUseResponse(LLMResponse $response, LLMClient $client, LLMRequest $request, ?callable $feedbackCallback): PromiseInterface {
    $toolResponseContents = [];

    foreach ($response->getConversation()->getLastMessage()->getContents() as $content) {
        if ($content instanceof LLMMessageToolUse) {
            foreach ($request->getTools() as $tool) {
                if ($tool->getName() === $content->getName()) {
                    $input = $content->getInput();
                    // Skip schema validation entirely for a no-argument call to a tool with no required fields.
                    $noContent = empty($input) && empty($tool->getInputSchema()['required']);

                    if (!$noContent) {
                        try {
                            // The encode/decode round-trip converts nested arrays into the stdClass shape the schema library expects.
                            Schema::import(json_decode(json_encode($tool->getInputSchema())))->in(json_decode(json_encode($input)));
                        } catch (Exception $e) {
                            // Feed the validation error back to the model as the tool's result rather than failing the run.
                            $toolResponseContents[] = Create::promiseFor(new LLMMessageToolResult(
                                $content->getId(),
                                LLMMessageContents::fromErrorString('ERROR: Input is not matching expected schema: ' . $e->getMessage())
                            ));
                            continue;
                        }
                    }

                    $toolResponse = $tool->handle($input);
                    // Normalise synchronous results into promises so all results can be unwrapped uniformly below.
                    if ($toolResponse instanceof LLMMessageContents) {
                        $toolResponse = Create::promiseFor($toolResponse);
                    }
                    $toolResponseContents[] = $toolResponse->then(function (LLMMessageContents $response) use ($content) {
                        return new LLMMessageToolResult($content->getId(), $response);
                    });
                }
            }
        }
    }

    // Utils::unwrap waits for every tool promise; all results travel back in one user message.
    $newRequest = $response->getRequest()->withMessage(LLMMessage::createFromUser(new LLMMessageContents(Utils::unwrap($toolResponseContents))));
    $this->logger?->requestStarted($newRequest);

    // Use sendAndProcessRequest to ensure full processing of the response, including potential nested tool uses
    return $this->sendAndProcessRequest($client, $newRequest, $feedbackCallback);
}
101 |
/**
 * Applies the optional feedback callback to a finished response. If the callback
 * returns an LLMMessage, it is appended to the conversation and the loop continues;
 * a null return (or no callback) resolves with the response as-is.
 *
 * @throws InvalidArgumentException when the callback returns a non-null, non-LLMMessage value
 */
private function postProcessResponse(LLMResponse $llmResponse, LLMClient $LLMClient, ?callable $feedbackCallback): PromiseInterface {
    if ($feedbackCallback === null) {
        return Create::promiseFor($llmResponse);
    }

    $feedback = $feedbackCallback($llmResponse);
    if ($feedback === null) {
        // Callback accepted the response; stop iterating.
        return Create::promiseFor($llmResponse);
    }

    if (!$feedback instanceof LLMMessage) {
        throw new InvalidArgumentException('Feedback callback must return an instance of LLMMessage');
    }

    $nextRequest = $llmResponse->getRequest()->withMessage($feedback);

    return $this->sendAndProcessRequest($LLMClient, $nextRequest, $feedbackCallback);
}
119 |
120 | }
121 |
--------------------------------------------------------------------------------
/src/Client/Anthropic/AnthropicClient.php:
--------------------------------------------------------------------------------
1 | httpClient) {
29 | $this->httpClient = HttpClientFactory::createClient($this->customHttpMiddleware);
30 | }
31 |
32 | return $this->httpClient;
33 | }
34 |
/**
 * Returns the HTTP client used for model calls. Falls back to the plain client
 * when no cache is configured; otherwise lazily builds and memoises a
 * cache-aware client.
 */
private function getCachedHttpClient(): Client {
    if (!$this->cache) {
        return $this->getHttpClient();
    }

    return $this->cachedHttpClient ??= HttpClientFactory::createClient($this->customHttpMiddleware, $this->cache);
}
45 |
/**
 * Builds the common Anthropic API headers: API key, pinned API version and gzip
 * support, plus an 'anthropic-beta' header when beta features are enabled.
 */
private function getHeaders(): array {
    $headers = [
        'accept-encoding' => 'gzip',
        'anthropic-version' => '2023-06-01',
        'x-api-key' => $this->apiKey,
    ];

    if ($this->betaFeatures !== []) {
        $headers['anthropic-beta'] = implode(',', $this->betaFeatures);
    }

    return $headers;
}
58 |
/**
 * Posts an encoded request payload to the Anthropic messages endpoint.
 * Structured 400 error payloads ({"type":"error",...}) are translated into
 * LLMClientException; all other failures are re-thrown unchanged.
 */
private function invokeModel(array $data): PromiseInterface {
    $options = [
        'headers' => $this->getHeaders(),
        'json' => $data,
    ];

    return $this->getCachedHttpClient()->postAsync('https://api.anthropic.com/v1/messages', $options)->then(
        function (ResponseInterface $response) {
            $payload = json_decode((string) $response->getBody(), true, 512, JSON_THROW_ON_ERROR);

            return new ModelResponse($payload, (int) $response->getHeaderLine('X-Request-Duration-ms'));
        },
        function (\Throwable $e) {
            if ($e instanceof ClientException && $e->getResponse()->getStatusCode() === 400) {
                $body = json_decode((string) $e->getResponse()->getBody(), true, 512, JSON_THROW_ON_ERROR);
                if (isset($body['type'], $body['error']['type'], $body['error']['message']) && $body['type'] === 'error') {
                    throw new LLMClientException($body['error']['type'] . ': ' . $body['error']['message'], 400, $e);
                }
            }

            throw $e;
        }
    );
}
75 |
/**
 * Encodes and sends a request; if decoding yields a follow-up LLMRequest instead
 * of a final LLMResponse, another round-trip is performed recursively.
 */
public function sendRequestAsync(LLMRequest $request): PromiseInterface {
    return $this->invokeModel($this->encodeRequest($request))->then(
        function (ModelResponse $modelResponse) use ($request): LLMResponse|PromiseInterface {
            $decoded = $this->decodeResponse($request, $modelResponse);

            if (!$decoded instanceof LLMResponse) {
                // Decoder handed back a follow-up request; continue the exchange.
                return $this->sendRequestAsync($decoded);
            }

            return $decoded;
        }
    );
}
86 |
/**
 * Submits the given requests as one Anthropic message batch.
 *
 * @param LLMRequest[] $requests keyed by caller-chosen custom id
 * @return string batch id
 */
public function createBatch(array $requests): string {
    $batchedRequests = [];
    foreach ($requests as $customId => $request) {
        $batchedRequests[] = [
            'custom_id' => $customId,
            'params' => $this->encodeRequest($request),
        ];
    }

    $response = $this->getHttpClient()->post('https://api.anthropic.com/v1/messages/batches', [
        'headers' => $this->getHeaders(),
        'json' => ['requests' => $batchedRequests],
    ]);

    return json_decode((string) $response->getBody(), true, 512, JSON_THROW_ON_ERROR)['id'];
}
109 |
/**
 * Polls an Anthropic message batch.
 *
 * Returns null while the batch is still in progress. Once it has ended, fetches
 * the results JSONL file and returns a map of custom id => concatenated text
 * content. Any other processing status raises a RuntimeException.
 *
 * @return array<string|int, string>|null
 * @throws \RuntimeException on an unexpected processing status
 */
public function retrieveBatch(string $batchId): ?array {
    $response = json_decode($this->getHttpClient()->get('https://api.anthropic.com/v1/messages/batches/' . $batchId, [
        'headers' => $this->getHeaders(),
    ])->getBody(), true, 512, JSON_THROW_ON_ERROR);

    if ($response['processing_status'] === 'in_progress') {
        return null;
    }

    if ($response['processing_status'] === 'ended') {
        // Results are delivered as one JSON document per line.
        $results = explode("\n", trim($this->getHttpClient()->get($response['results_url'], ['headers' => $this->getHeaders()])->getBody()));
        $responses = [];
        foreach ($results as $row) {
            $result = json_decode($row, true, 512, JSON_THROW_ON_ERROR);
            $content = '';
            foreach ($result['result']['message']['content'] as $contentPart) {
                if (is_string($contentPart)) {
                    $content .= $contentPart;
                } elseif ($contentPart['type'] === 'text') {
                    $content .= $contentPart['text'];
                }
            }
            $responses[$result['custom_id']] = $content;
        }

        return $responses;
    }

    // BUG FIX: the message previously read $response['status'], a key that does not
    // exist in this payload (the API reports 'processing_status'), which itself
    // triggered an undefined-index error on this failure path.
    throw new \RuntimeException('Unexpected batch status ' . $response['processing_status'] . ' - ' . json_encode($response, JSON_THROW_ON_ERROR));
}
138 |
/**
 * Returns the provider code identifying this client (the class constant CODE).
 */
public function getCode(): string {
    return self::CODE;
}
142 | }
143 |
--------------------------------------------------------------------------------
/docs/guides/multimodal.md:
--------------------------------------------------------------------------------
1 | # Multimodal Support
2 |
3 | PHP LLM supports multimodal AI agents that can process both text and other content types like images and PDFs alongside your prompts.
4 |
5 | ## Sending Images
6 |
7 | AI agents can analyze images using the `LLMMessageImage` class. Images must be base64-encoded.
8 |
9 | ### From File Path
10 |
11 | ```php
12 | run(
110 | client: $anthropic,
111 | request: new LLMRequest(
112 | model: new AnthropicClaude45Sonnet(AnthropicClaude45Sonnet::VERSION_20250929),
113 | conversation: new LLMConversation([
114 | LLMMessage::createFromUser(new LLMMessageContents([
115 | new LLMMessageText('What objects are in this image?'),
116 | new LLMMessageImage('base64', 'image/jpeg', $imageData)
117 | ]))
118 | ]),
119 | )
120 | );
121 |
122 | echo $response->getLastText();
123 | ```
124 |
125 | ## Combining Multiple Media
126 |
127 | You can include multiple images and/or PDFs in a single message:
128 |
129 | ```php
130 | 'object',
31 | 'properties' => [
32 | 'location' => [
33 | 'type' => 'string',
34 | 'description' => 'City name',
35 | ],
36 | ],
37 | 'required' => ['location'],
38 | ],
39 | fn() => [] // Empty handler for test
40 | );
41 |
42 | // Create a simple request with tool
43 | $conversation = new LLMConversation([
44 | LLMMessage::createFromUserString('What is the weather?'),
45 | ]);
46 |
47 | $request = new LLMRequest(
48 | model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
49 | conversation: $conversation,
50 | tools: [$weatherTool]
51 | );
52 |
53 | $encoded = $encoder->encodeRequest($request);
54 |
55 | // Verify tool config
56 | $this->assertArrayHasKey('tools', $encoded);
57 | $this->assertCount(1, $encoded['tools']);
58 |
59 | // Check tool properties
60 | $this->assertEquals('weather', $encoded['tools'][0]['name']);
61 | $this->assertEquals('Get current weather', $encoded['tools'][0]['description']);
62 | $this->assertArrayHasKey('input_schema', $encoded['tools'][0]);
63 |
64 | // Check schema definition
65 | $schema = $encoded['tools'][0]['input_schema'];
66 | $this->assertEquals('object', $schema['type']);
67 | $this->assertArrayHasKey('properties', $schema);
68 | $this->assertArrayHasKey('location', $schema['properties']);
69 |
70 | // Verify tool_choice
71 | $this->assertArrayHasKey('tool_choice', $encoded);
72 | $this->assertEquals('auto', $encoded['tool_choice']['type']);
73 | }
74 |
public function testToolUseAndResults(): void {
    $encoder = new AnthropicEncoder();

    // Conversation: user asks -> assistant thinks and calls a tool -> user returns the tool result.
    $conversation = new LLMConversation([
        LLMMessage::createFromUserString('What is 2+2?'),
        LLMMessage::createFromAssistant(new LLMMessageContents([
            new LLMMessageReasoning('I should use the calculator', 'sig123', false),
            new LLMMessageToolUse('tool-abc', 'calculator', ['expression' => '2+2'], false),
        ])),
        LLMMessage::createFromUser(new LLMMessageContents([
            new LLMMessageToolResult('tool-abc', LLMMessageContents::fromArrayData(['result' => 4]), false),
        ])),
    ]);

    $encoded = $encoder->encodeRequest(new LLMRequest(
        model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
        conversation: $conversation
    ));

    $this->assertCount(3, $encoded['messages']);

    $assistant = $encoded['messages'][1];
    $this->assertEquals('assistant', $assistant['role']);
    $this->assertCount(2, $assistant['content']);

    // Reasoning is encoded as an Anthropic "thinking" block.
    $this->assertEquals('thinking', $assistant['content'][0]['type']);
    $this->assertEquals('I should use the calculator', $assistant['content'][0]['thinking']);
    $this->assertEquals('sig123', $assistant['content'][0]['signature']);

    // The tool invocation block carries id, name and raw input.
    $this->assertEquals('tool_use', $assistant['content'][1]['type']);
    $this->assertEquals('tool-abc', $assistant['content'][1]['id']);
    $this->assertEquals('calculator', $assistant['content'][1]['name']);
    $this->assertEquals(['expression' => '2+2'], $assistant['content'][1]['input']);

    // The tool result travels back as a user message referencing the tool_use id.
    $toolResultMessage = $encoded['messages'][2];
    $this->assertEquals('user', $toolResultMessage['role']);
    $this->assertEquals('tool_result', $toolResultMessage['content'][0]['type']);
    $this->assertEquals('tool-abc', $toolResultMessage['content'][0]['tool_use_id']);
    $this->assertSame([['type' => 'text', 'text' => '{"result":4}']], $toolResultMessage['content'][0]['content']);
}
127 |
public function testReasoningConfig(): void {
    $encoder = new AnthropicEncoder();

    $request = new LLMRequest(
        model: new AnthropicClaude35Sonnet(AnthropicClaude35Sonnet::VERSION_20241022),
        conversation: new LLMConversation([
            LLMMessage::createFromUserString('Solve this complex problem'),
        ]),
        reasoningConfig: new ReasoningBudget(2000)
    );

    $encoded = $encoder->encodeRequest($request);

    // A ReasoningBudget maps onto Anthropic's extended-thinking parameters.
    $this->assertArrayHasKey('thinking', $encoded);
    $this->assertEquals('enabled', $encoded['thinking']['type']);
    $this->assertEquals(2000, $encoded['thinking']['budget_tokens']);
}
149 | }
150 |
--------------------------------------------------------------------------------
/docs/guides/reasoning.md:
--------------------------------------------------------------------------------
1 | # Reasoning Models
2 |
3 | Reasoning models like OpenAI's o3 and o4 series spend additional computation time thinking through problems before responding. This makes them particularly effective for complex tasks requiring deep analysis, mathematics, coding, and logical reasoning.
4 |
5 | ## Overview
6 |
7 | Traditional language models generate responses token-by-token immediately. Reasoning models add an internal "thinking" phase where they:
8 | - Break down complex problems
9 | - Consider multiple approaches
10 | - Verify their reasoning
11 | - Refine their answers
12 |
13 | This results in more accurate responses for challenging tasks, at the cost of higher latency and token usage.
14 |
15 | ## Configuring Reasoning
16 |
17 | PHP LLM provides two ways to configure reasoning models:
18 |
19 | ### Reasoning Effort
20 |
21 | Control how much computational effort the model spends reasoning:
22 |
23 | ```php
24 | run(
74 | client: $openai,
75 | request: new LLMRequest(
76 | model: new GPTo3(GPTo3::VERSION_2025_04_16),
77 | conversation: new LLMConversation([
78 | LLMMessage::createFromUserString(
79 | 'A farmer has 17 sheep. All but 9 die. How many sheep are left alive?'
80 | )
81 | ]),
        reasoningConfig: ReasoningEffort::HIGH
83 | )
84 | );
85 |
86 | echo $response->getLastText(); // "9 sheep are left alive"
87 | ```
88 |
89 | ## When to Use Reasoning Models
90 |
91 | **Ideal Use Cases:**
92 | - ✅ Complex mathematical problems
93 | - ✅ Advanced coding challenges
94 | - ✅ Logical puzzles and riddles
95 | - ✅ Scientific analysis
96 | - ✅ Multi-step problem solving
97 | - ✅ Tasks requiring verification
98 |
99 | **Not Ideal For:**
100 | - ❌ Simple queries
101 | - ❌ Creative writing
102 | - ❌ Casual conversation
103 | - ❌ Tasks requiring fast responses
104 | - ❌ Cost-sensitive applications
105 |
106 | ## Supported Models
107 |
108 | ### OpenAI Reasoning Models
109 |
110 | ```php
111 | run($client, $request);
151 | $usage = $response->getTokenUsage();
152 |
153 | echo "Input tokens: " . $usage->getInputTokens() . "\n";
154 | echo "Reasoning tokens: " . $usage->getReasoningTokens() . "\n";
155 | echo "Output tokens: " . $usage->getOutputTokens() . "\n";
156 | echo "Total cost: $" . $usage->getTotalCost() . "\n";
157 | ```
158 |
159 | ## Combining with Other Features
160 |
161 | ### With Tools
162 |
163 | Reasoning models work excellently with tools for complex agent workflows:
164 |
165 | ```php
166 | run(
182 | client: $openai,
183 | request: new LLMRequest(
184 | model: new GPTo3(GPTo3::VERSION_2025_04_16),
185 | conversation: $conversation,
186 | reasoningConfig: ReasoningEffort::HIGH
187 | ),
188 | feedbackCallback: function ($response) {
189 | // Validate the reasoning model's output
190 | return $isValid ? null : LLMMessage::createFromUserString('Please reconsider...');
191 | }
192 | );
193 | ```
194 |
195 | ## Best Practices
196 |
197 | 1. **Start with MEDIUM effort** - Only increase if needed
198 | 2. **Set budgets for production** - Prevent runaway costs
199 | 3. **Use for appropriate tasks** - Don't use reasoning models for simple queries
200 | 4. **Monitor costs closely** - Track token usage and adjust budgets
201 | 5. **Test with o4-mini first** - More cost-effective for development
202 |
203 | ## Provider Support
204 |
205 | - ✅ **OpenAI** - o3, o4-mini (native reasoning support)
- ✅ **Anthropic** - Extended thinking supported via `ReasoningBudget` (encoded as the `thinking` request parameter)
207 | - ❌ **Google Gemini** - Not available
208 | - ⚠️ **OpenAI-compatible** - Depends on provider
209 |
210 | ## See Also
211 |
212 | - [Configuration Guide](configuration.md) - All request configuration options
213 | - [Feedback Loops](feedback-loops.md) - Validate reasoning outputs
214 | - [OpenAI Provider Documentation](../providers/README.md) - OpenAI-specific features
215 |
--------------------------------------------------------------------------------
/docs/examples/best-practices.md:
--------------------------------------------------------------------------------
1 | # Best Practices
2 |
3 | Key patterns and principles for building robust, production-ready AI applications with PHP LLM.
4 |
5 | ---
6 |
7 | ## Caching
8 |
9 | ### Why Caching Matters
10 |
11 | Caching is essential for building efficient and cost-effective LLM applications:
12 |
13 | - **Cost savings**: Eliminate redundant API calls for identical requests, reducing costs significantly
14 | - **Performance**: Return cached responses instantly instead of waiting for API roundtrips
15 | - **Reliability**: Reduce dependency on external API availability and rate limits
16 | - **Development efficiency**: Speed up testing and development cycles with instant cached responses
17 |
18 | ### When to Use Caching
19 |
20 | Use caching when:
21 |
22 | - You have repeated identical requests (same model, same conversation, same parameters)
23 | - You're working in development/testing environments with repetitive queries
24 | - Cost optimization is a priority for your application
25 | - You have predictable query patterns that are likely to repeat
26 |
27 | ### Basic Implementation
28 |
29 | ```php
30 | run(
76 | client: $client,
77 | request: new LLMRequest(
78 | model: $model,
79 | conversation: $conversation
80 | ),
81 | feedbackCallback: function (LLMResponse $response) use (&$iteration, $maxIterations): ?LLMMessage {
82 | $iteration++;
83 |
84 | // CRITICAL: Stop after max attempts to prevent infinite loops
85 | if ($iteration >= $maxIterations) {
86 | return null; // Stop iteration
87 | }
88 |
89 | // Your validation logic here
90 | $text = $response->getLastText();
91 | if (!isValidJson($text)) {
92 | return LLMMessage::createFromUserString(
93 | 'The response was not valid JSON. Please provide a valid JSON response.'
94 | );
95 | }
96 |
97 | return null; // Validation passed
98 | }
99 | );
100 | ```
101 |
102 | Without a loop counter, a feedback loop can continue indefinitely if the LLM cannot satisfy the validation criteria, leading to excessive API costs and application hangs.
103 |
104 | ### Learn More
105 |
106 | For complete examples of validation patterns, nested LLM validation, progressive feedback strategies, and combining feedback loops with tools, see the [Feedback Loops Guide](../guides/feedback-loops.md).
107 |
108 | ---
109 |
110 | ## Async Operations for Parallel Tool Calls
111 |
112 | ### Why Async Operations Matter
113 |
114 | Async operations are crucial for performance and efficiency in LLM applications:
115 |
116 | - **Performance**: Process multiple requests concurrently instead of sequentially
117 | - **Efficiency**: Reduce total execution time when handling multiple independent operations
118 | - **Scalability**: Handle higher throughput with the same resources
119 | - **Tool calls**: Execute multiple independent tool calls in parallel, dramatically speeding up agentic workflows
120 |
121 | ### When to Use Async Operations
122 |
123 | Use async operations when:
124 |
125 | - You have multiple independent LLM requests to process
126 | - Tool calls can be executed in parallel (no dependencies between them)
127 | - Processing large batches of items
128 | - Building real-time applications that need low latency
129 |
130 | ### Parallel Tool Call Pattern
131 |
132 | The most important use case for async operations is parallel tool execution. When an LLM agent needs to call multiple tools that don't depend on each other's results, async operations allow them to execute simultaneously rather than waiting for each to complete sequentially.
133 |
134 | For example, if an agent needs to fetch data from three different sources, running them in parallel can reduce execution time from 9 seconds (3 × 3 seconds) to just 3 seconds.
135 |
136 | ```php
137 | runAsync(
143 | client: $client,
144 | request: new LLMRequest(
145 | model: $model,
146 | conversation: new LLMConversation([
147 | LLMMessage::createFromUserString("Analyze: {$item}")
148 | ])
149 | )
150 | );
151 | }
152 |
153 | // Wait for all to complete
154 | $responses = Promise\Utils::all($promises)->wait();
155 |
156 | // Process results
157 | foreach ($responses as $response) {
158 | echo $response->getLastText() . "\n";
159 | }
160 | ```
161 |
162 | ### Learn More
163 |
164 | For advanced async patterns, batch processing strategies, handling async tool execution, and error handling in concurrent operations, see:
165 |
166 | - [Tools & Function Calling Guide](../guides/tools.md) - Tool implementation with async support
167 | - [Batch Processing Guide](../guides/batch-processing.md) - Large-scale async operations
168 |
169 | ---
170 |
171 | ## See Also
172 |
173 | - [Caching Guide](../guides/caching.md) - Comprehensive caching documentation
174 | - [Feedback Loops Guide](../guides/feedback-loops.md) - Building self-correcting agents
175 | - [Tools Guide](../guides/tools.md) - Function calling and tool usage
176 | - [Batch Processing Guide](../guides/batch-processing.md) - High-volume processing
177 | - [State Management](state-management.md) - Managing conversation state
178 |
--------------------------------------------------------------------------------
/src/Client/OpenAI/AbstractOpenAIClient.php:
--------------------------------------------------------------------------------
1 | httpClient) {
34 | $this->httpClient = HttpClientFactory::createClient($this->customHttpMiddleware, null, $this->getHeaders());
35 | }
36 |
37 | return $this->httpClient;
38 | }
39 |
/**
 * Returns the HTTP client used for chat requests. Falls back to the plain client
 * when no cache is configured; otherwise lazily builds and memoises a
 * cache-aware client with the provider headers attached.
 */
private function getCachedHttpClient(): Client {
    if (!$this->cache) {
        return $this->getHttpClient();
    }

    return $this->cachedHttpClient ??= HttpClientFactory::createClient($this->customHttpMiddleware, $this->cache, $this->getHeaders());
}
50 |
/**
 * Sends a prepared HTTP request via the (possibly cache-aware) client and wraps
 * the decoded JSON body plus the measured duration header into a ModelResponse.
 */
private function sendCachedRequestAsync(RequestInterface $httpRequest): PromiseInterface {
    $toModelResponse = function (ResponseInterface $response): ModelResponse {
        $payload = json_decode((string) $response->getBody(), true, 512, JSON_THROW_ON_ERROR);

        return new ModelResponse($payload, (int) $response->getHeaderLine('X-Request-Duration-ms'));
    };

    return $this->getCachedHttpClient()->sendAsync($httpRequest)->then($toModelResponse);
}
56 |
/**
 * Encodes and sends a chat request; if decoding yields a follow-up LLMRequest
 * instead of a final LLMResponse, another round-trip is performed recursively.
 */
public function sendRequestAsync(LLMRequest $request): PromiseInterface {
    return $this->sendCachedRequestAsync($this->getChatRequest($request))->then(function (ModelResponse $modelResponse) use ($request) {
        $decoded = $this->decodeResponse($request, $modelResponse);

        if (!$decoded instanceof LLMResponse) {
            // Decoder handed back a follow-up request; continue the exchange.
            return $this->sendRequestAsync($decoded);
        }

        return $decoded;
    });
}
67 |
/**
 * Builds the POST request for the chat-completions endpoint with the encoded
 * payload as a JSON body and gzip response support.
 */
private function getChatRequest(LLMRequest $request): RequestInterface {
    $headers = array_merge($this->getHeaders(), [
        'Content-Type' => 'application/json',
        'accept-encoding' => 'gzip',
    ]);
    $body = json_encode($this->encodeRequest($request), JSON_THROW_ON_ERROR);

    return new Request('POST', $this->getBaseUrl() . '/chat/completions', $headers, $body);
}
74 |
/**
 * Fetches embeddings for a list of texts, preserving the caller's array keys.
 * Texts are sent in chunks of 100 per API call; results are re-associated with
 * their original keys via the 'index' field of each returned embedding.
 *
 * @param array<string|int, string> $texts texts to embed, keyed by caller-chosen ids
 * @return array<string|int, float[]> embedding vectors keyed like $texts
 */
public function getBatchEmbeddings(array $texts, string $model = 'text-embedding-3-small', int $dimensions = 512): array {
    $results = [];
    // DEAD CODE REMOVED: a $totalTokens counter was accumulated from
    // usage.total_tokens on every chunk but never read or returned.
    foreach (array_chunk($texts, 100, true) as $chunk) {
        $keys = array_keys($chunk);
        $response = json_decode($this->getHttpClient()->post($this->getBaseUrl() . '/embeddings', [
            'json' => [
                'model' => $model,
                'dimensions' => $dimensions,
                'input' => array_values($chunk),
            ],
        ])->getBody(), true, 512, JSON_THROW_ON_ERROR);
        // Map each embedding back to its original key via the response's 'index'.
        foreach ($response['data'] as $embedding) {
            $results[$keys[$embedding['index']]] = $embedding['embedding'];
        }
    }

    return $results;
}
95 |
/**
 * Uploads the given requests as a JSONL batch file and schedules a batch run.
 *
 * @param LLMRequest[] $requests keyed by caller-chosen custom id
 * @return string batch id
 */
public function createBatch(array $requests): string {
    $endpoint = '/v1/chat/completions';

    // Batch input format: one JSON request descriptor per line (JSONL).
    $jsonl = '';
    foreach ($requests as $customId => $request) {
        $jsonl .= json_encode([
            'custom_id' => $customId,
            'method' => 'POST',
            'url' => $endpoint,
            'body' => $this->encodeRequest($request),
        ], JSON_THROW_ON_ERROR) . "\n";
    }

    $fileResponse = $this->getHttpClient()->post($this->getBaseUrl() . '/files', [
        'multipart' => [
            ['name' => 'purpose', 'contents' => 'batch'],
            ['name' => 'file', 'contents' => $jsonl, 'filename' => 'batch.jsonl'],
        ],
    ]);
    $uploadedFile = json_decode((string) $fileResponse->getBody(), true, 512, JSON_THROW_ON_ERROR);

    $batchResult = $this->getHttpClient()->post($this->getBaseUrl() . '/batches', [
        'json' => [
            'completion_window' => '24h',
            'endpoint' => $endpoint,
            'input_file_id' => $uploadedFile['id'],
        ],
    ]);

    return json_decode((string) $batchResult->getBody(), true, 512, JSON_THROW_ON_ERROR)['id'];
}
131 |
/**
 * Polls an OpenAI batch and, once completed, returns its results.
 *
 * Returns null while the batch has any status other than 'completed'. When only
 * an error file was produced, the error content is surfaced as a RuntimeException
 * — unless the batch completed more than three days ago, in which case an empty
 * array is returned (NOTE(review): presumably because the error file has expired
 * by then — confirm the retention rationale).
 *
 * @return array<string|int, string>|null map of custom id => concatenated text content
 * @throws \RuntimeException when the batch produced only an error file
 */
public function retrieveBatch(string $batchId): ?array {
    $response = json_decode($this->getHttpClient()->get($this->getBaseUrl() . '/batches/' . $batchId)->getBody(), true, 512, JSON_THROW_ON_ERROR);
    if ($response['status'] !== 'completed') {
        return null;
    }

    if ($response['output_file_id'] === null && $response['error_file_id']) {
        if ($response['completed_at'] < time() - 3 * 24 * 60 * 60) {
            return [];
        }
        $file = (string) $this->getHttpClient()->get($this->getBaseUrl() . '/files/' . $response['error_file_id'] . '/content')->getBody();

        throw new \RuntimeException('Batch failed: ' . substr($file, 0, 1000));
    }

    $file = (string) $this->getHttpClient()->get($this->getBaseUrl() . '/files/' . $response['output_file_id'] . '/content')->getBody();
    $responses = [];
    foreach (explode("\n", trim($file)) as $row) {
        $result = json_decode($row, true, 512, JSON_THROW_ON_ERROR);
        $content = '';
        // BUG FIX: the previous loop reassigned $content to each choice's content,
        // then appended it to itself (doubling strings) and treated the string as
        // an array in the elseif branch. Now the accumulator and the per-choice
        // part are separate variables, mirroring AnthropicClient::retrieveBatch.
        foreach ($result['response']['body']['choices'] as $choice) {
            $part = $choice['message']['content'];
            if (is_string($part)) {
                $content .= $part;
            } elseif ($part['type'] === 'text') {
                $content .= $part['text'];
            }
        }
        $responses[$result['custom_id']] = $content;
    }

    return $responses;
}
166 | }
167 |
--------------------------------------------------------------------------------
/tests/LLMConversationTest.php:
--------------------------------------------------------------------------------
1 | '2+2'], false);
24 | $toolResultContent = new LLMMessageToolResult('tool-123', LLMMessageContents::fromArrayData(['result' => 4]), false);
25 |
26 | // Create messages with different content types
27 | $systemMessage = LLMMessage::createFromSystem(new LLMMessageContents([$textContent]));
28 | $userMessage = LLMMessage::createFromUser(new LLMMessageContents([$textContent, $imageContent, $pdfContent]));
29 | $assistantMessage = LLMMessage::createFromAssistant(new LLMMessageContents([
30 | $textContent,
31 | $reasoningContent,
32 | $toolUseContent,
33 | $toolResultContent,
34 | ]));
35 |
36 | // Create a conversation with the messages
37 | $conversation = new LLMConversation([
38 | $systemMessage,
39 | $userMessage,
40 | $assistantMessage,
41 | ]);
42 |
43 | // Serialize to JSON and then back to array
44 | $json = json_encode($conversation, JSON_THROW_ON_ERROR);
45 | $data = json_decode($json, true, 512, JSON_THROW_ON_ERROR);
46 |
47 | // Create a new conversation from the JSON data
48 | $deserializedConversation = LLMConversation::fromJson($data);
49 |
50 | // Assert that the deserialized conversation has the same number of messages
51 | $originalMessages = $conversation->getMessages();
52 | $deserializedMessages = $deserializedConversation->getMessages();
53 |
54 | $this->assertCount(count($originalMessages), $deserializedMessages);
55 |
56 | // Test individual messages
57 | foreach ($originalMessages as $index => $originalMessage) {
58 | $deserializedMessage = $deserializedMessages[$index];
59 |
60 | // Check message type
61 | $this->assertEquals($originalMessage->isSystem(), $deserializedMessage->isSystem());
62 | $this->assertEquals($originalMessage->isUser(), $deserializedMessage->isUser());
63 | $this->assertEquals($originalMessage->isAssistant(), $deserializedMessage->isAssistant());
64 |
65 | // Check content
66 | $originalContents = $originalMessage->getContents();
67 | $deserializedContents = $deserializedMessage->getContents();
68 |
69 | $this->assertCount(count($originalContents), $deserializedContents);
70 |
71 | // Check each content type
72 | foreach ($originalContents as $contentIndex => $originalContent) {
73 | $deserializedContent = $deserializedContents[$contentIndex];
74 |
75 | // Check the type of content
76 | $this->assertInstanceOf(get_class($originalContent), $deserializedContent);
77 |
78 | // Check the cached property
79 | $this->assertEquals($originalContent->isCached(), $deserializedContent->isCached());
80 |
81 | // Specific assertions based on content type
82 | if ($originalContent instanceof LLMMessageText) {
83 | $this->assertEquals($originalContent->getText(), $deserializedContent->getText());
84 | } elseif ($originalContent instanceof LLMMessageImage) {
85 | $this->assertEquals($originalContent->getEncoding(), $deserializedContent->getEncoding());
86 | $this->assertEquals($originalContent->getMediaType(), $deserializedContent->getMediaType());
87 | $this->assertEquals($originalContent->getData(), $deserializedContent->getData());
88 | } elseif ($originalContent instanceof LLMMessagePdf) {
89 | $this->assertEquals($originalContent->getEncoding(), $deserializedContent->getEncoding());
90 | $this->assertEquals($originalContent->getData(), $deserializedContent->getData());
91 | } elseif ($originalContent instanceof LLMMessageReasoning) {
92 | $this->assertEquals($originalContent->getText(), $deserializedContent->getText());
93 | $this->assertEquals($originalContent->getSignature(), $deserializedContent->getSignature());
94 | } elseif ($originalContent instanceof LLMMessageToolUse) {
95 | $this->assertEquals($originalContent->getId(), $deserializedContent->getId());
96 | $this->assertEquals($originalContent->getName(), $deserializedContent->getName());
97 | $this->assertEquals($originalContent->getInput(), $deserializedContent->getInput());
98 | } elseif ($originalContent instanceof LLMMessageToolResult) {
99 | $this->assertEquals($originalContent->getId(), $deserializedContent->getId());
100 | $this->assertEquals($originalContent->getContent(), $deserializedContent->getContent());
101 | }
102 | }
103 | }
104 | }
105 |
106 | public function testWithMessage(): void {
107 | // Create initial conversation
108 | $textContent = new LLMMessageText('Initial message', false);
109 | $systemMessage = LLMMessage::createFromSystem(new LLMMessageContents([$textContent]));
110 | $conversation = new LLMConversation([$systemMessage]);
111 |
112 | // Add a new message
113 | $newTextContent = new LLMMessageText('New message', false);
114 | $userMessage = LLMMessage::createFromUser(new LLMMessageContents([$newTextContent]));
115 | $updatedConversation = $conversation->withMessage($userMessage);
116 |
117 | // Assert that the original conversation is unchanged
118 | $this->assertCount(1, $conversation->getMessages());
119 |
120 | // Assert that the new conversation has the additional message
121 | $this->assertCount(2, $updatedConversation->getMessages());
122 |
123 | // Verify the new message is in the updated conversation
124 | $messages = $updatedConversation->getMessages();
125 | $this->assertTrue($messages[0]->isSystem());
126 | $this->assertTrue($messages[1]->isUser());
127 |
128 | // Test JSON serialization and deserialization of updated conversation
129 | $json = json_encode($updatedConversation, JSON_THROW_ON_ERROR);
130 | $data = json_decode($json, true, 512, JSON_THROW_ON_ERROR);
131 | $deserializedConversation = LLMConversation::fromJson($data);
132 |
133 | $this->assertCount(2, $deserializedConversation->getMessages());
134 | }
135 | }
136 |
--------------------------------------------------------------------------------
/tests/Integration/IntegrationTestBase.php:
--------------------------------------------------------------------------------
1 | checkEnvironmentVariables();
39 |
40 | // Setup cache
41 | $cacheDir = sys_get_temp_dir() . '/llm-integration-tests';
42 | if (!is_dir($cacheDir)) {
43 | mkdir($cacheDir, 0777, true);
44 | }
45 | $this->cache = new FileCache($cacheDir);
46 |
47 | // Setup cost tracking
48 | $this->maxCost = (float) ($_ENV['INTEGRATION_TEST_MAX_COST'] ?? 1.0);
49 | $this->verbose = ($_ENV['INTEGRATION_TEST_VERBOSE'] ?? 'false') === 'true';
50 | }
51 |
52 | protected function tearDown(): void {
53 | parent::tearDown();
54 |
55 | if ($this->verbose && $this->totalCost > 0) {
56 | echo sprintf("\nTest cost: $%.4f\n", $this->totalCost);
57 | }
58 | }
59 |
60 | protected static function loadEnvironmentStatic(): void {
61 | if (self::$envLoaded) {
62 | return;
63 | }
64 |
65 | $envFile = dirname(__DIR__, 2) . '/.env';
66 | if (!file_exists($envFile)) {
67 | self::$envLoaded = true;
68 | return;
69 | }
70 |
71 | $lines = file($envFile, FILE_IGNORE_NEW_LINES | FILE_SKIP_EMPTY_LINES);
72 | foreach ($lines as $line) {
73 | if (strpos($line, '#') === 0) {
74 | continue;
75 | }
76 |
77 | if (strpos($line, '=') === false) {
78 | continue;
79 | }
80 |
81 | [$key, $value] = explode('=', $line, 2);
82 | $_ENV[trim($key)] = trim($value);
83 | }
84 |
85 | self::$envLoaded = true;
86 | }
87 |
88 | protected function checkEnvironmentVariables(): void {
89 | $requiredVars = $this->getRequiredEnvironmentVariables();
90 | $missing = [];
91 |
92 | foreach ($requiredVars as $var) {
93 | if (empty($_ENV[$var])) {
94 | $missing[] = $var;
95 | }
96 | }
97 |
98 | if (!empty($missing)) {
99 | $this->markTestSkipped(
100 | 'Integration tests require the following environment variables: ' .
101 | implode(', ', $missing) . '. ' .
102 | 'Copy .env.example to .env and fill in your API keys.'
103 | );
104 | }
105 | }
106 |
    /**
     * Get required environment variables for the specific test.
     *
     * Protected so subclasses may override it to require only the providers
     * they actually exercise.
     *
     * @return list<string> names of $_ENV variables that must be non-empty
     */
    protected function getRequiredEnvironmentVariables(): array {
        return ['ANTHROPIC_API_KEY', 'OPENAI_API_KEY', 'GEMINI_API_KEY', 'OPENROUTER_API_KEY'];
    }
114 |
115 | /**
116 | * Get all available LLM clients with their models for testing
117 | * @return array
118 | */
119 | protected function getAllClients(): array {
120 | // Ensure environment is loaded (in case called from data provider)
121 | self::loadEnvironmentStatic();
122 |
123 | // Initialize cache if not already done
124 | if ($this->cache === null) {
125 | $cacheDir = sys_get_temp_dir() . '/llm-integration-tests';
126 | if (!is_dir($cacheDir)) {
127 | mkdir($cacheDir, 0777, true);
128 | }
129 | $this->cache = new FileCache($cacheDir);
130 | }
131 |
132 | $clients = [];
133 |
134 | if (!empty($_ENV['ANTHROPIC_API_KEY'])) {
135 | $clients[] = [
136 | 'client' => new AnthropicClient($_ENV['ANTHROPIC_API_KEY'], $this->cache),
137 | 'model' => new AnthropicClaude35Haiku(AnthropicClaude35Haiku::VERSION_20241022),
138 | 'name' => 'Anthropic Claude 3.5 Haiku',
139 | ];
140 | }
141 |
142 | if (!empty($_ENV['OPENAI_API_KEY'])) {
143 | $clients[] = [
144 | 'client' => new OpenAIClient($_ENV['OPENAI_API_KEY'], '', $this->cache),
145 | 'model' => new GPT4oMini(GPT4oMini::VERSION_2024_07_18),
146 | 'name' => 'OpenAI GPT-4o Mini',
147 | ];
148 | }
149 |
150 | if (!empty($_ENV['GEMINI_API_KEY'])) {
151 | $clients[] = [
152 | 'client' => new GeminiClient($_ENV['GEMINI_API_KEY'], $this->cache),
153 | 'model' => new Gemini20Flash(),
154 | 'name' => 'Google Gemini 2.0 Flash',
155 | ];
156 | }
157 |
158 | if (!empty($_ENV['OPENROUTER_API_KEY'])) {
159 | $clients[] = [
160 | 'client' => new OpenAICompatibleClient($_ENV['OPENROUTER_API_KEY'], 'https://openrouter.ai/api/v1', $this->cache),
161 | 'model' => new LocalModel('openrouter/horizon-beta'),
162 | 'name' => 'OpenRouter',
163 | ];
164 | }
165 |
166 | return $clients;
167 | }
168 |
169 | /**
170 | * Track cost from a response
171 | */
172 | protected function trackCost(float $cost): void {
173 | $this->totalCost += $cost;
174 |
175 | if ($this->totalCost > $this->maxCost) {
176 | $this->fail(sprintf(
177 | 'Test exceeded maximum cost limit. Used: $%.4f, Limit: $%.4f',
178 | $this->totalCost,
179 | $this->maxCost
180 | ));
181 | }
182 | }
183 |
    /**
     * Assert that a string contains text (case-insensitive).
     *
     * Thin wrapper delegating to PHPUnit's
     * assertStringContainsStringIgnoringCase().
     */
    protected function assertContainsIgnoreCase(string $needle, string $haystack, string $message = ''): void {
        $this->assertStringContainsStringIgnoringCase($needle, $haystack, $message);
    }
190 |
191 | /**
192 | * Assert that the response contains any of the given strings
193 | */
194 | protected function assertContainsAny(array $needles, string $haystack, string $message = ''): void {
195 | foreach ($needles as $needle) {
196 | if (stripos($haystack, $needle) !== false) {
197 | // Found a match, assertion passes
198 | return;
199 | }
200 | }
201 |
202 | $this->fail(
203 | $message ?: sprintf(
204 | 'Failed asserting that "%s" contains any of: %s',
205 | substr($haystack, 0, 100) . '...',
206 | implode(', ', array_map(fn($n) => '"' . $n . '"', $needles))
207 | )
208 | );
209 | }
210 | }
211 |
--------------------------------------------------------------------------------
/tests/Client/Gemini/GeminiEncoderToolsTest.php:
--------------------------------------------------------------------------------
1 | encoder = new GeminiEncoder();
23 | }
24 |
25 | public function testRequestWithTools(): void {
26 | // Create a simple request with tools
27 | $conversation = new LLMConversation([
28 | LLMMessage::createFromUserString('What is the weather like in Prague?'),
29 | ]);
30 |
31 | $weatherTool = new CallbackToolDefinition(
32 | 'get_weather',
33 | 'Get the current weather for a location',
34 | [
35 | 'type' => 'object',
36 | 'properties' => [
37 | 'location' => [
38 | 'type' => 'string',
39 | 'description' => 'The city and state, e.g. Prague, CZ',
40 | ],
41 | ],
42 | 'required' => ['location'],
43 | ],
44 | function (array $input) {
45 | return ['temperature' => 22, 'condition' => 'sunny'];
46 | }
47 | );
48 |
49 | $request = new LLMRequest(
50 | model: new Gemini20Flash(),
51 | conversation: $conversation,
52 | tools: [$weatherTool]
53 | );
54 |
55 | $encoded = $this->encoder->encodeRequest($request);
56 |
57 | // Verify tools structure
58 | $this->assertArrayHasKey('tools', $encoded);
59 | $this->assertCount(1, $encoded['tools']);
60 | $this->assertArrayHasKey('functionDeclarations', $encoded['tools'][0]);
61 | $this->assertCount(1, $encoded['tools'][0]['functionDeclarations']);
62 |
63 | // Verify tool properties
64 | $functionDeclaration = $encoded['tools'][0]['functionDeclarations'][0];
65 | $this->assertEquals('get_weather', $functionDeclaration['name']);
66 | $this->assertEquals('Get the current weather for a location', $functionDeclaration['description']);
67 | $this->assertEquals('object', $functionDeclaration['parameters']['type']);
68 | $this->assertArrayHasKey('properties', $functionDeclaration['parameters']);
69 | $this->assertArrayHasKey('location', $functionDeclaration['parameters']['properties']);
70 | $this->assertEquals(['location'], $functionDeclaration['parameters']['required']);
71 | }
72 |
73 | public function testFunctionCallMessage(): void {
74 | // Test a conversation with a function call from the assistant
75 | $conversation = new LLMConversation([
76 | LLMMessage::createFromUserString('What is the weather like in Prague?'),
77 | LLMMessage::createFromAssistant(new LLMMessageContents([
78 | new LLMMessageToolUse(
79 | 'tool_1',
80 | 'get_weather',
81 | ['location' => 'Prague, CZ']
82 | ),
83 | ])),
84 | ]);
85 |
86 | $request = new LLMRequest(
87 | model: new Gemini20Flash(),
88 | conversation: $conversation
89 | );
90 |
91 | $encoded = $this->encoder->encodeRequest($request);
92 |
93 | // Verify function call message
94 | $this->assertCount(2, $encoded['contents']); // User + function call
95 |
96 | // Check function call structure
97 | $functionCall = $encoded['contents'][1];
98 | $this->assertEquals('model', $functionCall['role']);
99 | $this->assertCount(1, $functionCall['parts']);
100 | $this->assertArrayHasKey('function_call', $functionCall['parts'][0]);
101 | $this->assertEquals('get_weather', $functionCall['parts'][0]['function_call']['name']);
102 | $this->assertEquals(['location' => 'Prague, CZ'], $functionCall['parts'][0]['function_call']['args']);
103 | }
104 |
105 | public function testFunctionResultMessage(): void {
106 | // Test a conversation with a function result message
107 | $conversation = new LLMConversation([
108 | LLMMessage::createFromUserString('What is the weather like in Prague?'),
109 | LLMMessage::createFromAssistant(new LLMMessageContents([
110 | new LLMMessageToolUse(
111 | 'tool_1',
112 | 'get_weather',
113 | ['location' => 'Prague, CZ']
114 | ),
115 | ])),
116 | LLMMessage::createFromUser(new LLMMessageContents([
117 | new LLMMessageToolResult(
118 | 'tool_1',
119 | LLMMessageContents::fromArrayData(['temperature' => 22, 'condition' => 'sunny'])
120 | ),
121 | ])),
122 | ]);
123 |
124 | $request = new LLMRequest(
125 | model: new Gemini20Flash(),
126 | conversation: $conversation
127 | );
128 |
129 | $encoded = $this->encoder->encodeRequest($request);
130 |
131 | // Verify function result message
132 | $this->assertCount(3, $encoded['contents']); // User + function call + function result
133 |
134 | // Check function result structure
135 | $functionResult = $encoded['contents'][2];
136 | $this->assertEquals('function', $functionResult['role']);
137 | $this->assertCount(1, $functionResult['parts']);
138 | $this->assertArrayHasKey('function_response', $functionResult['parts'][0]);
139 | }
140 |
141 | public function testCompleteFunctionFlow(): void {
142 | // Test a complete conversation with a user query, function call, function result, and final answer
143 | $conversation = new LLMConversation([
144 | LLMMessage::createFromUserString('What is the weather like in Prague?'),
145 | LLMMessage::createFromAssistant(new LLMMessageContents([
146 | new LLMMessageToolUse(
147 | 'tool_1',
148 | 'get_weather',
149 | ['location' => 'Prague, CZ']
150 | ),
151 | ])),
152 | LLMMessage::createFromUser(new LLMMessageContents([
153 | new LLMMessageToolResult(
154 | 'tool_1',
155 | LLMMessageContents::fromArrayData(['temperature' => 22, 'condition' => 'sunny'])
156 | ),
157 | ])),
158 | LLMMessage::createFromAssistantString('The weather in Prague is sunny with a temperature of 22°C.'),
159 | ]);
160 |
161 | $request = new LLMRequest(
162 | model: new Gemini20Flash(),
163 | conversation: $conversation
164 | );
165 |
166 | $encoded = $this->encoder->encodeRequest($request);
167 |
168 | // Verify the entire conversation flow
169 | $this->assertCount(4, $encoded['contents']);
170 |
171 | // Check final assistant response
172 | $finalResponse = $encoded['contents'][3];
173 | $this->assertEquals('model', $finalResponse['role']);
174 | $this->assertCount(1, $finalResponse['parts']);
175 | $this->assertEquals(
176 | 'The weather in Prague is sunny with a temperature of 22°C.',
177 | $finalResponse['parts'][0]['text']
178 | );
179 | }
180 | }
181 |
--------------------------------------------------------------------------------
/docs/examples/quick-start.md:
--------------------------------------------------------------------------------
1 | # Quick Start Examples
2 |
3 | Get started with PHP LLM in minutes with these basic examples.
4 |
5 | ## Installation
6 |
7 | ```bash
8 | composer require soukicz/llm
9 | ```
10 |
11 | ## Simple Synchronous Request
12 |
13 | The most basic way to interact with an LLM. This pattern is perfect for simple one-off requests where you don't need conversation history.
14 |
15 | ```php
16 | run(
38 | client: $anthropic,
39 | request: new LLMRequest(
40 | model: new AnthropicClaude45Sonnet(AnthropicClaude45Sonnet::VERSION_20250929),
41 | conversation: new LLMConversation([
42 | LLMMessage::createFromUserString('What is PHP?')
43 | ]),
44 | )
45 | );
46 |
47 | // Get the AI's response text
48 | echo $response->getLastText();
49 | ```
50 |
51 | ## Async Request
52 |
53 | Use asynchronous requests when you need to make multiple LLM calls concurrently or when you want non-blocking execution. This uses promises under the hood.
54 |
55 | ```php
56 | runAsync(
61 | client: $anthropic,
62 | request: new LLMRequest(
63 | model: new AnthropicClaude45Sonnet(AnthropicClaude45Sonnet::VERSION_20250929),
64 | conversation: new LLMConversation([
65 | LLMMessage::createFromUserString('Explain async programming')
66 | ]),
67 | )
68 | );
69 |
70 | // Handle the response when it arrives
71 | $promise->then(function (LLMResponse $response) {
72 | echo $response->getLastText();
73 | });
74 |
75 | // You can do other work here while waiting for the response
76 | // Or make multiple async requests and wait for all to complete
77 | ```
78 |
79 | ## Multi-Turn Conversation
80 |
81 | This library uses **immutable objects** - methods like `withMessage()` return a new instance rather than modifying the original. This prevents accidental state mutations and makes your code more predictable.
82 |
83 | ```php
84 | run(
91 | client: $anthropic,
92 | request: new LLMRequest(
93 | model: new AnthropicClaude45Sonnet(AnthropicClaude45Sonnet::VERSION_20250929),
94 | conversation: $conversation,
95 | )
96 | );
97 |
98 | echo "AI: " . $response->getLastText() . "\n"; // "4"
99 |
100 | // Add AI response to conversation (returns new instance)
101 | $conversation = $conversation->withMessage($response->getLastMessage());
102 |
103 | // Add user's follow-up question (returns new instance)
104 | $conversation = $conversation->withMessage(
105 | LLMMessage::createFromUserString('What about 2 * 2?')
106 | );
107 |
108 | // Second turn
109 | $response = $agentClient->run(
110 | client: $anthropic,
111 | request: new LLMRequest(
112 | model: new AnthropicClaude45Sonnet(AnthropicClaude45Sonnet::VERSION_20250929),
113 | conversation: $conversation,
114 | )
115 | );
116 |
117 | echo "AI: " . $response->getLastText() . "\n"; // "4"
118 | ```
119 |
120 | ## Different Providers
121 |
122 | PHP LLM provides a unified interface across multiple LLM providers. Simply swap the client and model - the rest of your code stays the same.
123 |
124 | ### OpenAI
125 |
126 | ```php
127 | run(
134 | client: $openai,
135 | request: new LLMRequest(
136 | model: new GPT5(GPT5::VERSION_2025_08_07),
137 | conversation: $conversation,
138 | )
139 | );
140 | ```
141 |
142 | ### Google Gemini
143 |
144 | ```php
145 | run(
152 | client: $gemini,
153 | request: new LLMRequest(
154 | model: new Gemini25Pro(),
155 | conversation: $conversation,
156 | )
157 | );
158 | ```
159 |
160 | ### OpenRouter
161 |
162 | ```php
163 | run(
173 | client: $client,
174 | request: new LLMRequest(
175 | model: new LocalModel('anthropic/claude-3.5-sonnet'),
176 | conversation: $conversation,
177 | )
178 | );
179 | ```
180 |
181 | ## Using Environment Variables
182 |
183 | ```php
184 | run($client, $request);
212 | echo $response->getLastText();
213 | } catch (LLMClientException $e) {
214 | echo "Error: " . $e->getMessage();
215 | // Handle error: log, retry, fallback, etc.
216 | }
217 | ```
218 |
219 | ## Tracking Costs
220 |
221 | Every response includes token usage and cost information. This helps you monitor API expenses and optimize your prompts.
222 |
223 | ```php
224 | run($client, $request);
226 |
227 | // Token counts
228 | echo "Input tokens: " . $response->getInputTokens() . "\n";
229 | echo "Output tokens: " . $response->getOutputTokens() . "\n";
230 | echo "Total tokens: " . ($response->getInputTokens() + $response->getOutputTokens()) . "\n";
231 |
232 | // Cost breakdown (in USD, null if pricing unavailable)
233 | $inputCost = $response->getInputPriceUsd() ?? 0;
234 | $outputCost = $response->getOutputPriceUsd() ?? 0;
235 | $totalCost = $inputCost + $outputCost;
236 |
237 | echo "Input cost: $" . number_format($inputCost, 6) . "\n";
238 | echo "Output cost: $" . number_format($outputCost, 6) . "\n";
239 | echo "Total cost: $" . number_format($totalCost, 6) . "\n";
240 |
241 | // Performance metrics
242 | echo "Response time: " . $response->getTotalTimeMs() . "ms\n";
243 | ```
244 |
245 | ## Next Steps
246 |
247 | - [Tools Guide](../guides/tools.md) - Add function calling to your agents
248 | - [Multimodal](../guides/multimodal.md) - Process images and PDFs
249 | - [Feedback Loops](../guides/feedback-loops.md) - Build self-correcting agents
250 | - [Configuration](../guides/configuration.md) - Advanced configuration options
251 |
--------------------------------------------------------------------------------