├── src └── lmnr │ ├── py.typed │ ├── sdk │ ├── __init__.py │ ├── browser │ │ ├── __init__.py │ │ ├── utils.py │ │ └── bubus_otel.py │ ├── eval_control.py │ ├── client │ │ ├── synchronous │ │ │ └── resources │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── browser_events.py │ │ │ │ ├── tags.py │ │ │ │ └── evaluators.py │ │ └── asynchronous │ │ │ └── resources │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── browser_events.py │ │ │ ├── tags.py │ │ │ └── evaluators.py │ ├── log.py │ └── datasets │ │ ├── file_utils.py │ │ └── __init__.py │ ├── opentelemetry_lib │ ├── opentelemetry │ │ └── instrumentation │ │ │ ├── groq │ │ │ ├── version.py │ │ │ ├── config.py │ │ │ ├── event_models.py │ │ │ └── utils.py │ │ │ ├── openai │ │ │ ├── version.py │ │ │ ├── shared │ │ │ │ ├── config.py │ │ │ │ ├── event_models.py │ │ │ │ └── image_gen_wrappers.py │ │ │ └── __init__.py │ │ │ ├── anthropic │ │ │ ├── version.py │ │ │ ├── config.py │ │ │ └── event_models.py │ │ │ ├── google_genai │ │ │ ├── config.py │ │ │ └── schema_utils.py │ │ │ ├── cua_computer │ │ │ └── utils.py │ │ │ ├── kernel │ │ │ └── utils.py │ │ │ ├── langgraph │ │ │ └── utils.py │ │ │ └── opentelemetry │ │ │ └── __init__.py │ ├── .flake8 │ ├── utils │ │ ├── wrappers.py │ │ ├── json_encoder.py │ │ ├── package_check.py │ │ └── __init__.py │ ├── tracing │ │ ├── tracer.py │ │ └── attributes.py │ └── __init__.py │ ├── __init__.py │ ├── cli │ └── rules.py │ └── version.py ├── examples └── fastapi-app │ ├── .gitignore │ ├── .python-version │ ├── .env.example │ ├── pyproject.toml │ ├── src │ ├── schemas.py │ ├── main.py │ └── llm.py │ └── README.md ├── tests ├── test_instrumentations │ ├── test_openai │ │ └── traces │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── utils.py │ │ │ ├── test_exceptions.py │ │ │ ├── test_embedding_metrics_handler.py │ │ │ ├── cassettes │ │ │ ├── test_completions │ │ │ │ ├── test_completion_context_propagation.yaml │ │ │ │ ├── test_completion_context_propagation_with_events_with_content.yaml │ │ │ │ ├── test_async_completion_context_propagation.yaml │ │ │ │ ├── test_completion_context_propagation_with_events_with_no_content.yaml │ │ │ │ ├── test_async_completion_context_propagation_with_events_with_content.yaml │ │ │ │ ├── test_async_completion_context_propagation_with_events_with_no_content.yaml │ │ │ │ └── test_completion.yaml │ │ │ ├── test_chat │ │ │ │ ├── test_chat_async_context_propagation.yaml │ │ │ │ ├── test_chat_context_propagation.yaml │ │ │ │ ├── test_chat_async_context_propagation_with_events_with_content.yaml │ │ │ │ ├── test_chat_context_propagation_with_events_with_content.yaml │ │ │ │ ├── test_chat_async_context_propagation_with_events_with_no_content.yaml │ │ │ │ └── test_chat_context_propagation_with_events_with_no_content.yaml │ │ │ └── test_azure │ │ │ │ ├── test_chat.yaml │ │ │ │ ├── test_chat_content_filtering.yaml │ │ │ │ ├── test_chat_with_events_with_content.yaml │ │ │ │ ├── test_chat_with_events_with_no_content.yaml │ │ │ │ ├── test_chat_content_filtering_with_events_with_content.yaml │ │ │ │ └── test_chat_content_filtering_with_events_with_no_content.yaml │ │ │ └── test_streaming_with_api_usage.py │ ├── test_groq │ │ ├── __init__.py │ │ └── data │ │ │ └── logo.jpg │ ├── test_anthropic │ │ ├── __init__.py │ │ ├── data │ │ │ └── logo.jpg │ │ └── cassettes │ │ │ ├── test_completion │ │ │ ├── test_anthropic_completion_legacy.yaml │ │ │ ├── test_anthropic_completion_with_events_with_content.yaml │ │ │ └── test_anthropic_completion_with_events_with_no_content.yaml │ │ │ ├── test_messages 
│ │ │ ├── test_with_asyncio_run_legacy.yaml │ │ │ ├── test_with_asyncio_run_with_events_with_content.yaml │ │ │ ├── test_with_asyncio_run_with_events_with_no_content.yaml │ │ │ ├── test_async_anthropic_message_create_with_events_with_content.yaml │ │ │ ├── test_async_anthropic_message_create_with_events_with_no_content.yaml │ │ │ ├── test_anthropic_message_create_with_events_with_content.yaml │ │ │ └── test_anthropic_message_create_with_events_with_no_content.yaml │ │ │ └── test_bedrock_with_raw_response │ │ │ └── test_async_anthropic_bedrock_regular_create.yaml │ └── test_claude_agent │ │ ├── conftest.py │ │ ├── test_query_with_alias.py │ │ ├── test_query.py │ │ ├── test_claude_sdk_client.py │ │ └── test_tool.py ├── test_instrument_initializers.py ├── test_races │ └── conftest.py ├── cassettes │ ├── test_google_genai │ │ ├── test_google_genai.yaml │ │ ├── test_google_genai_string_contents.yaml │ │ ├── test_google_genai_reasoning_tokens.yaml │ │ ├── test_google_genai_reasoning_tokens_async.yaml │ │ ├── test_google_genai_tool_calls.yaml │ │ ├── test_google_genai_multiple_tool_calls.yaml │ │ ├── test_google_genai_tool_calls_and_text_part.yaml │ │ ├── test_google_genai_output_json_schema.yaml │ │ ├── test_google_genai_output_schema.yaml │ │ └── test_google_genai_reasoning_tokens_with_include_thoughts.yaml │ ├── test_litellm_gemini │ │ └── test_litellm_gemini_thinking.yaml │ └── test_litellm_anthropic │ │ ├── test_litellm_anthropic_basic.yaml │ │ ├── test_litellm_anthropic_text_block.yaml │ │ ├── test_litellm_anthropic_with_metadata.yaml │ │ ├── test_async_litellm_anthropic_with_computer_tools.yaml │ │ └── test_litellm_anthropic_with_computer_tools.yaml └── test_context.py ├── .gitignore └── .github └── workflows ├── build-package.yml ├── run-tests.yml ├── ensure-version-match.yml └── python-publish.yml /src/lmnr/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/lmnr/sdk/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/lmnr/sdk/browser/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/fastapi-app/.gitignore: -------------------------------------------------------------------------------- 1 | .env -------------------------------------------------------------------------------- /examples/fastapi-app/.python-version: -------------------------------------------------------------------------------- 1 | 3.13 2 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_groq/__init__.py: -------------------------------------------------------------------------------- 1 | """unit tests.""" 2 | -------------------------------------------------------------------------------- /examples/fastapi-app/.env.example: -------------------------------------------------------------------------------- 1 | LMNR_PROJECT_API_KEY= 2 | OPENAI_API_KEY= -------------------------------------------------------------------------------- 
/tests/test_instrumentations/test_anthropic/__init__.py: -------------------------------------------------------------------------------- 1 | """unit tests.""" 2 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.41.0" 2 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.40.14" 2 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.41.0" 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/__pycache__ 2 | dist/ 3 | poetry.lock 4 | uv.lock 5 | lmnr_engine/ 6 | .vscode 7 | .venv 8 | .idea 9 | .env 10 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_groq/data/logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lmnr-ai/lmnr-python/HEAD/tests/test_instrumentations/test_groq/data/logo.jpg -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/data/logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lmnr-ai/lmnr-python/HEAD/tests/test_instrumentations/test_anthropic/data/logo.jpg -------------------------------------------------------------------------------- /src/lmnr/sdk/eval_control.py: -------------------------------------------------------------------------------- 1 | from contextvars import ContextVar 2 | 3 | 4 | PREPARE_ONLY: ContextVar[bool] = ContextVar("__lmnr_prepare_only", default=False) 5 | EVALUATION_INSTANCES = ContextVar("__lmnr_evaluation_instances") 6 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = 3 | .git, 4 | __pycache__, 5 | build, 6 | dist, 7 | .tox, 8 | venv, 9 | .venv, 10 | .pytest_cache 11 | max-line-length = 120 12 | per-file-ignores = __init__.py:F401 13 | -------------------------------------------------------------------------------- /tests/test_instrument_initializers.py: -------------------------------------------------------------------------------- 1 | from lmnr.opentelemetry_lib.tracing.instruments import ( 2 | Instruments, 3 | INSTRUMENTATION_INITIALIZERS, 4 | ) 5 | 6 | 7 | def test_same_number_of_instrumentation_initializers(): 8 | assert len(INSTRUMENTATION_INITIALIZERS) == len(Instruments) 9 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | 4 | class Config: 5 | enrich_token_usage = False 6 | exception_logger = None 7 | 
get_common_metrics_attributes: Callable[[], dict] = lambda: {} 8 | use_legacy_attributes = True 9 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture(scope="module") 5 | def vcr_config(): 6 | return { 7 | "filter_headers": ["authorization", "api-key"], 8 | "ignore_hosts": ["openaipublic.blob.core.windows.net"], 9 | } 10 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Coroutine 2 | 3 | 4 | class Config: 5 | exception_logger = None 6 | upload_base64_image: ( 7 | Callable[[str, str, str, str], Coroutine[None, None, str]] | None 8 | ) = None 9 | convert_image_to_openai_format: bool = True 10 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/synchronous/resources/__init__.py: -------------------------------------------------------------------------------- 1 | from lmnr.sdk.client.synchronous.resources.browser_events import BrowserEvents 2 | from lmnr.sdk.client.synchronous.resources.evals import Evals 3 | from lmnr.sdk.client.synchronous.resources.tags import Tags 4 | from lmnr.sdk.client.synchronous.resources.evaluators import Evaluators 5 | 6 | __all__ = ["Evals", "Evaluators", "BrowserEvents", "Tags"] 7 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/utils/wrappers.py: -------------------------------------------------------------------------------- 1 | # TODO: Remove the same thing from openai, anthropic, etc, and use this instead 2 | 3 | 4 | def _with_tracer_wrapper(func): 5 | def _with_tracer(tracer, to_wrap): 6 | def wrapper(wrapped, instance, args, kwargs): 7 | return func(tracer, to_wrap, wrapped, instance, args, kwargs) 8 | 9 | return wrapper 10 | 11 | return _with_tracer 12 | -------------------------------------------------------------------------------- /examples/fastapi-app/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "fastapi-app" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.9, <4" 7 | dependencies = [ 8 | "fastapi[standard]>=0.115.12", 9 | "lmnr[openai]", 10 | "openai>=1.76.0", 11 | "pydantic>=2.10.5", 12 | "python-dotenv>=1.1.0", 13 | ] 14 | 15 | [tool.uv.sources] 16 | lmnr = { workspace = true } 17 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/asynchronous/resources/__init__.py: -------------------------------------------------------------------------------- 1 | from lmnr.sdk.client.asynchronous.resources.browser_events import AsyncBrowserEvents 2 | from lmnr.sdk.client.asynchronous.resources.evals import AsyncEvals 3 | from lmnr.sdk.client.asynchronous.resources.tags import AsyncTags 4 | from lmnr.sdk.client.asynchronous.resources.evaluators import AsyncEvaluators 5 | 6 | __all__ = ["AsyncEvals", "AsyncBrowserEvents", "AsyncTags", "AsyncEvaluators"] 7 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py: 
-------------------------------------------------------------------------------- 1 | import base64 2 | import orjson 3 | 4 | 5 | def payload_to_base64url(payload_bytes: bytes) -> bytes: 6 | data = base64.b64encode(payload_bytes).decode("utf-8") 7 | url = f"data:image/png;base64,{data}" 8 | return orjson.dumps({"base64url": url}) 9 | 10 | 11 | def payload_to_placeholder(payload_bytes: bytes) -> str: 12 | return "" 13 | -------------------------------------------------------------------------------- /examples/fastapi-app/src/schemas.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | from enum import Enum 3 | 4 | 5 | class TicketCategory(str, Enum): 6 | REFUND = "REFUND" 7 | BUG = "BUG" 8 | QUESTION = "QUESTION" 9 | OTHER = "OTHER" 10 | 11 | 12 | class Ticket(BaseModel): 13 | title: str 14 | description: str 15 | customer_email: str 16 | 17 | 18 | class TicketClassification(BaseModel): 19 | category: TicketCategory 20 | reasoning: str 21 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Optional 2 | 3 | from typing_extensions import Coroutine 4 | 5 | 6 | class Config: 7 | enrich_token_usage = False 8 | exception_logger = None 9 | get_common_metrics_attributes: Callable[[], dict] = lambda: {} 10 | upload_base64_image: Optional[ 11 | Callable[[str, str, str, str], Coroutine[None, None, str]] 12 | ] = None 13 | use_legacy_attributes = True 14 | -------------------------------------------------------------------------------- /.github/workflows/build-package.yml: -------------------------------------------------------------------------------- 1 | name: Build Python Package 2 | 3 | on: 4 | pull_request: 5 | types: [opened, synchronize] 6 | branches: ["main"] 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Set up Python 14 | uses: actions/setup-python@v5 15 | with: 16 | python-version: '3.10' 17 | - name: Install dependencies 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install build 21 | - name: Build package 22 | run: python -m build 23 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/utils/json_encoder.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import json 3 | 4 | 5 | class JSONEncoder(json.JSONEncoder): 6 | def default(self, o): 7 | if isinstance(o, dict): 8 | if "callbacks" in o: 9 | del o["callbacks"] 10 | return o 11 | if dataclasses.is_dataclass(o): 12 | return dataclasses.asdict(o) 13 | 14 | if hasattr(o, "to_json"): 15 | return o.to_json() 16 | 17 | if hasattr(o, "json"): 18 | return o.json() 19 | 20 | return super().default(o) 21 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Optional 2 | 3 | from opentelemetry._events import EventLogger 4 | 5 | 6 | class Config: 7 | enrich_token_usage = False 8 | enrich_assistant = False 9 | exception_logger = None 10 | get_common_metrics_attributes: Callable[[], dict] = lambda: {} 11 | upload_base64_image: Callable[[str, 
str, str], str] = ( 12 | lambda trace_id, span_id, base64_image_url: "" 13 | ) 14 | enable_trace_context_propagation: bool = True 15 | use_legacy_attributes = True 16 | event_logger: Optional[EventLogger] = None 17 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/utils/package_check.py: -------------------------------------------------------------------------------- 1 | from importlib.metadata import distributions 2 | 3 | from typing import Optional 4 | 5 | installed_packages = { 6 | (dist.name or dist.metadata.get("Name", "")).lower() for dist in distributions() 7 | } 8 | 9 | 10 | def is_package_installed(package_name: str) -> bool: 11 | return package_name.lower() in installed_packages 12 | 13 | 14 | def get_package_version(package_name: str) -> Optional[str]: 15 | for dist in distributions(): 16 | if (dist.name or dist.metadata.get("Name", "")).lower() == package_name.lower(): 17 | return dist.version 18 | return None 19 | -------------------------------------------------------------------------------- /examples/fastapi-app/src/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from schemas import Ticket, TicketClassification 3 | from dotenv import load_dotenv 4 | from lmnr import Laminar 5 | from llm import model_classify_ticket 6 | 7 | load_dotenv(override=True) 8 | 9 | Laminar.initialize() 10 | 11 | app = FastAPI() 12 | 13 | 14 | @app.post("/api/v1/tickets/classify", response_model=TicketClassification) 15 | async def classify_ticket(ticket: Ticket): 16 | try: 17 | classification = model_classify_ticket(ticket) 18 | print(classification) 19 | return classification 20 | except Exception as e: 21 | raise HTTPException(status_code=500, detail=str(e)) 22 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/utils/__init__.py: -------------------------------------------------------------------------------- 1 | def cameltosnake(camel_string: str) -> str: 2 | if not camel_string: 3 | return "" 4 | elif camel_string[0].isupper(): 5 | return f"_{camel_string[0].lower()}{cameltosnake(camel_string[1:])}" 6 | else: 7 | return f"{camel_string[0]}{cameltosnake(camel_string[1:])}" 8 | 9 | 10 | def camel_to_snake(s): 11 | if len(s) <= 1: 12 | return s.lower() 13 | 14 | return cameltosnake(s[0].lower() + s[1:]) 15 | 16 | 17 | def is_notebook(): 18 | try: 19 | from IPython import get_ipython 20 | 21 | ip = get_ipython() 22 | if ip is None: 23 | return False 24 | return True 25 | except Exception: 26 | return False 27 | -------------------------------------------------------------------------------- /.github/workflows/run-tests.yml: -------------------------------------------------------------------------------- 1 | name: Run Tests 2 | 3 | on: 4 | pull_request: 5 | types: [opened, synchronize] 6 | branches: ["main"] 7 | 8 | jobs: 9 | test: 10 | runs-on: ubuntu-latest 11 | strategy: 12 | matrix: 13 | python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"] 14 | steps: 15 | - uses: actions/checkout@v5 16 | - name: Set up Python 17 | uses: actions/setup-python@v6 18 | with: 19 | python-version: ${{ matrix.python-version }} 20 | - name: Install uv 21 | uses: astral-sh/setup-uv@v6 22 | with: 23 | activate-environment: true 24 | - name: Install the project 25 | run: uv sync --all-extras --dev 26 | - name: Run tests 27 | run: uv run pytest 28 | 
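A quick note on the recursive `cameltosnake`/`camel_to_snake` helpers in `src/lmnr/opentelemetry_lib/utils/__init__.py` above: the recursion lowercases the first character, then prefixes an underscore before each subsequent uppercase letter. A minimal behavior sketch (expected values traced from the implementation itself; illustrative, not part of the repo):

```
from lmnr.opentelemetry_lib.utils import camel_to_snake

# A leading uppercase letter is lowered without a leading underscore.
assert camel_to_snake("CamelCase") == "camel_case"
# An already-lowercase first letter gives the same result.
assert camel_to_snake("camelCase") == "camel_case"
# Single characters and empty strings are returned lowercased as-is.
assert camel_to_snake("A") == "a"
assert camel_to_snake("") == ""
```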
-------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from google.genai._api_client import BaseApiClient 3 | from google.genai._transformers import t_schema 4 | from google.genai.types import JSONSchemaType 5 | 6 | import json 7 | 8 | DUMMY_CLIENT = BaseApiClient(api_key="dummy") 9 | 10 | 11 | def process_schema(schema: Any) -> dict[str, Any]: 12 | # The only thing we need from the client is the t_schema function 13 | try: 14 | json_schema = t_schema(DUMMY_CLIENT, schema).json_schema.model_dump( 15 | exclude_unset=True, exclude_none=True 16 | ) 17 | except Exception: 18 | json_schema = {} 19 | return json_schema 20 | 21 | 22 | class SchemaJSONEncoder(json.JSONEncoder): 23 | def default(self, o: Any) -> Any: 24 | if isinstance(o, JSONSchemaType): 25 | return o.value 26 | return super().default(o) 27 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_claude_agent/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture(autouse=True) 5 | def cleanup_claude_proxy(): 6 | """Clean up Claude proxy server before and after each test.""" 7 | # Clean up before test 8 | _cleanup_proxy() 9 | yield 10 | # Clean up after test 11 | _cleanup_proxy() 12 | 13 | 14 | def start_claude_proxy(): 15 | """Start the Claude proxy server if it's not running.""" 16 | try: 17 | from lmnr_claude_code_proxy import run_server 18 | 19 | run_server() 20 | except Exception: 21 | # Ignore errors if the proxy couldn't be started 22 | pass 23 | 24 | 25 | def _cleanup_proxy(): 26 | """Stop the Claude proxy server if it's running.""" 27 | try: 28 | from lmnr_claude_code_proxy import stop_server 29 | 30 | stop_server() 31 | except Exception: 32 | # Ignore errors if the proxy wasn't running or module not available 33 | pass 34 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, List, Literal, Optional, TypedDict 3 | 4 | 5 | class _FunctionToolCall(TypedDict): 6 | function_name: str 7 | arguments: Optional[dict[str, Any]] 8 | 9 | 10 | class ToolCall(TypedDict): 11 | """Represents a tool call in the AI model.""" 12 | 13 | id: str 14 | function: _FunctionToolCall 15 | type: Literal["function"] 16 | 17 | 18 | class CompletionMessage(TypedDict): 19 | """Represents a message in the AI model.""" 20 | 21 | content: Any 22 | role: str = "assistant" 23 | 24 | 25 | @dataclass 26 | class MessageEvent: 27 | """Represents an input event for the AI model.""" 28 | 29 | content: Any 30 | role: str = "user" 31 | tool_calls: Optional[List[ToolCall]] = None 32 | 33 | 34 | @dataclass 35 | class ChoiceEvent: 36 | """Represents a completion event for the AI model.""" 37 | 38 | index: int 39 | message: CompletionMessage 40 | finish_reason: str = "unknown" 41 | tool_calls: Optional[List[ToolCall]] = None 42 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py: 
-------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, List, Literal, Optional, TypedDict 3 | 4 | 5 | class _FunctionToolCall(TypedDict): 6 | function_name: str 7 | arguments: Optional[dict[str, Any]] 8 | 9 | 10 | class ToolCall(TypedDict): 11 | """Represents a tool call in the AI model.""" 12 | 13 | id: str 14 | function: _FunctionToolCall 15 | type: Literal["function"] 16 | 17 | 18 | class CompletionMessage(TypedDict): 19 | """Represents a message in the AI model.""" 20 | 21 | content: Any 22 | role: str = "assistant" 23 | 24 | 25 | @dataclass 26 | class MessageEvent: 27 | """Represents an input event for the AI model.""" 28 | 29 | content: Any 30 | role: str = "user" 31 | tool_calls: Optional[List[ToolCall]] = None 32 | 33 | 34 | @dataclass 35 | class ChoiceEvent: 36 | """Represents a completion event for the AI model.""" 37 | 38 | index: int 39 | message: CompletionMessage 40 | finish_reason: str = "unknown" 41 | tool_calls: Optional[List[ToolCall]] = None 42 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, List, Literal, Optional, TypedDict 3 | 4 | 5 | class _FunctionToolCall(TypedDict): 6 | function_name: str 7 | arguments: Optional[dict[str, Any]] 8 | 9 | 10 | class ToolCall(TypedDict): 11 | """Represents a tool call in the AI model.""" 12 | 13 | id: str 14 | function: _FunctionToolCall 15 | type: Literal["function"] 16 | 17 | 18 | class CompletionMessage(TypedDict): 19 | """Represents a message in the AI model.""" 20 | 21 | content: Any 22 | role: str = "assistant" 23 | 24 | 25 | @dataclass 26 | class MessageEvent: 27 | """Represents an input event for the AI model.""" 28 | 29 | content: Any 30 | role: str = "user" 31 | tool_calls: Optional[List[ToolCall]] = None 32 | 33 | 34 | @dataclass 35 | class ChoiceEvent: 36 | """Represents a completion event for the AI model.""" 37 | 38 | index: int 39 | message: CompletionMessage 40 | finish_reason: str = "unknown" 41 | tool_calls: Optional[List[ToolCall]] = None 42 | -------------------------------------------------------------------------------- /examples/fastapi-app/src/llm.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | import os 3 | from dotenv import load_dotenv 4 | from schemas import Ticket, TicketCategory, TicketClassification 5 | 6 | load_dotenv(override=True) 7 | 8 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) 9 | 10 | 11 | def model_classify_ticket(ticket: Ticket) -> TicketClassification: 12 | response = client.beta.chat.completions.parse( 13 | model="gpt-4.1-nano", 14 | response_format=TicketClassification, 15 | messages=[ 16 | { 17 | "role": "system", 18 | "content": """You are a support ticket classifier. 
19 | Classify the ticket that a user sends you""", 20 | }, 21 | { 22 | "role": "user", 23 | "content": f"""Title: {ticket.title} 24 | Description: {ticket.description} 25 | Customer Email: {ticket.customer_email}""", 26 | }, 27 | ], 28 | ) 29 | 30 | return response.choices[0].message.parsed or TicketClassification( 31 | category=TicketCategory.OTHER, 32 | reasoning=response.choices[0].message.content, 33 | ) 34 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/synchronous/resources/base.py: -------------------------------------------------------------------------------- 1 | """Base class for resource objects.""" 2 | 3 | import httpx 4 | 5 | 6 | class BaseResource: 7 | """Base class for all API resources.""" 8 | 9 | def __init__(self, client: httpx.Client, base_url: str, project_api_key: str): 10 | """Initialize the resource. 11 | 12 | Args: 13 | client (httpx.Client): HTTP client instance 14 | base_url (str): Base URL for the API 15 | project_api_key (str): Project API key 16 | """ 17 | self._client = client 18 | self._base_url = base_url 19 | self._project_api_key = project_api_key 20 | 21 | def _headers(self) -> dict[str, str]: 22 | """Generate request headers with authentication. 23 | 24 | Returns: 25 | dict[str, str]: Headers dictionary 26 | """ 27 | assert self._project_api_key is not None, "Project API key is not set" 28 | return { 29 | "Authorization": "Bearer " + self._project_api_key, 30 | "Content-Type": "application/json", 31 | "Accept": "application/json", 32 | } 33 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/asynchronous/resources/base.py: -------------------------------------------------------------------------------- 1 | """Base class for resource objects.""" 2 | 3 | import httpx 4 | 5 | 6 | class BaseAsyncResource: 7 | """Base class for all API resources.""" 8 | 9 | def __init__(self, client: httpx.AsyncClient, base_url: str, project_api_key: str): 10 | """Initialize the resource. 11 | 12 | Args: 13 | client (httpx.AsyncClient): HTTP client instance 14 | base_url (str): Base URL for the API 15 | project_api_key (str): Project API key 16 | """ 17 | self._client = client 18 | self._base_url = base_url 19 | self._project_api_key = project_api_key 20 | 21 | def _headers(self) -> dict[str, str]: 22 | """Generate request headers with authentication. 
23 | 24 | Returns: 25 | dict[str, str]: Headers dictionary 26 | """ 27 | assert self._project_api_key is not None, "Project API key is not set" 28 | return { 29 | "Authorization": "Bearer " + self._project_api_key, 30 | "Content-Type": "application/json", 31 | "Accept": "application/json", 32 | } 33 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_claude_agent/test_query_with_alias.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter 4 | from claude_agent_sdk import ClaudeAgentOptions, query as claude_query 5 | 6 | from mock_transport import MockClaudeTransport 7 | 8 | 9 | @pytest.mark.asyncio 10 | async def test_claude_agent_query(span_exporter: InMemorySpanExporter): 11 | options = ClaudeAgentOptions( 12 | model="claude-sonnet-4-5", 13 | system_prompt="You are an expert software engineer.", 14 | permission_mode="acceptEdits", 15 | ) 16 | 17 | async for message in claude_query( 18 | prompt="What is the capital of France?", 19 | options=options, 20 | transport=MockClaudeTransport( 21 | auto_respond_on_connect=True, close_after_responses=True 22 | ), 23 | ): 24 | pass 25 | 26 | spans = span_exporter.get_finished_spans() 27 | assert len(spans) == 1 28 | assert spans[0].name == "query" 29 | assert spans[0].attributes["lmnr.span.path"] == ("query",) 30 | assert "Paris" in spans[0].attributes["lmnr.span.output"] 31 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_claude_agent/test_query.py: -------------------------------------------------------------------------------- 1 | import claude_agent_sdk 2 | import pytest 3 | 4 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter 5 | from claude_agent_sdk import ClaudeAgentOptions 6 | 7 | from mock_transport import MockClaudeTransport 8 | 9 | 10 | @pytest.mark.asyncio 11 | async def test_claude_agent_query(span_exporter: InMemorySpanExporter): 12 | options = ClaudeAgentOptions( 13 | model="claude-sonnet-4-5", 14 | system_prompt="You are an expert software engineer.", 15 | permission_mode="acceptEdits", 16 | ) 17 | 18 | async for message in claude_agent_sdk.query( 19 | prompt="What is the capital of France?", 20 | options=options, 21 | transport=MockClaudeTransport( 22 | auto_respond_on_connect=True, close_after_responses=True 23 | ), 24 | ): 25 | pass 26 | 27 | spans = span_exporter.get_finished_spans() 28 | assert len(spans) == 1 29 | assert spans[0].name == "query" 30 | assert spans[0].attributes["lmnr.span.path"] == ("query",) 31 | assert "Paris" in spans[0].attributes["lmnr.span.output"] 32 | -------------------------------------------------------------------------------- /.github/workflows/ensure-version-match.yml: -------------------------------------------------------------------------------- 1 | name: Ensure Version Match Between pyproject.toml and src/lmnr/version.py 2 | 3 | on: 4 | pull_request: 5 | types: [opened, synchronize] 6 | branches: ["main"] 7 | 8 | 9 | jobs: 10 | ensure-version-match: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4 15 | - name: Install uv 16 | uses: astral-sh/setup-uv@v6 17 | with: 18 | activate-environment: true 19 | - name: Install toml-cli 20 | run: uv add toml-cli 21 | - name: Ensure version match 22 | run: | 23 | SDK_VERSION=$(cat src/lmnr/version.py | 
grep __version__ | head -n1 | cut -d'=' -f2 | sed 's/[" '"'"']//g') 24 | PYPROJECT_VERSION=$(uv run toml get --toml-path=pyproject.toml project.version) 25 | if [ "$SDK_VERSION" != "$PYPROJECT_VERSION" ]; then 26 | echo "Version mismatch between src/lmnr/version.py and pyproject.toml" 27 | echo "SDK_VERSION: $SDK_VERSION" 28 | echo "PYPROJECT_VERSION: $PYPROJECT_VERSION" 29 | exit 1 30 | fi 31 | echo "Version match between src/lmnr/version.py and pyproject.toml" 32 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/utils.py: -------------------------------------------------------------------------------- 1 | import httpx 2 | from opentelemetry.sdk.trace import Span 3 | from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator 4 | from opentelemetry.trace.propagation import get_current_span 5 | from unittest.mock import MagicMock 6 | 7 | 8 | # from: https://stackoverflow.com/a/41599695/2749989 9 | def spy_decorator(method_to_decorate): 10 | mock = MagicMock() 11 | 12 | def wrapper(self, *args, **kwargs): 13 | mock(*args, **kwargs) 14 | return method_to_decorate(self, *args, **kwargs) 15 | 16 | wrapper.mock = mock 17 | return wrapper 18 | 19 | 20 | def assert_request_contains_tracecontext(request: httpx.Request, expected_span: Span): 21 | assert TraceContextTextMapPropagator._TRACEPARENT_HEADER_NAME in request.headers 22 | ctx = TraceContextTextMapPropagator().extract(request.headers) 23 | request_span_context = get_current_span(ctx).get_span_context() 24 | expected_span_context = expected_span.get_span_context() 25 | 26 | assert request_span_context.trace_id == expected_span_context.trace_id 27 | assert request_span_context.span_id == expected_span_context.span_id 28 | --------------------------------------------------------------------------------
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | runs-on: ubuntu-latest 21 | environment: 22 | name: pypi 23 | url: https://pypi.org/p/lmnr/ 24 | permissions: 25 | id-token: write 26 | steps: 27 | - uses: actions/checkout@v4 28 | - name: Set up Python 29 | uses: actions/setup-python@v5 30 | with: 31 | python-version: '3.10' 32 | - name: Install dependencies 33 | run: | 34 | python -m pip install --upgrade pip 35 | pip install build 36 | - name: Build package 37 | run: python -m build 38 | - name: Publish package 39 | uses: pypa/gh-action-pypi-publish@release/v1 40 | -------------------------------------------------------------------------------- /src/lmnr/__init__.py: -------------------------------------------------------------------------------- 1 | from .sdk.client.synchronous.sync_client import LaminarClient 2 | from .sdk.client.asynchronous.async_client import AsyncLaminarClient 3 | from .sdk.datasets import EvaluationDataset, LaminarDataset 4 | from .sdk.evaluations import evaluate 5 | from .sdk.laminar import Laminar 6 | from .sdk.types import SessionRecordingOptions, MaskInputOptions 7 | from .sdk.types import HumanEvaluator 8 | from .sdk.decorators import observe 9 | from .sdk.types import LaminarSpanContext 10 | from .opentelemetry_lib.litellm import LaminarLiteLLMCallback 11 | from .opentelemetry_lib.tracing.attributes import Attributes 12 | from .opentelemetry_lib.tracing.instruments import Instruments 13 | from .opentelemetry_lib.tracing.processor import LaminarSpanProcessor 14 | from .opentelemetry_lib.tracing.tracer import get_laminar_tracer_provider, get_tracer 15 | 16 | __all__ = [ 17 | "AsyncLaminarClient", 18 | "Attributes", 19 | "EvaluationDataset", 20 | "HumanEvaluator", 21 | "Instruments", 22 | "Laminar", 23 | "LaminarClient", 24 | "LaminarDataset", 25 | "LaminarLiteLLMCallback", 26 | "LaminarSpanContext", 27 | "LaminarSpanProcessor", 28 | "get_laminar_tracer_provider", 29 | "get_tracer", 30 | "evaluate", 31 | "observe", 32 | "SessionRecordingOptions", 33 | "MaskInputOptions", 34 | ] 35 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/synchronous/resources/browser_events.py: -------------------------------------------------------------------------------- 1 | """Resource for sending browser events.""" 2 | 3 | import gzip 4 | import json 5 | 6 | from lmnr.sdk.client.synchronous.resources.base import BaseResource 7 | 8 | from lmnr.version import PYTHON_VERSION, __version__ 9 | 10 | 11 | class BrowserEvents(BaseResource): 12 | """Resource for sending browser events.""" 13 | 14 | def send( 15 | self, 16 | session_id: str, 17 | trace_id: str, 18 | events: list[dict], 19 | ): 20 | url = self._base_url + "/v1/browser-sessions/events" 21 | payload = { 22 | "sessionId": session_id, 23 | "traceId": trace_id, 24 | "events": events, 25 | "source": f"python@{PYTHON_VERSION}", 26 | "sdkVersion": __version__, 27 | } 28 | compressed_payload = gzip.compress(json.dumps(payload).encode("utf-8")) 29 | response = self._client.post( 30 | url, 31 | content=compressed_payload, 32 | headers={ 33 | **self._headers(), 34 | "Content-Encoding": "gzip", 35 | }, 36 | ) 37 | if response.status_code != 200: 38 | raise ValueError( 39 | f"Failed to send events: [{response.status_code}] {response.text}" 40 | ) 41 | -------------------------------------------------------------------------------- 
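The `BrowserEvents.send` resource above (and its async counterpart just below) both use the same wire pattern: build a JSON payload, gzip-compress it, and POST it with a `Content-Encoding: gzip` header alongside the usual auth headers. A standalone sketch of that pattern with `httpx` — the URL, IDs, and token here are placeholders, not real endpoints or credentials:

```
import gzip
import json

import httpx

# Illustrative payload; the sessionId/traceId values are placeholders.
payload = {
    "sessionId": "session-id",
    "traceId": "trace-id",
    "events": [],
}
compressed = gzip.compress(json.dumps(payload).encode("utf-8"))

response = httpx.post(
    "https://api.example.com/v1/browser-sessions/events",  # placeholder URL
    content=compressed,
    headers={
        "Authorization": "Bearer <project-api-key>",  # placeholder token
        "Content-Type": "application/json",
        "Content-Encoding": "gzip",
    },
)
if response.status_code != 200:
    raise ValueError(f"Failed to send events: [{response.status_code}] {response.text}")
```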
/src/lmnr/sdk/client/asynchronous/resources/browser_events.py: -------------------------------------------------------------------------------- 1 | """Resource for sending browser events.""" 2 | 3 | import gzip 4 | import json 5 | 6 | from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource 7 | 8 | from lmnr.version import PYTHON_VERSION, __version__ 9 | 10 | 11 | class AsyncBrowserEvents(BaseAsyncResource): 12 | """Resource for sending browser events.""" 13 | 14 | async def send( 15 | self, 16 | session_id: str, 17 | trace_id: str, 18 | events: list[dict], 19 | ): 20 | url = self._base_url + "/v1/browser-sessions/events" 21 | payload = { 22 | "sessionId": session_id, 23 | "traceId": trace_id, 24 | "events": events, 25 | "source": f"python@{PYTHON_VERSION}", 26 | "sdkVersion": __version__, 27 | } 28 | 29 | compressed_payload = gzip.compress(json.dumps(payload).encode("utf-8")) 30 | response = await self._client.post( 31 | url, 32 | content=compressed_payload, 33 | headers={ 34 | **self._headers(), 35 | "Content-Encoding": "gzip", 36 | }, 37 | ) 38 | if response.status_code != 200: 39 | raise ValueError( 40 | f"Failed to send events: [{response.status_code}] {response.text}" 41 | ) 42 | -------------------------------------------------------------------------------- /src/lmnr/cli/rules.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import urllib.request 3 | import urllib.error 4 | import sys 5 | 6 | 7 | from lmnr.sdk.log import get_default_logger 8 | 9 | LOG = get_default_logger(__name__) 10 | 11 | 12 | def add_cursor_rules() -> None: 13 | """Download laminar.mdc file from a hardcoded public URL and save it to .cursor/rules/laminar.mdc""" 14 | # Hardcoded URL for the laminar.mdc file 15 | url = "https://raw.githubusercontent.com/lmnr-ai/lmnr/dev/rules/laminar.mdc" 16 | 17 | # Create .cursor/rules directory if it doesn't exist 18 | rules_dir = Path(".cursor/rules") 19 | rules_dir.mkdir(parents=True, exist_ok=True) 20 | 21 | # Define the target file path 22 | target_file = rules_dir / "laminar.mdc" 23 | 24 | try: 25 | LOG.info(f"Downloading laminar.mdc from {url}") 26 | 27 | # Download the file 28 | with urllib.request.urlopen(url) as response: 29 | content = response.read() 30 | 31 | # Write the content to the target file (this will overwrite if it exists) 32 | with open(target_file, "wb") as f: 33 | f.write(content) 34 | 35 | LOG.info(f"Successfully downloaded laminar.mdc to {target_file}") 36 | 37 | except urllib.error.URLError as e: 38 | LOG.error(f"Failed to download file from {url}: {e}") 39 | sys.exit(1) 40 | except Exception as e: 41 | LOG.error(f"Unexpected error: {e}") 42 | sys.exit(1) 43 | -------------------------------------------------------------------------------- /src/lmnr/version.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import httpx 3 | from packaging import version 4 | 5 | 6 | __version__ = "0.7.24" 7 | PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}" 8 | 9 | 10 | def is_latest_version() -> bool: 11 | try: 12 | return version.parse(__version__) >= version.parse(get_latest_pypi_version()) 13 | except Exception: 14 | return True 15 | 16 | 17 | def get_latest_pypi_version() -> str: 18 | """ 19 | Get the latest stable version of lmnr package from PyPI. 20 | Returns the version string, or the current version if the lookup fails.
21 | """ 22 | try: 23 | response = httpx.get("https://pypi.org/pypi/lmnr/json") 24 | response.raise_for_status() 25 | 26 | releases = response.json()["releases"] 27 | stable_versions = [ 28 | ver 29 | for ver in releases.keys() 30 | if not version.parse(ver).is_prerelease 31 | and not version.parse(ver).is_devrelease 32 | and not any(release.get("yanked", False) for release in releases[ver]) 33 | ] 34 | 35 | if not stable_versions: 36 | # do not scare the user, assume they are on 37 | # latest version 38 | return __version__ 39 | 40 | latest_version = max(stable_versions, key=version.parse) 41 | return latest_version 42 | 43 | except Exception: 44 | # do not scare the user, assume they are on 45 | # latest version 46 | return __version__ 47 | -------------------------------------------------------------------------------- /examples/fastapi-app/README.md: -------------------------------------------------------------------------------- 1 | # Example FastAPI app instrumented with Laminar 2 | 3 | ## Installation 4 | 5 | ### 1. Clone the repository 6 | 7 | ``` 8 | git clone https://github.com/lmnr-ai/lmnr-python 9 | ``` 10 | 11 | ### 2. Open the directory 12 | 13 | ``` 14 | cd lmnr-python/examples/fastapi-app 15 | ``` 16 | 17 | ### 3. Set up the environment variables 18 | 19 | ``` 20 | cp .env.example .env 21 | ``` 22 | 23 | Then fill in the `.env` file: get a [Laminar project API key](https://docs.lmnr.ai/tracing/introduction#2-initialize-laminar-in-your-application) and an [OpenAI API key](https://platform.openai.com/api-keys). 24 | 25 | ### 4. Install the dependencies 26 | 27 | ``` 28 | uv venv 29 | ``` 30 | 31 | ``` 32 | source .venv/bin/activate 33 | ``` 34 | 35 | ``` 36 | uv lock && uv sync 37 | ``` 38 | 39 | You may use `pip` or any other dependency manager of your choice instead of `uv`.
40 | 41 | ## Run the app 42 | 43 | ``` 44 | fastapi dev src/main.py --port 8011 45 | ``` 46 | 47 | ## Test the call with curl 48 | 49 | ``` 50 | curl --location 'localhost:8011/api/v1/tickets/classify' \ 51 | --header 'Content-Type: application/json' \ 52 | --data-raw '{ 53 | "title": "Can'\''t access my account", 54 | "description": "I'\''ve been trying to log in for the past hour but keep getting an error message", 55 | "customer_email": "user@example.com" 56 | }' 57 | ``` 58 | 59 | ## See the results on Laminar dashboard 60 | 61 | In your browser, open https://www.lmnr.ai, navigate to your project's traces page, and you will see the auto-instrumented OpenAI span. 62 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/test_exceptions.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import pytest 4 | 5 | 6 | def test_inner_exception_isnt_caught(openai_client): 7 | should_wrap = True 8 | 9 | def throw_exception(f): 10 | @functools.wraps(f) 11 | def wrapper(*args, **kwargs): 12 | if should_wrap: 13 | raise Exception("Test exception") 14 | else: 15 | return f(*args, **kwargs) 16 | 17 | return wrapper 18 | 19 | with pytest.raises(Exception): 20 | throw_exception(openai_client.chat.completions.create)( 21 | model="gpt-3.5-turbo", 22 | messages=[ 23 | {"role": "user", "content": "Tell me a joke about opentelemetry"} 24 | ], 25 | ) 26 | 27 | should_wrap = False 28 | 29 | 30 | @pytest.mark.vcr 31 | def test_exception_in_instrumentation_suppressed(openai_client): 32 | should_wrap = True 33 | 34 | def scramble_response(f): 35 | @functools.wraps(f) 36 | def wrapper(*args, **kwargs): 37 | if should_wrap: 38 | response = f(*args, **kwargs) 39 | response = {} 40 | return response 41 | else: 42 | return f(*args, **kwargs) 43 | 44 | return wrapper 45 | 46 | scramble_response(openai_client.chat.completions.create)( 47 | model="gpt-3.5-turbo", 48 | messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}], 49 | ) 50 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py: -------------------------------------------------------------------------------- 1 | # import base64 2 | import base64 3 | from copy import deepcopy 4 | from typing import Any 5 | 6 | from lmnr.opentelemetry_lib.decorators import json_dumps 7 | from pydantic import BaseModel 8 | 9 | 10 | def screenshot_tool_output_formatter(output: Any) -> str: 11 | # output is of type BinaryAPIResponse, which implements 12 | # the iter_bytes method from httpx.Response 13 | 14 | return "" 15 | # The below implementation works, but it may consume the entire iterator, 16 | # making the response unusable after the formatter is called. 17 | # This is UNLESS somewhere in code output.read() (httpx.Response.read()) 18 | # is called. 19 | # We cannot rely on that now, so we return a placeholder.
20 | # response_bytes = [] 21 | # for chunk in output.iter_bytes(): 22 | # response_bytes.append(chunk) 23 | # response_base64 = base64.b64encode(b"".join(response_bytes)).decode("utf-8") 24 | # return f"data:image/png;base64,{response_base64}" 25 | 26 | 27 | def process_tool_output_formatter(output: Any) -> str: 28 | if not isinstance(output, (dict, BaseModel)): 29 | return json_dumps(output) 30 | 31 | output = output.model_dump() if isinstance(output, BaseModel) else deepcopy(output) 32 | if "stderr_b64" in output: 33 | output["stderr"] = base64.b64decode(output["stderr_b64"]).decode("utf-8") 34 | if "stdout_b64" in output: 35 | output["stdout"] = base64.b64decode(output["stdout_b64"]).decode("utf-8") 36 | return json_dumps(output) 37 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import traceback 3 | 4 | import pydantic 5 | from opentelemetry.trace import Span 6 | from typing import Any 7 | 8 | 9 | def set_span_attribute(span: Span, name: str, value: str): 10 | if value is not None: 11 | if value != "": 12 | span.set_attribute(name, value) 13 | return 14 | 15 | 16 | def dont_throw(func): 17 | """ 18 | A decorator that wraps the passed in function and logs exceptions instead of throwing them. 19 | 20 | @param func: The function to wrap 21 | @return: The wrapper function 22 | """ 23 | # Obtain a logger specific to the function's module 24 | logger = logging.getLogger(func.__module__) 25 | 26 | def wrapper(*args, **kwargs): 27 | try: 28 | return func(*args, **kwargs) 29 | except Exception: 30 | logger.debug( 31 | "Laminar failed to trace in %s, error: %s", 32 | func.__name__, 33 | traceback.format_exc(), 34 | ) 35 | 36 | return wrapper 37 | 38 | 39 | def to_dict(obj: pydantic.BaseModel | dict) -> dict[str, Any]: 40 | try: 41 | if isinstance(obj, pydantic.BaseModel): 42 | return obj.model_dump() 43 | elif isinstance(obj, dict): 44 | return obj 45 | else: 46 | return dict(obj) 47 | except Exception: 48 | return dict(obj) 49 | 50 | 51 | def with_tracer_wrapper(func): 52 | """Helper for providing tracer for wrapper functions.""" 53 | 54 | def _with_tracer(tracer, to_wrap): 55 | def wrapper(wrapped, instance, args, kwargs): 56 | return func(tracer, to_wrap, wrapped, instance, args, kwargs) 57 | 58 | return wrapper 59 | 60 | return _with_tracer 61 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/test_embedding_metrics_handler.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import MagicMock, patch 3 | from lmnr.opentelemetry_lib.opentelemetry.instrumentation.openai.shared.embeddings_wrappers import ( 4 | _set_embeddings_metrics, 5 | ) 6 | 7 | 8 | @pytest.mark.vcr 9 | def test_set_embeddings_metrics_handles_none_values(): 10 | # Mock the necessary arguments 11 | instance = MagicMock() 12 | token_counter = MagicMock() 13 | vector_size_counter = MagicMock() 14 | duration_histogram = MagicMock() 15 | response_dict = { 16 | "model": "text-embedding-ada-002", 17 | "usage": { 18 | "prompt_tokens": None, 19 | "completion_tokens": 10, 20 | }, 21 | "data": [{"embedding": [0.1, 0.2, 0.3]}], 22 | } 23 | duration = 1.23 24 | 25 | expected_attributes = { 26 | "gen_ai.system": "openai", 27 | "gen_ai.response.model": "text-embedding-ada-002", 28 |
"gen_ai.operation.name": "embeddings", 29 | "server.address": "", 30 | "stream": False, 31 | "gen_ai.token.type": "output", 32 | } 33 | 34 | with patch("logging.error") as mock_logging_error: 35 | _set_embeddings_metrics( 36 | instance, 37 | token_counter, 38 | vector_size_counter, 39 | duration_histogram, 40 | response_dict, 41 | duration, 42 | ) 43 | 44 | # Check that logging.error was called for the None value 45 | mock_logging_error.assert_called_with( 46 | "Received None value for prompt_tokens in usage" 47 | ) 48 | 49 | # Ensure token_counter.record was called with the correct attributes 50 | token_counter.record.assert_called_once_with(10, attributes=expected_attributes) 51 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/tracing/tracer.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | from typing import Generator, Iterator, Tuple 3 | 4 | from opentelemetry import trace 5 | from opentelemetry.context import Context 6 | from lmnr.opentelemetry_lib.tracing import TracerWrapper 7 | from lmnr.opentelemetry_lib.tracing.span import LaminarSpan 8 | 9 | 10 | def get_laminar_tracer_provider() -> trace.TracerProvider: 11 | return TracerWrapper.instance.__tracer_provider or trace.get_tracer_provider() 12 | 13 | 14 | @contextmanager 15 | def get_tracer(flush_on_exit: bool = False): 16 | wrapper = TracerWrapper() 17 | try: 18 | yield LaminarTracer(wrapper.get_tracer()) 19 | finally: 20 | if flush_on_exit: 21 | wrapper.flush() 22 | 23 | 24 | @contextmanager 25 | def get_tracer_with_context( 26 | flush_on_exit: bool = False, 27 | ) -> Generator[Tuple[trace.Tracer, Context], None, None]: 28 | """Get tracer with isolated context. 
Returns (tracer, context) tuple.""" 29 | wrapper = TracerWrapper() 30 | try: 31 | tracer = LaminarTracer(wrapper.get_tracer()) 32 | context = wrapper.get_isolated_context() 33 | yield tracer, context 34 | finally: 35 | if flush_on_exit: 36 | wrapper.flush() 37 | 38 | 39 | class LaminarTracer(trace.Tracer): 40 | _instance: trace.Tracer 41 | 42 | def __init__(self, instance: trace.Tracer): 43 | self._instance = instance 44 | 45 | def start_span(self, *args, **kwargs) -> trace.Span: 46 | span = LaminarSpan(self._instance.start_span(*args, **kwargs)) 47 | return span 48 | 49 | @contextmanager 50 | def start_as_current_span(self, *args, **kwargs) -> Iterator[trace.Span]: 51 | with self._instance.start_as_current_span(*args, **kwargs) as span: 52 | yield LaminarSpan(span) 53 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_claude_agent/test_claude_sdk_client.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter 4 | from claude_agent_sdk import ClaudeSDKClient 5 | 6 | from mock_transport import MockClaudeTransport 7 | 8 | 9 | @pytest.mark.asyncio 10 | async def test_claude_agent_query(span_exporter: InMemorySpanExporter): 11 | async with ClaudeSDKClient(transport=MockClaudeTransport()) as client: 12 | await client.query("What's the capital of France?") 13 | async for _ in client.receive_response(): 14 | pass 15 | 16 | await client.query("What's the population of that city?") 17 | async for _ in client.receive_response(): 18 | pass 19 | 20 | spans_tuple = span_exporter.get_finished_spans() 21 | spans = sorted(list(spans_tuple), key=lambda x: x.start_time) 22 | 23 | assert len(spans) == 8 24 | assert spans[0].name == "ClaudeSDKClient.connect" 25 | assert spans[1].name == "ClaudeSDKClient.query" 26 | assert spans[2].name == "ClaudeSDKClient.receive_response" 27 | assert spans[3].name == "ClaudeSDKClient.receive_messages" 28 | assert spans[4].name == "ClaudeSDKClient.query" 29 | assert spans[5].name == "ClaudeSDKClient.receive_response" 30 | assert spans[6].name == "ClaudeSDKClient.receive_messages" 31 | assert spans[7].name == "ClaudeSDKClient.disconnect" 32 | 33 | assert spans[1].attributes["lmnr.span.path"] == ("ClaudeSDKClient.query",) 34 | assert ( 35 | spans[1].attributes["lmnr.span.input"] 36 | == '{"prompt":"What\'s the capital of France?"}' 37 | ) 38 | assert spans[3].parent.trace_id == spans[2].context.trace_id 39 | assert spans[3].parent.span_id == spans[2].context.span_id 40 | assert "million" in str(spans[6].attributes["lmnr.span.output"]) 41 | -------------------------------------------------------------------------------- /tests/test_races/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Separate conftest for race condition testing. 3 | 4 | This file provides fixtures that don't interfere with race condition tests 5 | by avoiding global tracer initialization, but properly restores state 6 | after tests complete. 
7 | 8 | To use this instead of the main conftest.py, run pytest with: 9 | pytest --confcutdir=tests/conftest_race_testing.py tests/test_tracer_wrapper_race_condition.py 10 | """ 11 | 12 | import pytest 13 | from lmnr.opentelemetry_lib.tracing import TracerWrapper 14 | from lmnr.opentelemetry_lib import TracerManager 15 | 16 | 17 | @pytest.fixture(scope="function", autouse=True) 18 | def clean_tracer_state(): 19 | """Clean tracer state before and after each test, properly saving/restoring global state.""" 20 | # Save current state before clearing 21 | saved_wrapper_instance = getattr(TracerWrapper, "instance", None) 22 | saved_manager_wrapper = getattr( 23 | TracerManager, "_TracerManager__tracer_wrapper", None 24 | ) 25 | 26 | # Clean before test 27 | if hasattr(TracerWrapper, "instance"): 28 | delattr(TracerWrapper, "instance") 29 | 30 | if hasattr(TracerManager, "_TracerManager__tracer_wrapper"): 31 | delattr(TracerManager, "_TracerManager__tracer_wrapper") 32 | 33 | yield 34 | 35 | # Clean after test 36 | if hasattr(TracerWrapper, "instance"): 37 | delattr(TracerWrapper, "instance") 38 | 39 | if hasattr(TracerManager, "_TracerManager__tracer_wrapper"): 40 | delattr(TracerManager, "_TracerManager__tracer_wrapper") 41 | 42 | # Restore saved state to avoid breaking other tests 43 | if saved_wrapper_instance is not None: 44 | TracerWrapper.instance = saved_wrapper_instance 45 | 46 | if saved_manager_wrapper is not None: 47 | TracerManager._TracerManager__tracer_wrapper = saved_manager_wrapper 48 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/tracing/attributes.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import ( 3 | GEN_AI_SYSTEM, 4 | GEN_AI_REQUEST_MODEL, 5 | GEN_AI_RESPONSE_MODEL, 6 | GEN_AI_USAGE_INPUT_TOKENS, 7 | GEN_AI_USAGE_OUTPUT_TOKENS, 8 | GEN_AI_RESPONSE_ID, 9 | ) 10 | from opentelemetry.semconv_ai import SpanAttributes 11 | 12 | SPAN_INPUT = "lmnr.span.input" 13 | SPAN_OUTPUT = "lmnr.span.output" 14 | SPAN_TYPE = "lmnr.span.type" 15 | SPAN_PATH = "lmnr.span.path" 16 | SPAN_IDS_PATH = "lmnr.span.ids_path" 17 | PARENT_SPAN_PATH = "lmnr.span.parent_path" 18 | PARENT_SPAN_IDS_PATH = "lmnr.span.parent_ids_path" 19 | SPAN_INSTRUMENTATION_SOURCE = "lmnr.span.instrumentation_source" 20 | SPAN_SDK_VERSION = "lmnr.span.sdk_version" 21 | SPAN_LANGUAGE_VERSION = "lmnr.span.language_version" 22 | HUMAN_EVALUATOR_OPTIONS = "lmnr.span.human_evaluator_options" 23 | 24 | ASSOCIATION_PROPERTIES = "lmnr.association.properties" 25 | SESSION_ID = "session_id" 26 | USER_ID = "user_id" 27 | TRACE_TYPE = "trace_type" 28 | TRACING_LEVEL = "tracing_level" 29 | 30 | 31 | # exposed to the user, configurable 32 | class Attributes(Enum): 33 | # == This is the minimum set of attributes for a proper LLM span == 34 | # 35 | INPUT_TOKEN_COUNT = GEN_AI_USAGE_INPUT_TOKENS 36 | OUTPUT_TOKEN_COUNT = GEN_AI_USAGE_OUTPUT_TOKENS 37 | TOTAL_TOKEN_COUNT = SpanAttributes.LLM_USAGE_TOTAL_TOKENS 38 | PROVIDER = GEN_AI_SYSTEM 39 | REQUEST_MODEL = GEN_AI_REQUEST_MODEL 40 | RESPONSE_MODEL = GEN_AI_RESPONSE_MODEL 41 | # 42 | ## == End of minimum set == 43 | # == Additional attributes == 44 | # 45 | INPUT_COST = "gen_ai.usage.input_cost" 46 | OUTPUT_COST = "gen_ai.usage.output_cost" 47 | TOTAL_COST = "gen_ai.usage.cost" 48 | RESPONSE_ID = GEN_AI_RESPONSE_ID 49 | # 50 | # == End of additional attributes == 51 | 
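The `Attributes` enum above is re-exported from `lmnr` (see `src/lmnr/__init__.py` earlier), and each member's value is the underlying attribute key string. A hedged sketch of how the minimum set could be recorded on an active OpenTelemetry span — the helper API Laminar actually recommends may differ, and the model name and token counts below are illustrative only:

```
from opentelemetry import trace

from lmnr import Attributes

# Assumes a span is already active; attribute values are illustrative.
span = trace.get_current_span()
span.set_attribute(Attributes.PROVIDER.value, "openai")
span.set_attribute(Attributes.REQUEST_MODEL.value, "gpt-4.1-nano")
span.set_attribute(Attributes.RESPONSE_MODEL.value, "gpt-4.1-nano")
span.set_attribute(Attributes.INPUT_TOKEN_COUNT.value, 9)
span.set_attribute(Attributes.OUTPUT_TOKEN_COUNT.value, 16)
```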
-------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_completions/test_completion_context_propagation.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "meta-llama/Llama-3.2-1B-Instruct", "prompt": "Tell me a joke 4 | about opentelemetry"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '93' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-b7362c76f7461e8f92ed8bbaf6a3fc42-d4fa2ada978b45ce-01 20 | user-agent: 21 | - OpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/completions 40 | response: 41 | body: 42 | string: '{"id":"cmpl-2996bf68f7f142fa817bdd32af678df9","object":"text_completion","created":1732108316,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"text":"\n\nI 43 | want to share an interesting story about opentelemetry. I''d like","logprobs":null,"finish_reason":"length","stop_reason":null}],"usage":{"prompt_tokens":9,"total_tokens":25,"completion_tokens":16}}' 44 | headers: 45 | content-length: 46 | - '370' 47 | content-type: 48 | - application/json 49 | date: 50 | - Wed, 20 Nov 2024 13:11:55 GMT 51 | server: 52 | - uvicorn 53 | status: 54 | code: 200 55 | message: OK 56 | version: 1 57 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_completions/test_completion_context_propagation_with_events_with_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "meta-llama/Llama-3.2-1B-Instruct", "prompt": "Tell me a joke 4 | about opentelemetry"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '93' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-b7362c76f7461e8f92ed8bbaf6a3fc42-d4fa2ada978b45ce-01 20 | user-agent: 21 | - OpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/completions 40 | response: 41 | body: 42 | string: '{"id":"cmpl-2996bf68f7f142fa817bdd32af678df9","object":"text_completion","created":1732108316,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"text":"\n\nI 43 | want to share an interesting story about opentelemetry. 
I''d like","logprobs":null,"finish_reason":"length","stop_reason":null}],"usage":{"prompt_tokens":9,"total_tokens":25,"completion_tokens":16}}' 44 | headers: 45 | content-length: 46 | - '370' 47 | content-type: 48 | - application/json 49 | date: 50 | - Wed, 20 Nov 2024 13:11:55 GMT 51 | server: 52 | - uvicorn 53 | status: 54 | code: 200 55 | message: OK 56 | version: 1 57 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_completions/test_async_completion_context_propagation.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "meta-llama/Llama-3.2-1B-Instruct", "prompt": "Tell me a joke 4 | about opentelemetry"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '93' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-18eb7f064f1a21facfe00d2f261b2d10-a06931c3054de060-01 20 | user-agent: 21 | - AsyncOpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - async:asyncio 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/completions 40 | response: 41 | body: 42 | string: '{"id":"cmpl-4acc6171f6c34008af07ca8490da3b95","object":"text_completion","created":1732108316,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"text":"\n\nThere 43 | was a meter in a company that wanted to see improvement in the efficiency","logprobs":null,"finish_reason":"length","stop_reason":null}],"usage":{"prompt_tokens":9,"total_tokens":25,"completion_tokens":16}}' 44 | headers: 45 | content-length: 46 | - '383' 47 | content-type: 48 | - application/json 49 | date: 50 | - Wed, 20 Nov 2024 13:11:55 GMT 51 | server: 52 | - uvicorn 53 | status: 54 | code: 200 55 | message: OK 56 | version: 1 57 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_completions/test_completion_context_propagation_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "meta-llama/Llama-3.2-1B-Instruct", "prompt": "Tell me a joke 4 | about opentelemetry"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '93' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-b7362c76f7461e8f92ed8bbaf6a3fc42-d4fa2ada978b45ce-01 20 | user-agent: 21 | - OpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/completions 40 | response: 41 | body: 42 | string: 
'{"id":"cmpl-2996bf68f7f142fa817bdd32af678df9","object":"text_completion","created":1732108316,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"text":"\n\nI 43 | want to share an interesting story about opentelemetry. I''d like","logprobs":null,"finish_reason":"length","stop_reason":null}],"usage":{"prompt_tokens":9,"total_tokens":25,"completion_tokens":16}}' 44 | headers: 45 | content-length: 46 | - '370' 47 | content-type: 48 | - application/json 49 | date: 50 | - Wed, 20 Nov 2024 13:11:55 GMT 51 | server: 52 | - uvicorn 53 | status: 54 | code: 200 55 | message: OK 56 | version: 1 57 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_completions/test_async_completion_context_propagation_with_events_with_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "meta-llama/Llama-3.2-1B-Instruct", "prompt": "Tell me a joke 4 | about opentelemetry"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '93' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-18eb7f064f1a21facfe00d2f261b2d10-a06931c3054de060-01 20 | user-agent: 21 | - AsyncOpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - async:asyncio 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/completions 40 | response: 41 | body: 42 | string: '{"id":"cmpl-4acc6171f6c34008af07ca8490da3b95","object":"text_completion","created":1732108316,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"text":"\n\nThere 43 | was a meter in a company that wanted to see improvement in the efficiency","logprobs":null,"finish_reason":"length","stop_reason":null}],"usage":{"prompt_tokens":9,"total_tokens":25,"completion_tokens":16}}' 44 | headers: 45 | content-length: 46 | - '383' 47 | content-type: 48 | - application/json 49 | date: 50 | - Wed, 20 Nov 2024 13:11:55 GMT 51 | server: 52 | - uvicorn 53 | status: 54 | code: 200 55 | message: OK 56 | version: 1 57 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_completions/test_async_completion_context_propagation_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "meta-llama/Llama-3.2-1B-Instruct", "prompt": "Tell me a joke 4 | about opentelemetry"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '93' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-18eb7f064f1a21facfe00d2f261b2d10-a06931c3054de060-01 20 | user-agent: 21 | - AsyncOpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - async:asyncio 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | 
x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/completions 40 | response: 41 | body: 42 | string: '{"id":"cmpl-4acc6171f6c34008af07ca8490da3b95","object":"text_completion","created":1732108316,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"text":"\n\nThere 43 | was a meter in a company that wanted to see improvement in the efficiency","logprobs":null,"finish_reason":"length","stop_reason":null}],"usage":{"prompt_tokens":9,"total_tokens":25,"completion_tokens":16}}' 44 | headers: 45 | content-length: 46 | - '383' 47 | content-type: 48 | - application/json 49 | date: 50 | - Wed, 20 Nov 2024 13:11:55 GMT 51 | server: 52 | - uvicorn 53 | status: 54 | code: 200 55 | message: OK 56 | version: 1 57 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_chat/test_chat_async_context_propagation.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "meta-llama/Llama-3.2-1B-Instruct"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '126' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-3b751724494d706cee7ab0146868ded5-c75e2c7466e5fcec-01 20 | user-agent: 21 | - AsyncOpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - async:asyncio 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/chat/completions 40 | response: 41 | body: 42 | string: '{"id":"chat-4db07f02ecae49cbafe1d359db1650df","object":"chat.completion","created":1732108315,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"A 43 | data scientist walks into an openTelemetry adoption meeting and says, \n\n\"I''m 44 | here to help track our progress, but I''m just a trace.\"","tool_calls":[]},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":43,"total_tokens":75,"completion_tokens":32}}' 45 | headers: 46 | content-length: 47 | - '487' 48 | content-type: 49 | - application/json 50 | date: 51 | - Wed, 20 Nov 2024 13:11:55 GMT 52 | server: 53 | - uvicorn 54 | status: 55 | code: 200 56 | message: OK 57 | version: 1 58 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_chat/test_chat_context_propagation.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "meta-llama/Llama-3.2-1B-Instruct"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '126' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 
00-2dee24aaae40f1526fdcd2eb208791ae-74ca2d0c67549b95-01 20 | user-agent: 21 | - OpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/chat/completions 40 | response: 41 | body: 42 | string: '{"id":"chat-43f4347c3299481e9704ab77439fbdb8","object":"chat.completion","created":1732108311,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"Why 43 | did the OpenTelemetry metric go to therapy?\n\nBecause it was feeling a little 44 | \"trapped\" in its logging function, and wanted to \"release\" its stress.","tool_calls":[]},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":43,"total_tokens":79,"completion_tokens":36}}' 45 | headers: 46 | content-length: 47 | - '506' 48 | content-type: 49 | - application/json 50 | date: 51 | - Wed, 20 Nov 2024 13:11:50 GMT 52 | server: 53 | - uvicorn 54 | status: 55 | code: 200 56 | message: OK 57 | version: 1 58 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_chat/test_chat_async_context_propagation_with_events_with_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "meta-llama/Llama-3.2-1B-Instruct"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '126' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-3b751724494d706cee7ab0146868ded5-c75e2c7466e5fcec-01 20 | user-agent: 21 | - AsyncOpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - async:asyncio 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/chat/completions 40 | response: 41 | body: 42 | string: '{"id":"chat-4db07f02ecae49cbafe1d359db1650df","object":"chat.completion","created":1732108315,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"A 43 | data scientist walks into an openTelemetry adoption meeting and says, \n\n\"I''m 44 | here to help track our progress, but I''m just a trace.\"","tool_calls":[]},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":43,"total_tokens":75,"completion_tokens":32}}' 45 | headers: 46 | content-length: 47 | - '487' 48 | content-type: 49 | - application/json 50 | date: 51 | - Wed, 20 Nov 2024 13:11:55 GMT 52 | server: 53 | - uvicorn 54 | status: 55 | code: 200 56 | message: OK 57 | version: 1 58 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_chat/test_chat_context_propagation_with_events_with_content.yaml: 
-------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "meta-llama/Llama-3.2-1B-Instruct"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '126' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-2dee24aaae40f1526fdcd2eb208791ae-74ca2d0c67549b95-01 20 | user-agent: 21 | - OpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/chat/completions 40 | response: 41 | body: 42 | string: '{"id":"chat-43f4347c3299481e9704ab77439fbdb8","object":"chat.completion","created":1732108311,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"Why 43 | did the OpenTelemetry metric go to therapy?\n\nBecause it was feeling a little 44 | \"trapped\" in its logging function, and wanted to \"release\" its stress.","tool_calls":[]},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":43,"total_tokens":79,"completion_tokens":36}}' 45 | headers: 46 | content-length: 47 | - '506' 48 | content-type: 49 | - application/json 50 | date: 51 | - Wed, 20 Nov 2024 13:11:50 GMT 52 | server: 53 | - uvicorn 54 | status: 55 | code: 200 56 | message: OK 57 | version: 1 58 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_chat/test_chat_async_context_propagation_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "meta-llama/Llama-3.2-1B-Instruct"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '126' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-3b751724494d706cee7ab0146868ded5-c75e2c7466e5fcec-01 20 | user-agent: 21 | - AsyncOpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - async:asyncio 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/chat/completions 40 | response: 41 | body: 42 | string: '{"id":"chat-4db07f02ecae49cbafe1d359db1650df","object":"chat.completion","created":1732108315,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"A 43 | data scientist walks into an openTelemetry adoption meeting and says, \n\n\"I''m 44 | here to help track our progress, but I''m just a 
trace.\"","tool_calls":[]},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":43,"total_tokens":75,"completion_tokens":32}}' 45 | headers: 46 | content-length: 47 | - '487' 48 | content-type: 49 | - application/json 50 | date: 51 | - Wed, 20 Nov 2024 13:11:55 GMT 52 | server: 53 | - uvicorn 54 | status: 55 | code: 200 56 | message: OK 57 | version: 1 58 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_chat/test_chat_context_propagation_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "meta-llama/Llama-3.2-1B-Instruct"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '126' 14 | content-type: 15 | - application/json 16 | host: 17 | - localhost:8000 18 | traceparent: 19 | - 00-2dee24aaae40f1526fdcd2eb208791ae-74ca2d0c67549b95-01 20 | user-agent: 21 | - OpenAI/Python 1.51.2 22 | x-stainless-arch: 23 | - x64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Linux 30 | x-stainless-package-version: 31 | - 1.51.2 32 | x-stainless-retry-count: 33 | - '0' 34 | x-stainless-runtime: 35 | - CPython 36 | x-stainless-runtime-version: 37 | - 3.10.12 38 | method: POST 39 | uri: http://localhost:8000/v1/chat/completions 40 | response: 41 | body: 42 | string: '{"id":"chat-43f4347c3299481e9704ab77439fbdb8","object":"chat.completion","created":1732108311,"model":"meta-llama/Llama-3.2-1B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"Why 43 | did the OpenTelemetry metric go to therapy?\n\nBecause it was feeling a little 44 | \"trapped\" in its logging function, and wanted to \"release\" its stress.","tool_calls":[]},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":43,"total_tokens":79,"completion_tokens":36}}' 45 | headers: 46 | content-length: 47 | - '506' 48 | content-type: 49 | - application/json 50 | date: 51 | - Wed, 20 Nov 2024 13:11:50 GMT 52 | server: 53 | - uvicorn 54 | status: 55 | code: 200 56 | message: OK 57 | version: 1 58 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/cassettes/test_completion/test_anthropic_completion_legacy.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"max_tokens_to_sample": 2048, "model": "claude-instant-1.2", "prompt": 4 | "\n\nHuman:\nHello world\n\n\nAssistant:", "top_p": 0.1}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | anthropic-version: 11 | - '2023-06-01' 12 | connection: 13 | - keep-alive 14 | content-length: 15 | - '128' 16 | content-type: 17 | - application/json 18 | host: 19 | - api.anthropic.com 20 | user-agent: 21 | - Anthropic/Python 0.21.3 22 | x-stainless-arch: 23 | - other:amd64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Windows 30 | x-stainless-package-version: 31 | - 0.21.3 32 | x-stainless-runtime: 33 | - CPython 34 | x-stainless-runtime-version: 35 | - 3.9.13 36 | method: POST 37 | uri: https://api.anthropic.com/v1/complete 38 | 
response: 39 | body: 40 | string: !!binary | 41 | H4sIAAAAAAAAA4yOsQrCMBRFf0XvnIp1cMgoLRS6lIJOhRDap1TS92qTiCL+u1gUHB3vgXO4D4T7 42 | SNBoZRgdhV4YCn33JWad5ufjVF2r3NdZvS+z7UF2t1BC/Soai4KckyUUfJDRTGT9zOfl6RKJW4LC 43 | IB25d93Z2FHSsw+WQ5KuNh8VGg03XMTBsoaCk5P558/zBQAA//8DAJPkGKTLAAAA 44 | headers: 45 | CF-Cache-Status: 46 | - DYNAMIC 47 | CF-RAY: 48 | - 87084f286b44103c-LAX 49 | Connection: 50 | - keep-alive 51 | Content-Encoding: 52 | - gzip 53 | Content-Type: 54 | - application/json 55 | Date: 56 | - Sun, 07 Apr 2024 07:29:54 GMT 57 | Server: 58 | - cloudflare 59 | Transfer-Encoding: 60 | - chunked 61 | request-id: 62 | - req_01UeezAJTpSFWGDxF7RbHs7q 63 | via: 64 | - 1.1 google 65 | x-cloud-trace-context: 66 | - f50c80863bf690164a40dada9496b472 67 | status: 68 | code: 200 69 | message: OK 70 | version: 1 71 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/cassettes/test_completion/test_anthropic_completion_with_events_with_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"max_tokens_to_sample": 2048, "model": "claude-instant-1.2", "prompt": 4 | "\n\nHuman:\nHello world\n\n\nAssistant:", "top_p": 0.1}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | anthropic-version: 11 | - '2023-06-01' 12 | connection: 13 | - keep-alive 14 | content-length: 15 | - '128' 16 | content-type: 17 | - application/json 18 | host: 19 | - api.anthropic.com 20 | user-agent: 21 | - Anthropic/Python 0.21.3 22 | x-stainless-arch: 23 | - other:amd64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Windows 30 | x-stainless-package-version: 31 | - 0.21.3 32 | x-stainless-runtime: 33 | - CPython 34 | x-stainless-runtime-version: 35 | - 3.9.13 36 | method: POST 37 | uri: https://api.anthropic.com/v1/complete 38 | response: 39 | body: 40 | string: !!binary | 41 | H4sIAAAAAAAAA4yOsQrCMBRFf0XvnIp1cMgoLRS6lIJOhRDap1TS92qTiCL+u1gUHB3vgXO4D4T7 42 | SNBoZRgdhV4YCn33JWad5ufjVF2r3NdZvS+z7UF2t1BC/Soai4KckyUUfJDRTGT9zOfl6RKJW4LC 43 | IB25d93Z2FHSsw+WQ5KuNh8VGg03XMTBsoaCk5P558/zBQAA//8DAJPkGKTLAAAA 44 | headers: 45 | CF-Cache-Status: 46 | - DYNAMIC 47 | CF-RAY: 48 | - 87084f286b44103c-LAX 49 | Connection: 50 | - keep-alive 51 | Content-Encoding: 52 | - gzip 53 | Content-Type: 54 | - application/json 55 | Date: 56 | - Sun, 07 Apr 2024 07:29:54 GMT 57 | Server: 58 | - cloudflare 59 | Transfer-Encoding: 60 | - chunked 61 | request-id: 62 | - req_01UeezAJTpSFWGDxF7RbHs7q 63 | via: 64 | - 1.1 google 65 | x-cloud-trace-context: 66 | - f50c80863bf690164a40dada9496b472 67 | status: 68 | code: 200 69 | message: OK 70 | version: 1 71 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/cassettes/test_completion/test_anthropic_completion_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"max_tokens_to_sample": 2048, "model": "claude-instant-1.2", "prompt": 4 | "\n\nHuman:\nHello world\n\n\nAssistant:", "top_p": 0.1}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | anthropic-version: 11 | - '2023-06-01' 12 | connection: 13 | - keep-alive 14 | content-length: 15 | - '128' 16 | content-type: 17 | - application/json 18 | host: 19 | - api.anthropic.com 20 | 
user-agent: 21 | - Anthropic/Python 0.21.3 22 | x-stainless-arch: 23 | - other:amd64 24 | x-stainless-async: 25 | - 'false' 26 | x-stainless-lang: 27 | - python 28 | x-stainless-os: 29 | - Windows 30 | x-stainless-package-version: 31 | - 0.21.3 32 | x-stainless-runtime: 33 | - CPython 34 | x-stainless-runtime-version: 35 | - 3.9.13 36 | method: POST 37 | uri: https://api.anthropic.com/v1/complete 38 | response: 39 | body: 40 | string: !!binary | 41 | H4sIAAAAAAAAA4yOsQrCMBRFf0XvnIp1cMgoLRS6lIJOhRDap1TS92qTiCL+u1gUHB3vgXO4D4T7 42 | SNBoZRgdhV4YCn33JWad5ufjVF2r3NdZvS+z7UF2t1BC/Soai4KckyUUfJDRTGT9zOfl6RKJW4LC 43 | IB25d93Z2FHSsw+WQ5KuNh8VGg03XMTBsoaCk5P558/zBQAA//8DAJPkGKTLAAAA 44 | headers: 45 | CF-Cache-Status: 46 | - DYNAMIC 47 | CF-RAY: 48 | - 87084f286b44103c-LAX 49 | Connection: 50 | - keep-alive 51 | Content-Encoding: 52 | - gzip 53 | Content-Type: 54 | - application/json 55 | Date: 56 | - Sun, 07 Apr 2024 07:29:54 GMT 57 | Server: 58 | - cloudflare 59 | Transfer-Encoding: 60 | - chunked 61 | request-id: 62 | - req_01UeezAJTpSFWGDxF7RbHs7q 63 | via: 64 | - 1.1 google 65 | x-cloud-trace-context: 66 | - f50c80863bf690164a40dada9496b472 67 | status: 68 | code: 200 69 | message: OK 70 | version: 1 71 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "What is the capital of France?"}], "role": 4 | "user"}], "systemInstruction": {"parts": [{"text": "Be concise and to the point. 5 | Use tools as much as possible."}], "role": "user"}, "generationConfig": {}}' 6 | headers: 7 | accept: 8 | - '*/*' 9 | accept-encoding: 10 | - gzip, deflate, zstd 11 | connection: 12 | - keep-alive 13 | content-length: 14 | - '234' 15 | content-type: 16 | - application/json 17 | host: 18 | - generativelanguage.googleapis.com 19 | user-agent: 20 | - google-genai-sdk/1.19.0 gl-python/3.12.3 21 | x-goog-api-client: 22 | - google-genai-sdk/1.19.0 gl-python/3.12.3 23 | method: POST 24 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-05-20:generateContent 25 | response: 26 | body: 27 | string: !!binary | 28 | H4sIAAAAAAAC/2WQT0+EMBDF73wK0vN2RdyN0aOriZqIrIt/EuOhsbPQCC1pB8UQvrsFFrbEHpp2 29 | 3ut03q/xfJ98MskFZwiGXPrvtuL7Tb93mpIIEq0wlmyxZBqP3mE1ztlaEOruEYmZFmZJHLGdzh+L 30 | Y0utcuj8heKQj/Z2NJC9kMJkT8CMkp1tlzzGZFKF5FDbcuCNH/StSWVYCg+AzIZjUwRSalWUmKgv 31 | kBtV9eHCs6GZw2KuH2RUyPKZslot/nU11/ZPkbuIHHo2IssF/nY5kpu3hDgYcD7UyMFzcBHMVJVm 32 | OB/w9MI78BoQvoA2YmDV381JCoWFSMPlmu5zZjJaavgW8EODNQ2DfgiiwZRKGrjj/XDPV7csOo8i 33 | 9bqpTRBTvruvtlvitd4foolwBDgCAAA= 34 | headers: 35 | Alt-Svc: 36 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 37 | Content-Encoding: 38 | - gzip 39 | Content-Type: 40 | - application/json; charset=UTF-8 41 | Date: 42 | - Mon, 09 Jun 2025 20:13:01 GMT 43 | Server: 44 | - scaffolding on HTTPServer2 45 | Server-Timing: 46 | - gfet4t7; dur=375 47 | Transfer-Encoding: 48 | - chunked 49 | Vary: 50 | - Origin 51 | - X-Origin 52 | - Referer 53 | X-Content-Type-Options: 54 | - nosniff 55 | X-Frame-Options: 56 | - SAMEORIGIN 57 | X-XSS-Protection: 58 | - '0' 59 | status: 60 | code: 200 61 | message: OK 62 | version: 1 63 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai_string_contents.yaml: 
-------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "What is the capital of France?"}], "role": 4 | "user"}], "systemInstruction": {"parts": [{"text": "Be concise and to the point. 5 | Use tools as much as possible."}], "role": "user"}, "generationConfig": {}}' 6 | headers: 7 | accept: 8 | - '*/*' 9 | accept-encoding: 10 | - gzip, deflate, zstd 11 | connection: 12 | - keep-alive 13 | content-length: 14 | - '234' 15 | content-type: 16 | - application/json 17 | host: 18 | - generativelanguage.googleapis.com 19 | user-agent: 20 | - google-genai-sdk/1.34.0 gl-python/3.13.5 21 | x-goog-api-client: 22 | - google-genai-sdk/1.34.0 gl-python/3.13.5 23 | method: POST 24 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-05-20:generateContent 25 | response: 26 | body: 27 | string: !!binary | 28 | H4sIAAAAAAAC/2WQXU+DMBSG7/srSK+HItsS9NaPZIlGVPxIjDHVnkEzaLE96AzZf7eFwUrsRdOe 29 | 9+3peZ+WBAH9ZJILzhAMPQtebSUI2m53mpIIEq0wlGyxZhoP3n613tlaELbuEU2ZFoZ62m48v80O 30 | HbUqwdkrxaEc7LvBQNdCClPcAzNKOttDdpvSURWSw9aWIzJ80LWmjWE53AAym42NCWitVVVjpjYg 31 | z1XTZYvnfTMPxUQ/2cuokJUTZZHM/nU1F/ZPUfqEPHg2IisF/roc2eVLRj0MOB1q4EA8XBQL1eQF 32 | TgeMF2TPq0f4BNqInlV3N8c5VBZiGB8tw3XJTBHWGr4F/ITRMoyjbgiqwdRKGlhx91A9zxu2+oqu 33 | knfc8Mf0tLn+SO4iSnbkD5aoXE83AgAA 34 | headers: 35 | Alt-Svc: 36 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 37 | Content-Encoding: 38 | - gzip 39 | Content-Type: 40 | - application/json; charset=UTF-8 41 | Date: 42 | - Tue, 14 Oct 2025 15:34:57 GMT 43 | Server: 44 | - scaffolding on HTTPServer2 45 | Server-Timing: 46 | - gfet4t7; dur=716 47 | Transfer-Encoding: 48 | - chunked 49 | Vary: 50 | - Origin 51 | - X-Origin 52 | - Referer 53 | X-Content-Type-Options: 54 | - nosniff 55 | X-Frame-Options: 56 | - SAMEORIGIN 57 | X-XSS-Protection: 58 | - '0' 59 | status: 60 | code: 200 61 | message: OK 62 | version: 1 63 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Initially copied over from openllmetry, commit 3 | b3a18c9f7e6ff2368c8fb0bc35fd9123f11121c4 4 | """ 5 | 6 | from typing import Callable, Collection, Optional 7 | 8 | from opentelemetry.instrumentation.instrumentor import BaseInstrumentor 9 | from .shared.config import Config 10 | from .utils import is_openai_v1 11 | from typing_extensions import Coroutine 12 | 13 | _instruments = ("openai >= 0.27.0",) 14 | 15 | 16 | class OpenAIInstrumentor(BaseInstrumentor): 17 | """An instrumentor for OpenAI's client library.""" 18 | 19 | def __init__( 20 | self, 21 | enrich_assistant: bool = False, 22 | enrich_token_usage: bool = False, 23 | exception_logger=None, 24 | get_common_metrics_attributes: Callable[[], dict] = lambda: {}, 25 | upload_base64_image: Optional[ 26 | Callable[[str, str, str, str], Coroutine[None, None, str]] 27 | ] = lambda *args: "", 28 | enable_trace_context_propagation: bool = True, 29 | use_legacy_attributes: bool = True, 30 | ): 31 | super().__init__() 32 | Config.enrich_assistant = enrich_assistant 33 | Config.enrich_token_usage = enrich_token_usage 34 | Config.exception_logger = exception_logger 35 | Config.get_common_metrics_attributes = get_common_metrics_attributes 36 | Config.upload_base64_image = upload_base64_image 37 | Config.enable_trace_context_propagation = 
enable_trace_context_propagation
38 | Config.use_legacy_attributes = use_legacy_attributes
39 | 
40 | def instrumentation_dependencies(self) -> Collection[str]:
41 | return _instruments
42 | 
43 | def _instrument(self, **kwargs):
44 | if is_openai_v1():
45 | from .v1 import OpenAIV1Instrumentor
46 | 
47 | OpenAIV1Instrumentor().instrument(**kwargs)
48 | else:
49 | from .v0 import OpenAIV0Instrumentor
50 | 
51 | OpenAIV0Instrumentor().instrument(**kwargs)
52 | 
53 | def _uninstrument(self, **kwargs):
54 | if is_openai_v1():
55 | from .v1 import OpenAIV1Instrumentor
56 | 
57 | OpenAIV1Instrumentor().uninstrument(**kwargs)
58 | else:
59 | from .v0 import OpenAIV0Instrumentor
60 | 
61 | OpenAIV0Instrumentor().uninstrument(**kwargs)
62 | 
--------------------------------------------------------------------------------
/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py:
--------------------------------------------------------------------------------
1 | import time
2 | 
3 | from opentelemetry import context as context_api
4 | from ..utils import is_openai_v1
5 | from ..shared import (
6 | _get_openai_base_url,
7 | metric_shared_attributes,
8 | model_as_dict,
9 | )
10 | from ..utils import (
11 | _with_image_gen_metric_wrapper,
12 | )
13 | from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
14 | from opentelemetry.metrics import Counter, Histogram
15 | from opentelemetry.semconv_ai import SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
16 | 
17 | 
18 | @_with_image_gen_metric_wrapper
19 | def image_gen_metrics_wrapper(
20 | duration_histogram: Histogram,
21 | exception_counter: Counter,
22 | wrapped,
23 | instance,
24 | args,
25 | kwargs,
26 | ):
27 | if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
28 | SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
29 | ):
30 | return wrapped(*args, **kwargs)
31 | 
32 | try:
33 | # record time for duration
34 | start_time = time.time()
35 | response = wrapped(*args, **kwargs)
36 | end_time = time.time()
37 | except Exception as e:  # pylint: disable=broad-except
38 | end_time = time.time()
39 | duration = end_time - start_time if "start_time" in locals() else 0
40 | 
41 | attributes = {
42 | "error.type": e.__class__.__name__,
43 | }
44 | 
45 | if duration > 0 and duration_histogram:
46 | duration_histogram.record(duration, attributes=attributes)
47 | if exception_counter:
48 | exception_counter.add(1, attributes=attributes)
49 | 
50 | raise
51 | 
52 | if is_openai_v1():
53 | response_dict = model_as_dict(response)
54 | else:
55 | response_dict = response
56 | 
57 | # ImagesResponse does not include response.model, so fall back to the model from the request kwargs
58 | shared_attributes = metric_shared_attributes(
59 | response_model=kwargs.get("model") or None,
60 | operation="image_gen",
61 | server_address=_get_openai_base_url(instance),
62 | )
63 | 
64 | duration = end_time - start_time
65 | if duration_histogram:
66 | duration_histogram.record(duration, attributes=shared_attributes)
67 | 
68 | return response
69 | 
--------------------------------------------------------------------------------
/tests/cassettes/test_google_genai/test_google_genai_reasoning_tokens.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 | body: '{"contents": [{"parts": [{"text": "How many times does the letter ''r''
4 | appear in the word strawberry?"}], "role": "user"}], "systemInstruction": {"parts":
5 | [{"text": "Think deep and thoroughly step by
step."}], "role": "user"}, "generationConfig": 6 | {"thinkingConfig": {"thinkingBudget": 512}}}' 7 | headers: 8 | accept: 9 | - '*/*' 10 | accept-encoding: 11 | - gzip, deflate, zstd 12 | connection: 13 | - keep-alive 14 | content-length: 15 | - '290' 16 | content-type: 17 | - application/json 18 | host: 19 | - generativelanguage.googleapis.com 20 | user-agent: 21 | - google-genai-sdk/1.34.0 gl-python/3.13.5 22 | x-goog-api-client: 23 | - google-genai-sdk/1.34.0 gl-python/3.13.5 24 | method: POST 25 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite:generateContent 26 | response: 27 | body: 28 | string: !!binary | 29 | H4sIAAAAAAAC/2VR32vCMBB+718R8iIUK5ubm/NtbHtwKBOtY7DuIdrTZrZJSa6oiP/7LtZqywKX 30 | hPu++/XdwWOML4WKZSwQLB+wb/IwdjjdDtMKQSEBlYucuTB45ZbnUPsTBWHngvgIsGXZUhcKGSaQ 31 | DSIVKcsChmS+b3yfXkG2JVuQQc1fvXsXFCbAUkAEw1qmxUSegzCWOHfEQZmBZVK5GmyrTcwibtGI 32 | 7QKM2Ue8w2vtHS//n/Z1KKNTcB1nOoa0oh8rAl9JJW0yBWG1crRZ+DHhF1SqGHbkvvGqAqfUvLBi 33 | DWNAQfKKi4g8NzrLMdQbUC9OGkK6vTJZbRsN/P7pjKNGkTZDH7rtf3ntK1WVaX1NtQ3SkCKVuHeT 34 | hG9fIa8Jgc22KiW8mmAcE12sE2y2eNvve2fJShU/wVhZyrWGjAQMup1esEqFTQKqDqeq3IDNtbIw 35 | jB1xOgyFGP/O3x8LtYnnk/4I7OyZe0fvD6OyUt2sAgAA 36 | headers: 37 | Alt-Svc: 38 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 39 | Content-Encoding: 40 | - gzip 41 | Content-Type: 42 | - application/json; charset=UTF-8 43 | Date: 44 | - Mon, 29 Sep 2025 13:06:12 GMT 45 | Server: 46 | - scaffolding on HTTPServer2 47 | Server-Timing: 48 | - gfet4t7; dur=707 49 | Transfer-Encoding: 50 | - chunked 51 | Vary: 52 | - Origin 53 | - X-Origin 54 | - Referer 55 | X-Content-Type-Options: 56 | - nosniff 57 | X-Frame-Options: 58 | - SAMEORIGIN 59 | X-XSS-Protection: 60 | - '0' 61 | status: 62 | code: 200 63 | message: OK 64 | version: 1 65 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai_reasoning_tokens_async.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "How many times does the letter ''r'' 4 | appear in the word strawberry?"}], "role": "user"}], "systemInstruction": {"parts": 5 | [{"text": "Think deep and thoroughly step by step."}], "role": "user"}, "generationConfig": 6 | {"thinkingConfig": {"thinkingBudget": 512}}}' 7 | headers: 8 | Content-Type: 9 | - application/json 10 | user-agent: 11 | - google-genai-sdk/1.34.0 gl-python/3.13.5 12 | x-goog-api-client: 13 | - google-genai-sdk/1.34.0 gl-python/3.13.5 14 | method: post 15 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite:generateContent 16 | response: 17 | body: 18 | string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": 19 | [\n {\n \"text\": \"Let's count them:\\n\\nThe word is 20 | \\\"strawberry\\\".\\nThe letter is 'r'.\\n\\n* st**r**awbe**rr**y\\n\\nThe 21 | letter 'r' appears **3** times in the word \\\"strawberry\\\".\"\n }\n 22 | \ ],\n \"role\": \"model\"\n },\n \"finishReason\": 23 | \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 24 | 25,\n \"candidatesTokenCount\": 50,\n \"totalTokenCount\": 265,\n \"promptTokensDetails\": 25 | [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 25\n 26 | \ }\n ],\n \"thoughtsTokenCount\": 190\n },\n \"modelVersion\": 27 | \"gemini-2.5-flash-lite\",\n \"responseId\": \"L4XaaIPGCcjtkdUPlInyiAM\"\n}\n" 28 | headers: 29 | Alt-Svc: 30 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 31 | 
Content-Encoding: 32 | - gzip 33 | Content-Type: 34 | - application/json; charset=UTF-8 35 | Date: 36 | - Mon, 29 Sep 2025 13:10:07 GMT 37 | Server: 38 | - scaffolding on HTTPServer2 39 | Server-Timing: 40 | - gfet4t7; dur=926 41 | Transfer-Encoding: 42 | - chunked 43 | Vary: 44 | - Origin 45 | - X-Origin 46 | - Referer 47 | X-Content-Type-Options: 48 | - nosniff 49 | X-Frame-Options: 50 | - SAMEORIGIN 51 | X-XSS-Protection: 52 | - '0' 53 | status: 54 | code: 200 55 | message: OK 56 | version: 1 57 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai_tool_calls.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "What is the weather in Tokyo?"}], "role": 4 | "user"}], "systemInstruction": {"parts": [{"text": "Be concise and to the point. 5 | Use tools as much as possible."}], "role": "user"}, "tools": [{"functionDeclarations": 6 | [{"description": "Gets the weather in a given city.", "name": "get_weather", 7 | "parameters": {"properties": {"location": {"description": "The location to get 8 | the weather for.", "type": "STRING"}}, "required": ["location"], "type": "OBJECT"}}]}], 9 | "generationConfig": {}}' 10 | headers: 11 | accept: 12 | - '*/*' 13 | accept-encoding: 14 | - gzip, deflate, zstd 15 | connection: 16 | - keep-alive 17 | content-length: 18 | - '512' 19 | content-type: 20 | - application/json 21 | host: 22 | - generativelanguage.googleapis.com 23 | user-agent: 24 | - google-genai-sdk/1.31.0 gl-python/3.13.5 25 | x-goog-api-client: 26 | - google-genai-sdk/1.31.0 gl-python/3.13.5 27 | method: POST 28 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite:generateContent 29 | response: 30 | body: 31 | string: !!binary | 32 | H4sIAAAAAAAC/3VRXUvDMBR9z68oeXZSheHwTaYPgptThwgy5LLedmH5KElKV8b+u7dpu7ZD89Ck 33 | 55yce3PukUUR34JORAIeHb+PvgmJomP41pzRHrUnooMIzMH6Xtus4+BMkrTQWy+MnoOUo8str0Eh 34 | 4TxD/1Mi+B1afnUpApu5Py4TI80WavvaYm32leEXmhP7768/b/qC3BoZ+lEmQdmZnToBT4UWbveO 35 | 4JqaH+vX1blfLnSCB4Jj1hUI1rxwkOECPVC4cH4Iz61Ruae2Uc9NEcK9ixuzwSxG/M205b3xIEfU 36 | rKMGtu6Rigo5nNFgfPRGkMJXIbynr/UgePIfddUFEfYNayNpUvpE69oRZKgooMnt9XSSSnC7Cdlj 37 | sOUWXW60w+ekFr7sixIWVfwwrdxhOVuppRLlW8bZif0CYSQB0osCAAA= 38 | headers: 39 | Alt-Svc: 40 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 41 | Content-Encoding: 42 | - gzip 43 | Content-Type: 44 | - application/json; charset=UTF-8 45 | Date: 46 | - Thu, 28 Aug 2025 12:27:26 GMT 47 | Server: 48 | - scaffolding on HTTPServer2 49 | Server-Timing: 50 | - gfet4t7; dur=446 51 | Transfer-Encoding: 52 | - chunked 53 | Vary: 54 | - Origin 55 | - X-Origin 56 | - Referer 57 | X-Content-Type-Options: 58 | - nosniff 59 | X-Frame-Options: 60 | - SAMEORIGIN 61 | X-XSS-Protection: 62 | - '0' 63 | status: 64 | code: 200 65 | message: OK 66 | version: 1 67 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/synchronous/resources/tags.py: -------------------------------------------------------------------------------- 1 | """Resource for tagging traces.""" 2 | 3 | import json 4 | import uuid 5 | 6 | from lmnr.sdk.client.synchronous.resources.base import BaseResource 7 | from lmnr.sdk.log import get_default_logger 8 | from lmnr.sdk.utils import format_id 9 | 10 | logger = get_default_logger(__name__) 11 | 12 | 13 | class Tags(BaseResource): 14 | """Resource for tagging traces.""" 15 | 16 | def tag( 17 | 
self,
18 | trace_id: str | int | uuid.UUID,
19 | tags: list[str] | str,
20 | ):
21 | """Tag a trace with a list of tags. Note that the trace must be ended
22 | before tagging it. You may want to call `Laminar.flush()` after the
23 | trace that you want to tag.
24 | 
25 | Args:
26 | trace_id (str | int | uuid.UUID): The trace id to tag.
27 | tags (list[str] | str): The tag or list of tags to add to the trace.
28 | 
29 | Raises:
30 | ValueError: If the trace id is not a valid UUID.
31 | 
32 | Returns:
33 | list[dict]: The response from the server.
34 | 
35 | Example:
36 | ```python
37 | from lmnr import Laminar, LaminarClient, observe
38 | 
39 | Laminar.initialize()
40 | client = LaminarClient()
41 | trace_id = None
42 | 
43 | @observe()
44 | def foo():
45 | global trace_id
46 | trace_id = Laminar.get_trace_id()
47 | 
48 | # make sure `foo` is called outside a trace context
49 | foo()
50 | 
51 | # or make sure the trace is ended by this point
52 | Laminar.flush()
53 | 
54 | client.tags.tag(trace_id, "my_tag")
55 | ```
56 | """
57 | trace_tags = tags if isinstance(tags, list) else [tags]
58 | formatted_trace_id = format_id(trace_id)
59 | 
60 | url = self._base_url + "/v1/tag"
61 | payload = {
62 | "traceId": formatted_trace_id,
63 | "names": trace_tags,
64 | }
65 | response = self._client.post(
66 | url,
67 | content=json.dumps(payload),
68 | headers={
69 | **self._headers(),
70 | },
71 | )
72 | 
73 | if response.status_code == 404:
74 | logger.warning(
75 | f"Trace {formatted_trace_id} not found. The trace may not have been ended yet."
76 | )
77 | return []
78 | 
79 | if response.status_code != 200:
80 | raise ValueError(
81 | f"Failed to tag trace: [{response.status_code}] {response.text}"
82 | )
83 | return response.json()
84 | 
--------------------------------------------------------------------------------
/tests/cassettes/test_google_genai/test_google_genai_multiple_tool_calls.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 | body: '{"contents": [{"parts": [{"text": "What is the weather in Tokyo and Paris?"}],
4 | "role": "user"}], "systemInstruction": {"parts": [{"text": "Be concise and to
5 | the point.
Use tools as much as possible."}], "role": "user"}, "tools": [{"functionDeclarations": 6 | [{"description": "Gets the weather in a given city.", "name": "get_weather", 7 | "parameters": {"properties": {"location": {"description": "The location to get 8 | the weather for.", "type": "STRING"}}, "required": ["location"], "type": "OBJECT"}}]}], 9 | "generationConfig": {}}' 10 | headers: 11 | accept: 12 | - '*/*' 13 | accept-encoding: 14 | - gzip, deflate, zstd 15 | connection: 16 | - keep-alive 17 | content-length: 18 | - '522' 19 | content-type: 20 | - application/json 21 | host: 22 | - generativelanguage.googleapis.com 23 | user-agent: 24 | - google-genai-sdk/1.20.0 gl-python/3.12.3 25 | x-goog-api-client: 26 | - google-genai-sdk/1.20.0 gl-python/3.12.3 27 | method: POST 28 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-05-20:generateContent 29 | response: 30 | body: 31 | string: !!binary | 32 | H4sIAAAAAAAC/81Sy07DMBC8+ysinxsIRS2IWxU4VAiIoEJICKFVs40tHDuynZa26r/jOo8mBQ7c 33 | yCGyd9az4/FsSRDQOciUp2DR0Kvg1VWCYOv/e0xJi9I6oCm5YgHaHnqrb9tZu5ZFKeeWKxmDEL3D 34 | NS4hR1enGdr3FYJlqOnguAl0Zn447BCh5rCn31PM1Mda0aOeHflttxv8C9UJaG7+oLpdvx0GUq2E 35 | 15OrFEVD1l6QLrjkhj0imGrm0+whafVSLlP8dOWINAM8NS0NZHiHFlwkoL0ILbTKC+vMRhmr0kfi 36 | YliRdRLUw8+jGrfKguhBZ+PLwTdec+2mctGNVid17pIguF37N795mXWcdwN6shonSMcwapkqM2b7 37 | EsdjUjtWmfiM2tQv5PfmNMPc2RgOT0bhQoBhYaFxyXEVRqNwGHkRVKMplDQ4TX06JAO4TeJJGd8v 38 | 02myKTYsnFxSsiNfLpXlNnADAAA= 39 | headers: 40 | Alt-Svc: 41 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 42 | Content-Encoding: 43 | - gzip 44 | Content-Type: 45 | - application/json; charset=UTF-8 46 | Date: 47 | - Tue, 24 Jun 2025 10:05:54 GMT 48 | Server: 49 | - scaffolding on HTTPServer2 50 | Server-Timing: 51 | - gfet4t7; dur=729 52 | Transfer-Encoding: 53 | - chunked 54 | Vary: 55 | - Origin 56 | - X-Origin 57 | - Referer 58 | X-Content-Type-Options: 59 | - nosniff 60 | X-Frame-Options: 61 | - SAMEORIGIN 62 | X-XSS-Protection: 63 | - '0' 64 | status: 65 | code: 200 66 | message: OK 67 | version: 1 68 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai_tool_calls_and_text_part.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "What is the opposite of ''bright''? 
4 | Also, what is the weather in Tokyo?"}], "role": "user"}], "systemInstruction": 5 | {"parts": [{"text": "Be concise and to the point"}], "role": "user"}, "tools": 6 | [{"functionDeclarations": [{"description": "Gets the weather in a given city.", 7 | "name": "get_weather", "parameters": {"properties": {"location": {"description": 8 | "The location to get the weather for.", "type": "STRING"}}, "required": ["location"], 9 | "type": "OBJECT"}}]}], "generationConfig": {}}' 10 | headers: 11 | accept: 12 | - '*/*' 13 | accept-encoding: 14 | - gzip, deflate, zstd 15 | connection: 16 | - keep-alive 17 | content-length: 18 | - '520' 19 | content-type: 20 | - application/json 21 | host: 22 | - generativelanguage.googleapis.com 23 | user-agent: 24 | - google-genai-sdk/1.31.0 gl-python/3.13.5 25 | x-goog-api-client: 26 | - google-genai-sdk/1.31.0 gl-python/3.13.5 27 | method: POST 28 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite:generateContent 29 | response: 30 | body: 31 | string: !!binary | 32 | H4sIAAAAAAAC/3VSTW/CMAy991dEufQCE6rEx3ZDjMOkbaANbZMmNHnUbSPSpGqMgCH++9K0hZaN 33 | HlrXz35+ec7BY4yvQIUiBELD79inzTB2cO8C04pQkQXqlE1mkNO5tnwOjdiWEO6KJr5IkOks00aQ 34 | DSLmf+ciTshnwjA/FKl/wxuNx851xmijViS0moCULTkVriDFYmKM9LVFoARz3rksgjw2/zRbROoV 35 | FPROtF7vNb+oOXrX/s7x8jyQ51o6PakOUdZkpwPySChhkhcEU858XczmJ71cqBB3Nt3z6gGOmm8M 36 | xPiEBHZdcDoIz3KdZmRlo5rojVvXMCjJGttt4UG/wkkTyBZ0O+z8oTX3dqiQza03LoQ9I0hBe2fe 37 | 9GPRMN7yt1TVRrjv0qssKV16w9xUK4gxtQZ1g5t+N5Jgkq6lR0fLczSZVgYfwqJw3Au2MHt8n44G 38 | P7vn0VzqeW8wXnPv6P0Cg3y1Id0CAAA= 39 | headers: 40 | Alt-Svc: 41 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 42 | Content-Encoding: 43 | - gzip 44 | Content-Type: 45 | - application/json; charset=UTF-8 46 | Date: 47 | - Thu, 28 Aug 2025 12:35:15 GMT 48 | Server: 49 | - scaffolding on HTTPServer2 50 | Server-Timing: 51 | - gfet4t7; dur=515 52 | Transfer-Encoding: 53 | - chunked 54 | Vary: 55 | - Origin 56 | - X-Origin 57 | - Referer 58 | X-Content-Type-Options: 59 | - nosniff 60 | X-Frame-Options: 61 | - SAMEORIGIN 62 | X-XSS-Protection: 63 | - '0' 64 | status: 65 | code: 200 66 | message: OK 67 | version: 1 68 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai_output_json_schema.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "Alice and Bob are going to a science 4 | fair on Friday. 
Extract the event information."}], "role": "user"}], "generationConfig": 5 | {"responseMimeType": "application/json", "responseJsonSchema": {"type": "object", 6 | "title": "CalendarEvent", "properties": {"name": {"type": "string", "title": 7 | "Name"}, "dayOfWeek": {"type": "string", "title": "Dayofweek"}, "participants": 8 | {"type": "array", "title": "Participants", "items": {"type": "string"}}}, "required": 9 | ["name", "dayOfWeek", "participants"]}}}' 10 | headers: 11 | accept: 12 | - '*/*' 13 | accept-encoding: 14 | - gzip, deflate, zstd 15 | connection: 16 | - keep-alive 17 | content-length: 18 | - '526' 19 | content-type: 20 | - application/json 21 | host: 22 | - generativelanguage.googleapis.com 23 | user-agent: 24 | - google-genai-sdk/1.24.0 gl-python/3.12.9 25 | x-goog-api-client: 26 | - google-genai-sdk/1.24.0 gl-python/3.12.9 27 | method: POST 28 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite-preview-06-17:generateContent 29 | response: 30 | body: 31 | string: !!binary | 32 | H4sIAAAAAAAC/2VRy07DMBC85yusPSeoLSpU3HirB2iBCJDqHpZk01pN7Mh2oSXKv+MkTZoKH2xr 33 | ZnZnPS48xiBCGYsYLRm4YguHMFbUe8UpaUlaR7SQA3PU9qhtVtG7O4mlXVUEBZeMcYhxP0s+iDbc 34 | gRwetPPbc/AbVmJGDWEiQTIilqDQHV3ZiUjkKK2pZAsO16mIXInv2Bv1xWHJZQm9CcruvvSPc2uV 35 | UjVUpmJKW3nZCiARUpj1K6FRspK9hbM5dKyQMe0cPPBag7o1bA2u6IksugSxywlyrbLchmpD8lZt 36 | 6wSHk6ZZL/AT/nx04K2ymJ5Q44H/r625c6Yi7X9E74/cGzEVdl89JLz/DKGXgz2dqg2iPpfeIZIm 37 | pXfSRjRxrChzAQWjs3GQpGjWgWtPQa7pW9BPMLgIhpe1CWgyuZKGpnFVNn7LJc4fs+kk+N09T+Yr 38 | pFH2sgGv9P4AFIc+RX4CAAA= 39 | headers: 40 | Alt-Svc: 41 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 42 | Content-Encoding: 43 | - gzip 44 | Content-Type: 45 | - application/json; charset=UTF-8 46 | Date: 47 | - Fri, 04 Jul 2025 01:14:13 GMT 48 | Server: 49 | - scaffolding on HTTPServer2 50 | Server-Timing: 51 | - gfet4t7; dur=262 52 | Transfer-Encoding: 53 | - chunked 54 | Vary: 55 | - Origin 56 | - X-Origin 57 | - Referer 58 | X-Content-Type-Options: 59 | - nosniff 60 | X-Frame-Options: 61 | - SAMEORIGIN 62 | X-XSS-Protection: 63 | - '0' 64 | status: 65 | code: 200 66 | message: OK 67 | version: 1 68 | -------------------------------------------------------------------------------- /src/lmnr/sdk/browser/utils.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import time 4 | 5 | from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient 6 | from lmnr.sdk.client.synchronous.sync_client import LaminarClient 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | def with_tracer_wrapper(func): 12 | """Helper for providing tracer for wrapper functions.""" 13 | 14 | def _with_tracer(tracer, to_wrap): 15 | def wrapper(wrapped, instance, args, kwargs): 16 | return func(tracer, to_wrap, wrapped, instance, args, kwargs) 17 | 18 | return wrapper 19 | 20 | return _with_tracer 21 | 22 | 23 | def with_tracer_and_client_wrapper(func): 24 | """Helper for providing tracer and client for wrapper functions.""" 25 | 26 | def _with_tracer_and_client( 27 | tracer, client: LaminarClient | AsyncLaminarClient, to_wrap 28 | ): 29 | def wrapper(wrapped, instance, args, kwargs): 30 | return func(tracer, client, to_wrap, wrapped, instance, args, kwargs) 31 | 32 | return wrapper 33 | 34 | return _with_tracer_and_client 35 | 36 | 37 | def retry_sync(func, retries=5, delay=0.5, error_message="Operation failed"): 38 | """Utility function for retry logic in synchronous operations""" 39 | for attempt in range(retries): 
40 | try: 41 | result = func() 42 | if result: # If function returns truthy value, consider it successful 43 | return result 44 | if attempt == retries - 1: # Last attempt 45 | logger.debug(f"{error_message} after all retries") 46 | return None 47 | except Exception as e: 48 | if attempt == retries - 1: # Last attempt 49 | logger.error(f"{error_message}: {e}") 50 | return None 51 | time.sleep(delay) 52 | return None 53 | 54 | 55 | async def retry_async(func, retries=5, delay=0.5, error_message="Operation failed"): 56 | """Utility function for retry logic in asynchronous operations""" 57 | for attempt in range(retries): 58 | try: 59 | result = await func() 60 | if result: # If function returns truthy value, consider it successful 61 | return result 62 | if attempt == retries - 1: # Last attempt 63 | logger.error(f"{error_message} after all retries") 64 | return None 65 | except Exception as e: 66 | if attempt == retries - 1: # Last attempt 67 | logger.error(f"{error_message}: {e}") 68 | return None 69 | await asyncio.sleep(delay) 70 | return None 71 | -------------------------------------------------------------------------------- /src/lmnr/sdk/log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | class CustomFormatter(logging.Formatter): 5 | grey = "\x1b[38;20m" 6 | green = "\x1b[32;20m" 7 | yellow = "\x1b[33;20m" 8 | red = "\x1b[31;20m" 9 | bold_red = "\x1b[31;1m" 10 | reset = "\x1b[0m" 11 | fmt = "%(asctime)s::%(name)s::%(levelname)s: %(message)s (%(filename)s:%(lineno)d)" 12 | 13 | FORMATS = { 14 | logging.DEBUG: grey + fmt + reset, 15 | logging.INFO: green + fmt + reset, 16 | logging.WARNING: yellow + fmt + reset, 17 | logging.ERROR: red + fmt + reset, 18 | logging.CRITICAL: bold_red + fmt + reset, 19 | } 20 | 21 | def format(self, record: logging.LogRecord): 22 | log_fmt = self.FORMATS.get(record.levelno) 23 | formatter = logging.Formatter(log_fmt) 24 | return formatter.format(record) 25 | 26 | 27 | class ColorfulFormatter(logging.Formatter): 28 | grey = "\x1b[38;20m" 29 | green = "\x1b[32;20m" 30 | yellow = "\x1b[33;20m" 31 | red = "\x1b[31;20m" 32 | bold_red = "\x1b[31;1m" 33 | reset = "\x1b[0m" 34 | fmt = "Laminar %(levelname)s: %(message)s" 35 | 36 | FORMATS = { 37 | logging.DEBUG: grey + fmt + reset, 38 | logging.INFO: green + fmt + reset, 39 | logging.WARNING: yellow + fmt + reset, 40 | logging.ERROR: red + fmt + reset, 41 | logging.CRITICAL: bold_red + fmt + reset, 42 | } 43 | 44 | def format(self, record: logging.LogRecord): 45 | log_fmt = self.FORMATS.get(record.levelno) 46 | formatter = logging.Formatter(log_fmt) 47 | return formatter.format(record) 48 | 49 | 50 | # For StreamHandlers / console 51 | class VerboseColorfulFormatter(CustomFormatter): 52 | def format(self, record): 53 | return super().format(record) 54 | 55 | 56 | # For Verbose FileHandlers / files 57 | class VerboseFormatter(CustomFormatter): 58 | fmt = "%(asctime)s::%(name)s::%(levelname)s: %(message)s (%(filename)s:%(lineno)d)" 59 | 60 | def format(self, record): 61 | formatter = logging.Formatter(self.fmt) 62 | return formatter.format(record) 63 | 64 | 65 | def get_default_logger( 66 | name: str, level: int = logging.INFO, propagate: bool = False, verbose: bool = True 67 | ) -> logging.Logger: 68 | logger = logging.getLogger(name) 69 | logger.setLevel(level) 70 | console_log_handler = logging.StreamHandler() 71 | if verbose: 72 | console_log_handler.setFormatter(VerboseColorfulFormatter()) 73 | else: 74 | 
console_log_handler.setFormatter(ColorfulFormatter())
75 |     logger.addHandler(console_log_handler)
76 |     logger.propagate = propagate
77 |     return logger
78 |
--------------------------------------------------------------------------------
/src/lmnr/sdk/client/asynchronous/resources/tags.py:
--------------------------------------------------------------------------------
1 | """Resource for tagging traces."""
2 |
3 | import json
4 | import uuid
5 |
6 | from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
7 | from lmnr.sdk.log import get_default_logger
8 | from lmnr.sdk.utils import format_id
9 |
10 | logger = get_default_logger(__name__)
11 |
12 |
13 | class AsyncTags(BaseAsyncResource):
14 |     """Resource for tagging traces."""
15 |
16 |     async def tag(
17 |         self,
18 |         trace_id: str | int | uuid.UUID,
19 |         tags: list[str] | str,
20 |     ):
21 |         """Tag a trace with a list of tags. Note that the trace must be ended
22 |         before tagging it; you may want to call `Laminar.flush()` once the
23 |         trace that you want to tag has finished.
24 |
25 |         Args:
26 |             trace_id (str | int | uuid.UUID): The trace id to tag.
27 |             tags (list[str] | str): The tag or list of tags to add to the trace.
28 |
29 |         Raises:
30 |             ValueError: If the trace id is not a valid UUID.
31 |
32 |         Returns:
33 |             list[dict]: The response from the server.
34 |
35 |         Example:
36 |         ```python
37 |         from lmnr import Laminar, AsyncLaminarClient, observe
38 |
39 |         Laminar.initialize()
40 |         client = AsyncLaminarClient()
41 |         trace_id = None
42 |
43 |         @observe()
44 |         def foo():
45 |             global trace_id
46 |             trace_id = Laminar.get_trace_id()
47 |
48 |         # make sure `foo` is called outside a trace context
49 |         foo()
50 |
51 |         # or make sure the trace is ended by this point
52 |         Laminar.flush()
53 |
54 |         await client.tags.tag(trace_id, "my_tag")
55 |         ```
56 |         """
57 |         trace_tags = tags if isinstance(tags, list) else [tags]
58 |         formatted_trace_id = format_id(trace_id)
59 |
60 |         url = self._base_url + "/v1/tag"
61 |         payload = {
62 |             "traceId": formatted_trace_id,
63 |             "names": trace_tags,
64 |         }
65 |         response = await self._client.post(
66 |             url,
67 |             content=json.dumps(payload),
68 |             headers={
69 |                 **self._headers(),
70 |             },
71 |         )
72 |
73 |         if response.status_code == 404:
74 |             logger.warning(
75 |                 f"Trace {formatted_trace_id} not found. The trace may not have been ended yet."
76 | ) 77 | return [] 78 | 79 | if response.status_code != 200: 80 | raise ValueError( 81 | f"Failed to tag trace: [{response.status_code}] {response.text}" 82 | ) 83 | return response.json() 84 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_claude_agent/test_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from typing import Any 3 | from claude_agent_sdk import ( 4 | tool, 5 | create_sdk_mcp_server, 6 | ClaudeAgentOptions, 7 | ClaudeSDKClient, 8 | ) 9 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter 10 | 11 | from mock_transport import MockClaudeTransport 12 | 13 | 14 | @tool("calculate", "Perform mathematical calculations", {"expression": str}) 15 | async def calculate(args: dict[str, Any]) -> dict[str, Any]: 16 | try: 17 | result = eval(args["expression"], {"__builtins__": {}}) 18 | return {"content": [{"type": "text", "text": f"Result: {result}"}]} 19 | except Exception as e: 20 | return { 21 | "content": [{"type": "text", "text": f"Error: {str(e)}"}], 22 | "is_error": True, 23 | } 24 | 25 | 26 | @tool("get_time", "Get current time", {}) 27 | async def get_time(args: dict[str, Any]) -> dict[str, Any]: 28 | from datetime import datetime 29 | 30 | current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 31 | return {"content": [{"type": "text", "text": f"Current time: {current_time}"}]} 32 | 33 | 34 | @pytest.mark.asyncio 35 | async def test_claude_agent_tool(span_exporter: InMemorySpanExporter): 36 | my_server = create_sdk_mcp_server( 37 | name="utilities", version="1.0.0", tools=[calculate, get_time] 38 | ) 39 | 40 | options = ClaudeAgentOptions( 41 | mcp_servers={"utils": my_server}, 42 | allowed_tools=["mcp__utils__calculate", "mcp__utils__get_time"], 43 | ) 44 | 45 | async with ClaudeSDKClient( 46 | options=options, transport=MockClaudeTransport() 47 | ) as client: 48 | await client.query("What's 123 * 456?") 49 | async for _ in client.receive_response(): 50 | pass 51 | 52 | await client.query("What time is it now?") 53 | async for _ in client.receive_response(): 54 | pass 55 | 56 | spans_tuple = span_exporter.get_finished_spans() 57 | spans = sorted(list(spans_tuple), key=lambda x: x.start_time) 58 | 59 | assert len(spans) == 9 60 | assert spans[0].name == "create_sdk_mcp_server" 61 | assert spans[1].name == "ClaudeSDKClient.connect" 62 | assert spans[2].name == "ClaudeSDKClient.query" 63 | assert spans[3].name == "ClaudeSDKClient.receive_response" 64 | assert spans[4].name == "ClaudeSDKClient.receive_messages" 65 | assert spans[5].name == "ClaudeSDKClient.query" 66 | assert spans[6].name == "ClaudeSDKClient.receive_response" 67 | assert spans[7].name == "ClaudeSDKClient.receive_messages" 68 | assert spans[8].name == "ClaudeSDKClient.disconnect" 69 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai_output_schema.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "Alice and Bob are going to a science 4 | fair on Friday. 
Extract the event information."}], "role": "user"}], "generationConfig": 5 | {"responseMimeType": "application/json", "responseSchema": {"properties": {"name": 6 | {"title": "Name", "type": "STRING"}, "dayOfWeek": {"title": "Dayofweek", "type": 7 | "STRING"}, "participants": {"items": {"type": "STRING"}, "title": "Participants", 8 | "type": "ARRAY"}}, "propertyOrdering": ["name", "dayOfWeek", "participants"], 9 | "required": ["name", "dayOfWeek", "participants"], "title": "CalendarEvent", 10 | "type": "OBJECT"}}}' 11 | headers: 12 | accept: 13 | - '*/*' 14 | accept-encoding: 15 | - gzip, deflate, zstd 16 | connection: 17 | - keep-alive 18 | content-length: 19 | - '581' 20 | content-type: 21 | - application/json 22 | host: 23 | - generativelanguage.googleapis.com 24 | user-agent: 25 | - google-genai-sdk/1.24.0 gl-python/3.12.9 26 | x-goog-api-client: 27 | - google-genai-sdk/1.24.0 gl-python/3.12.9 28 | method: POST 29 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite-preview-06-17:generateContent 30 | response: 31 | body: 32 | string: !!binary | 33 | H4sIAAAAAAAC/2WRUW+CMBSF3/kVTZ9hURen2dvcXLIsi26SbYn14Q4u2ggtaeuUEP77WhDEjAco 34 | 95yee/u19AihEYiYx2BQ03uythVCyvrtNCkMCmOFtmSLOShz8TZP2Vtbi8GT20RLJghhVECGzP4z 35 | qiOOIkKSAFeM+o0cQ7FIvhD3jedZ2XGKTnXteMRzEEY7w5rRh5RHNtC36kz+MLphoqK9CapuvfEv 36 | cyuZohsqkzGmrb1qDTThguvdB4KWwtlW4WJJO5WLGE+2PPDaBnU0PWjY4hsasASh40RzJbPchHKP 37 | 4lEeaoLDaRPWA36l347OupEG0itpPPD/xeon25Sn/Yvo3ZE9I6TcFO4g4fw7pD0O5nqqFkT93Xhn 38 | JA2lT1SaNzi2mFlAwehmHCQp6F1g4zHIFf5yPAaDu2A4qZtQhTqXQuNL7La9Tk4ZLFejmS4KoefL 39 | PYA8vk+pV3l/lsxVQX4CAAA= 40 | headers: 41 | Alt-Svc: 42 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 43 | Content-Encoding: 44 | - gzip 45 | Content-Type: 46 | - application/json; charset=UTF-8 47 | Date: 48 | - Thu, 03 Jul 2025 17:21:47 GMT 49 | Server: 50 | - scaffolding on HTTPServer2 51 | Server-Timing: 52 | - gfet4t7; dur=362 53 | Transfer-Encoding: 54 | - chunked 55 | Vary: 56 | - Origin 57 | - X-Origin 58 | - Referer 59 | X-Content-Type-Options: 60 | - nosniff 61 | X-Frame-Options: 62 | - SAMEORIGIN 63 | X-XSS-Protection: 64 | - '0' 65 | status: 66 | code: 200 67 | message: OK 68 | version: 1 69 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/cassettes/test_messages/test_with_asyncio_run_legacy.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What is 4 | the weather in San Francisco?"}], "model": "claude-3-5-sonnet-20240620", "system": 5 | [{"type": "text", "text": "You help generate concise summaries of news articles 6 | and blog posts that user sends you."}]}' 7 | headers: 8 | accept: 9 | - application/json 10 | accept-encoding: 11 | - gzip, deflate 12 | anthropic-version: 13 | - '2023-06-01' 14 | connection: 15 | - keep-alive 16 | content-length: 17 | - '273' 18 | content-type: 19 | - application/json 20 | host: 21 | - api.anthropic.com 22 | user-agent: 23 | - AsyncAnthropic/Python 0.36.2 24 | x-stainless-arch: 25 | - arm64 26 | x-stainless-async: 27 | - async:asyncio 28 | x-stainless-lang: 29 | - python 30 | x-stainless-os: 31 | - MacOS 32 | x-stainless-package-version: 33 | - 0.36.2 34 | x-stainless-retry-count: 35 | - '0' 36 | x-stainless-runtime: 37 | - CPython 38 | x-stainless-runtime-version: 39 | - 3.11.8 40 | method: POST 41 | uri: https://api.anthropic.com/v1/messages 42 | response: 
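    # Note: VCR records this response body as base64-encoded gzip bytes;
    # the leading "H4sI" is the base64 form of the gzip magic number, and
    # it matches the Content-Encoding: gzip header captured below.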
43 | body: 44 | string: !!binary | 45 | H4sIAAAAAAAAA2ySwW4UMQyGX8XyhUtmtXRbJOZWUVZUXICWE0Irb+LuRM3aQ+x0KVXfHc3QdkHi 46 | FNmJP3+/lAfMCXvc226zfH3ePn1Z+4fPXz/G04urd+M6vk/DBQb0+5GnV2xGO8aAVcvUILNsTuIY 47 | cK+JC/YYC7XE3ao760xF2LuT5cnp8s3JEgNGFWdx7L89PEOdf07j89HjJdCoRXf5FwfYNodLSCqv 48 | HAa6Y6AY2QxcoTKVzvOe4cDkA1fIcqN1T55VFnBuQALnl1BIdo12DLNe+BfnAwNtc8l+PzHjwPEW 49 | YquVxV+4USXliWqgFW60ciRzW8C1wo59cmqVnIEkQRs71y5N5X+8pmm4IoF1JYnZoga41wYHbSWB 50 | MKejBkHlkmlbjqQDby07B6BxDJNM0UgFhA8Gpq1GXsD1wMZPlUEkgbHqXU78Z1H24SXgMVg4xgpz 51 | DH3aN+/tKhdyTpDYKRebU9jIMd/kODvMkAU+fg9oruOmMpkK9siSNt6q4NOF8Y/GEhl7aaUEbPN3 52 | 6h8wy9h843rLYtivVgG1+d+tt2ePj78BAAD//wMAfduzcKwCAAA= 53 | headers: 54 | CF-Cache-Status: 55 | - DYNAMIC 56 | CF-RAY: 57 | - 8da652224d8f8e44-TLV 58 | Connection: 59 | - keep-alive 60 | Content-Encoding: 61 | - gzip 62 | Content-Type: 63 | - application/json 64 | Date: 65 | - Tue, 29 Oct 2024 21:39:55 GMT 66 | Server: 67 | - cloudflare 68 | Transfer-Encoding: 69 | - chunked 70 | X-Robots-Tag: 71 | - none 72 | request-id: 73 | - req_01BBAgmGQDEfqkKaFMjBh1yK 74 | via: 75 | - 1.1 google 76 | status: 77 | code: 200 78 | message: OK 79 | version: 1 80 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/cassettes/test_messages/test_with_asyncio_run_with_events_with_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What is 4 | the weather in San Francisco?"}], "model": "claude-3-5-sonnet-20240620", "system": 5 | [{"type": "text", "text": "You help generate concise summaries of news articles 6 | and blog posts that user sends you."}]}' 7 | headers: 8 | accept: 9 | - application/json 10 | accept-encoding: 11 | - gzip, deflate 12 | anthropic-version: 13 | - '2023-06-01' 14 | connection: 15 | - keep-alive 16 | content-length: 17 | - '273' 18 | content-type: 19 | - application/json 20 | host: 21 | - api.anthropic.com 22 | user-agent: 23 | - AsyncAnthropic/Python 0.36.2 24 | x-stainless-arch: 25 | - arm64 26 | x-stainless-async: 27 | - async:asyncio 28 | x-stainless-lang: 29 | - python 30 | x-stainless-os: 31 | - MacOS 32 | x-stainless-package-version: 33 | - 0.36.2 34 | x-stainless-retry-count: 35 | - '0' 36 | x-stainless-runtime: 37 | - CPython 38 | x-stainless-runtime-version: 39 | - 3.11.8 40 | method: POST 41 | uri: https://api.anthropic.com/v1/messages 42 | response: 43 | body: 44 | string: !!binary | 45 | H4sIAAAAAAAAA2ySwW4UMQyGX8XyhUtmtXRbJOZWUVZUXICWE0Irb+LuRM3aQ+x0KVXfHc3QdkHi 46 | FNmJP3+/lAfMCXvc226zfH3ePn1Z+4fPXz/G04urd+M6vk/DBQb0+5GnV2xGO8aAVcvUILNsTuIY 47 | cK+JC/YYC7XE3ao760xF2LuT5cnp8s3JEgNGFWdx7L89PEOdf07j89HjJdCoRXf5FwfYNodLSCqv 48 | HAa6Y6AY2QxcoTKVzvOe4cDkA1fIcqN1T55VFnBuQALnl1BIdo12DLNe+BfnAwNtc8l+PzHjwPEW 49 | YquVxV+4USXliWqgFW60ciRzW8C1wo59cmqVnIEkQRs71y5N5X+8pmm4IoF1JYnZoga41wYHbSWB 50 | MKejBkHlkmlbjqQDby07B6BxDJNM0UgFhA8Gpq1GXsD1wMZPlUEkgbHqXU78Z1H24SXgMVg4xgpz 51 | DH3aN+/tKhdyTpDYKRebU9jIMd/kODvMkAU+fg9oruOmMpkK9siSNt6q4NOF8Y/GEhl7aaUEbPN3 52 | 6h8wy9h843rLYtivVgG1+d+tt2ePj78BAAD//wMAfduzcKwCAAA= 53 | headers: 54 | CF-Cache-Status: 55 | - DYNAMIC 56 | CF-RAY: 57 | - 8da652224d8f8e44-TLV 58 | Connection: 59 | - keep-alive 60 | Content-Encoding: 61 | - gzip 62 | Content-Type: 63 | - application/json 64 | Date: 65 | - Tue, 29 Oct 2024 21:39:55 GMT 66 | Server: 67 | - cloudflare 68 | Transfer-Encoding: 69 | - chunked 70 | X-Robots-Tag: 71 | - none 72 | 
request-id: 73 | - req_01BBAgmGQDEfqkKaFMjBh1yK 74 | via: 75 | - 1.1 google 76 | status: 77 | code: 200 78 | message: OK 79 | version: 1 80 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/cassettes/test_messages/test_with_asyncio_run_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What is 4 | the weather in San Francisco?"}], "model": "claude-3-5-sonnet-20240620", "system": 5 | [{"type": "text", "text": "You help generate concise summaries of news articles 6 | and blog posts that user sends you."}]}' 7 | headers: 8 | accept: 9 | - application/json 10 | accept-encoding: 11 | - gzip, deflate 12 | anthropic-version: 13 | - '2023-06-01' 14 | connection: 15 | - keep-alive 16 | content-length: 17 | - '273' 18 | content-type: 19 | - application/json 20 | host: 21 | - api.anthropic.com 22 | user-agent: 23 | - AsyncAnthropic/Python 0.36.2 24 | x-stainless-arch: 25 | - arm64 26 | x-stainless-async: 27 | - async:asyncio 28 | x-stainless-lang: 29 | - python 30 | x-stainless-os: 31 | - MacOS 32 | x-stainless-package-version: 33 | - 0.36.2 34 | x-stainless-retry-count: 35 | - '0' 36 | x-stainless-runtime: 37 | - CPython 38 | x-stainless-runtime-version: 39 | - 3.11.8 40 | method: POST 41 | uri: https://api.anthropic.com/v1/messages 42 | response: 43 | body: 44 | string: !!binary | 45 | H4sIAAAAAAAAA2ySwW4UMQyGX8XyhUtmtXRbJOZWUVZUXICWE0Irb+LuRM3aQ+x0KVXfHc3QdkHi 46 | FNmJP3+/lAfMCXvc226zfH3ePn1Z+4fPXz/G04urd+M6vk/DBQb0+5GnV2xGO8aAVcvUILNsTuIY 47 | cK+JC/YYC7XE3ao760xF2LuT5cnp8s3JEgNGFWdx7L89PEOdf07j89HjJdCoRXf5FwfYNodLSCqv 48 | HAa6Y6AY2QxcoTKVzvOe4cDkA1fIcqN1T55VFnBuQALnl1BIdo12DLNe+BfnAwNtc8l+PzHjwPEW 49 | YquVxV+4USXliWqgFW60ciRzW8C1wo59cmqVnIEkQRs71y5N5X+8pmm4IoF1JYnZoga41wYHbSWB 50 | MKejBkHlkmlbjqQDby07B6BxDJNM0UgFhA8Gpq1GXsD1wMZPlUEkgbHqXU78Z1H24SXgMVg4xgpz 51 | DH3aN+/tKhdyTpDYKRebU9jIMd/kODvMkAU+fg9oruOmMpkK9siSNt6q4NOF8Y/GEhl7aaUEbPN3 52 | 6h8wy9h843rLYtivVgG1+d+tt2ePj78BAAD//wMAfduzcKwCAAA= 53 | headers: 54 | CF-Cache-Status: 55 | - DYNAMIC 56 | CF-RAY: 57 | - 8da652224d8f8e44-TLV 58 | Connection: 59 | - keep-alive 60 | Content-Encoding: 61 | - gzip 62 | Content-Type: 63 | - application/json 64 | Date: 65 | - Tue, 29 Oct 2024 21:39:55 GMT 66 | Server: 67 | - cloudflare 68 | Transfer-Encoding: 69 | - chunked 70 | X-Robots-Tag: 71 | - none 72 | request-id: 73 | - req_01BBAgmGQDEfqkKaFMjBh1yK 74 | via: 75 | - 1.1 google 76 | status: 77 | code: 200 78 | message: OK 79 | version: 1 80 | -------------------------------------------------------------------------------- /src/lmnr/sdk/browser/bubus_otel.py: -------------------------------------------------------------------------------- 1 | from typing import Collection 2 | 3 | from lmnr import Laminar 4 | from lmnr.opentelemetry_lib.tracing.context import get_current_context 5 | from lmnr.sdk.log import get_default_logger 6 | 7 | from opentelemetry.instrumentation.instrumentor import BaseInstrumentor 8 | from opentelemetry.instrumentation.utils import unwrap 9 | from opentelemetry.trace import NonRecordingSpan, get_current_span 10 | from wrapt import wrap_function_wrapper 11 | 12 | 13 | _instruments = ("bubus >= 1.3.0",) 14 | event_id_to_span_context = {} 15 | logger = get_default_logger(__name__) 16 | 17 | 18 | def wrap_dispatch(wrapped, instance, args, kwargs): 19 | event = args[0] if args and 
len(args) > 0 else kwargs.get("event", None) 20 | if event and hasattr(event, "event_id"): 21 | event_id = event.event_id 22 | if event_id: 23 | span = get_current_span(get_current_context()) 24 | event_id_to_span_context[event_id] = span.get_span_context() 25 | return wrapped(*args, **kwargs) 26 | 27 | 28 | async def wrap_process_event(wrapped, instance, args, kwargs): 29 | event = args[0] if args and len(args) > 0 else kwargs.get("event", None) 30 | span_context = None 31 | if event and hasattr(event, "event_id"): 32 | event_id = event.event_id 33 | if event_id: 34 | span_context = event_id_to_span_context.get(event_id) 35 | if not span_context: 36 | return await wrapped(*args, **kwargs) 37 | if not Laminar.is_initialized(): 38 | return await wrapped(*args, **kwargs) 39 | with Laminar.use_span(NonRecordingSpan(span_context)): 40 | return await wrapped(*args, **kwargs) 41 | 42 | 43 | class BubusInstrumentor(BaseInstrumentor): 44 | def __init__(self): 45 | super().__init__() 46 | 47 | def instrumentation_dependencies(self) -> Collection[str]: 48 | return _instruments 49 | 50 | def _instrument(self, **kwargs): 51 | try: 52 | wrap_function_wrapper("bubus.service", "EventBus.dispatch", wrap_dispatch) 53 | except (ModuleNotFoundError, ImportError): 54 | pass 55 | try: 56 | wrap_function_wrapper( 57 | "bubus.service", "EventBus.process_event", wrap_process_event 58 | ) 59 | except (ModuleNotFoundError, ImportError): 60 | pass 61 | 62 | def _uninstrument(self, **kwargs): 63 | try: 64 | unwrap("bubus.service", "EventBus.dispatch") 65 | except (ModuleNotFoundError, ImportError): 66 | pass 67 | try: 68 | unwrap("bubus.service", "EventBus.process_event") 69 | except (ModuleNotFoundError, ImportError): 70 | pass 71 | event_id_to_span_context.clear() 72 | -------------------------------------------------------------------------------- /src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import traceback 4 | from importlib.metadata import version 5 | 6 | from opentelemetry import context as context_api 7 | from .config import Config 8 | from opentelemetry.semconv_ai import SpanAttributes 9 | 10 | GEN_AI_SYSTEM = "gen_ai.system" 11 | GEN_AI_SYSTEM_GROQ = "groq" 12 | 13 | _PYDANTIC_VERSION = version("pydantic") 14 | 15 | LMNR_TRACE_CONTENT = "LMNR_TRACE_CONTENT" 16 | 17 | 18 | def set_span_attribute(span, name, value): 19 | if value is not None and value != "": 20 | span.set_attribute(name, value) 21 | 22 | 23 | def should_send_prompts(): 24 | return ( 25 | os.getenv(LMNR_TRACE_CONTENT) or "true" 26 | ).lower() == "true" or context_api.get_value("override_enable_content_tracing") 27 | 28 | 29 | def dont_throw(func): 30 | """ 31 | A decorator that wraps the passed in function and logs exceptions instead of throwing them. 
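
    Example (an illustrative sketch; `set_model_attribute` is a hypothetical
    helper). The wrapper logs the exception at DEBUG level and returns None
    instead of raising, so tracing failures never break the instrumented call:

        @dont_throw
        def set_model_attribute(span, response_dict):
            span.set_attribute("gen_ai.response.model", response_dict.get("model"))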
32 |
33 |     @param func: The function to wrap
34 |     @return: The wrapper function
35 |     """
36 |     # Obtain a logger specific to the function's module
37 |     logger = logging.getLogger(func.__module__)
38 |
39 |     def wrapper(*args, **kwargs):
40 |         try:
41 |             return func(*args, **kwargs)
42 |         except Exception as e:
43 |             logger.debug(
44 |                 "OpenLLMetry failed to trace in %s, error: %s",
45 |                 func.__name__,
46 |                 traceback.format_exc(),
47 |             )
48 |             if Config.exception_logger:
49 |                 Config.exception_logger(e)
50 |
51 |     return wrapper
52 |
53 |
54 | @dont_throw
55 | def shared_metrics_attributes(response):
56 |     response_dict = model_as_dict(response)
57 |
58 |     common_attributes = Config.get_common_metrics_attributes()
59 |
60 |     return {
61 |         **common_attributes,
62 |         GEN_AI_SYSTEM: GEN_AI_SYSTEM_GROQ,
63 |         SpanAttributes.LLM_RESPONSE_MODEL: response_dict.get("model"),
64 |     }
65 |
66 |
67 | @dont_throw
68 | def error_metrics_attributes(exception):
69 |     return {
70 |         GEN_AI_SYSTEM: GEN_AI_SYSTEM_GROQ,
71 |         "error.type": exception.__class__.__name__,
72 |     }
73 |
74 |
75 | def model_as_dict(model):
76 |     if _PYDANTIC_VERSION < "2.0.0":  # lexicographic compare; fine for 1.x vs 2.x
77 |         return model.dict()
78 |     if hasattr(model, "model_dump"):
79 |         return model.model_dump()
80 |     elif hasattr(model, "parse"):  # Raw API response
81 |         return model_as_dict(model.parse())
82 |     else:
83 |         return model
84 |
85 |
86 | def should_emit_events() -> bool:
87 |     """
88 |     Checks whether the instrumentation should emit log events instead of
89 |     the legacy span attributes (i.e. legacy attributes are disabled).
90 |     """
91 |
92 |     return not Config.use_legacy_attributes
93 |
--------------------------------------------------------------------------------
/tests/test_instrumentations/test_openai/traces/test_streaming_with_api_usage.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from openai import OpenAI
3 | from opentelemetry.semconv_ai import SpanAttributes
4 |
5 |
6 | @pytest.fixture
7 | def api_usage_provider_client():
8 |     """Client for testing API providers that include usage information in streaming responses; DeepSeek is used here"""
9 |     return OpenAI(
10 |         api_key="test-api-key",
11 |         base_url="https://api.deepseek.com/beta"
12 |     )
13 |
14 |
15 | @pytest.mark.vcr
16 | def test_streaming_with_api_usage_capture(
17 |     instrument_legacy, span_exporter, api_usage_provider_client
18 | ):
19 |     """Test that streaming responses with API usage information are properly captured"""
20 |     response = api_usage_provider_client.chat.completions.create(
21 |         model="deepseek-chat",
22 |         messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}],
23 |         stream=True,
24 |     )
25 |
26 |     response_content = ""
27 |     for chunk in response:
28 |         if chunk.choices and chunk.choices[0].delta.content:
29 |             response_content += chunk.choices[0].delta.content
30 |
31 |     spans = span_exporter.get_finished_spans()
32 |     assert len(spans) == 1
33 |
34 |     span = spans[0]
35 |     assert span.name == "openai.chat"
36 |
37 |     # Check that token usage is captured from API response
38 |     assert span.attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) > 0
39 |     assert span.attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) > 0
40 |     assert span.attributes.get(SpanAttributes.LLM_USAGE_TOTAL_TOKENS) > 0
41 |
42 |     # Verify that the response content is meaningful
43 |     assert len(response_content) > 0
44 |     assert span.attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) == "deepseek-chat"
45 |
46 |
47 | @pytest.mark.vcr
48 | def test_streaming_with_api_usage_and_events(
49 |
instrument_with_content, span_exporter, log_exporter, api_usage_provider_client
50 | ):
51 |     """Test that streaming responses with API usage work with event logging"""
52 |     response = api_usage_provider_client.chat.completions.create(
53 |         model="deepseek-chat",
54 |         messages=[{"role": "user", "content": "What is OpenTelemetry?"}],
55 |         stream=True,
56 |     )
57 |
58 |     for chunk in response:
59 |         pass  # Just consume the stream
60 |
61 |     spans = span_exporter.get_finished_spans()
62 |     assert len(spans) == 1
63 |
64 |     span = spans[0]
65 |
66 |     # Check that usage metrics are captured from API response
67 |     assert span.attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) > 0
68 |     assert span.attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) > 0
69 |
70 |     # Check event logs
71 |     logs = log_exporter.get_finished_logs()
72 |     assert len(logs) >= 2  # At least user message and assistant choice
73 |
--------------------------------------------------------------------------------
/src/lmnr/opentelemetry_lib/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 |
4 | from opentelemetry.sdk.trace.export import SpanExporter
5 | from opentelemetry.sdk.resources import SERVICE_NAME
6 |
7 | from lmnr.opentelemetry_lib.tracing.instruments import Instruments
8 | from lmnr.opentelemetry_lib.tracing import TracerWrapper
9 | from lmnr.sdk.types import SessionRecordingOptions
10 |
11 | MAX_MANUAL_SPAN_PAYLOAD_SIZE = 1024 * 1024 * 10  # 10MB
12 |
13 |
14 | class TracerManager:
15 |     __tracer_wrapper: TracerWrapper
16 |
17 |     @staticmethod
18 |     def init(
19 |         app_name: str | None = sys.argv[0],
20 |         disable_batch=False,
21 |         exporter: SpanExporter | None = None,
22 |         resource_attributes: dict | None = None,
23 |         instruments: set[Instruments] | None = None,
24 |         block_instruments: set[Instruments] | None = None,
25 |         base_url: str = "https://api.lmnr.ai",
26 |         port: int = 8443,
27 |         http_port: int = 443,
28 |         project_api_key: str | None = None,
29 |         max_export_batch_size: int | None = None,
30 |         force_http: bool = False,
31 |         timeout_seconds: int = 30,
32 |         set_global_tracer_provider: bool = True,
33 |         otel_logger_level: int = logging.ERROR,
34 |         session_recording_options: SessionRecordingOptions | None = None,
35 |     ) -> None:
36 |         enable_content_tracing = True
37 |
38 |         # Tracer init (copy instead of mutating a shared default or the caller's dict)
39 |         resource_attributes = {**(resource_attributes or {}), SERVICE_NAME: app_name}
40 |         TracerWrapper.set_static_params(resource_attributes, enable_content_tracing)
41 |         TracerManager.__tracer_wrapper = TracerWrapper(
42 |             disable_batch=disable_batch,
43 |             exporter=exporter,
44 |             instruments=instruments,
45 |             block_instruments=block_instruments,
46 |             base_url=base_url,
47 |             port=port,
48 |             http_port=http_port,
49 |             project_api_key=project_api_key,
50 |             max_export_batch_size=max_export_batch_size,
51 |             force_http=force_http,
52 |             timeout_seconds=timeout_seconds,
53 |             set_global_tracer_provider=set_global_tracer_provider,
54 |             otel_logger_level=otel_logger_level,
55 |             session_recording_options=session_recording_options,
56 |         )
57 |
58 |     @staticmethod
59 |     def flush() -> bool:
60 |         if not hasattr(TracerManager, "_TracerManager__tracer_wrapper"):
61 |             return False
62 |         return TracerManager.__tracer_wrapper.flush()
63 |
64 |     @staticmethod
65 |     def shutdown():
66 |         TracerManager.__tracer_wrapper.shutdown()
67 |
68 |     @staticmethod
69 |     def force_reinit_processor():
70 |         if not hasattr(TracerManager, "_TracerManager__tracer_wrapper"):
71 |             return False
72 |         return
TracerManager.__tracer_wrapper.force_reinit_processor() 73 | -------------------------------------------------------------------------------- /tests/cassettes/test_litellm_gemini/test_litellm_gemini_thinking.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents":[{"role":"user","parts":[{"text":"How many times does the letter 4 | ''r'' appear in the word strawberry?"}]}],"system_instruction":{"parts":[{"text":"Think 5 | deep and thoroughly step by step."}]},"generationConfig":{"thinkingConfig":{"includeThoughts":true,"thinkingBudget":512}}}' 6 | headers: 7 | accept: 8 | - '*/*' 9 | accept-encoding: 10 | - gzip, deflate, zstd 11 | connection: 12 | - keep-alive 13 | content-length: 14 | - '285' 15 | content-type: 16 | - application/json 17 | host: 18 | - generativelanguage.googleapis.com 19 | user-agent: 20 | - litellm/1.77.1 21 | method: POST 22 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite:generateContent 23 | response: 24 | body: 25 | string: !!binary | 26 | H4sIAAAAAAAC/4VUXU/bMBR976+4y0ulKI2mln3QPSEGG9IYDKpp0jpNbnKbeHXsYDuEDPHfd2/S 27 | lrQghmTq+H6e4+N7PwAIEqFTmQqPLpjCTzoBuG//s81oj9qTYXNEh6Ww/tG3+7vv7cnF4x0HBWF4 28 | pIVq/kqdgc8RTi3eVKiTBswSBHxB79GG4VzP9cVKNBE4A2egEVPwBpYyqyyCqTzkpoZC6Aa8LNC1 29 | uVQbDEM7BEdmB1UJUrem2tgU5oHzVtQLtLaJ5wGc+aGjmnwos9wvja0FuZXWLBQWEVgUSjUxnErr 30 | fARnQ6WgpISpqbusXtgM/fT5ClSAeNxrLGZgMzpaoPMgSqolkhxyJFTSQaJQWNVAKi0mHhJTaU9M 31 | xfDVbDkwt2gTU5RKJnRFlJ8cHNTS59RwVilhAe9Ki85Jox0YSxQWCEvBJAuVGUuuxQcOdFxzIZxM 32 | oBTcoqYUicm09BQb820Ahf6pqFdHqqAYa6osfwR8K13FJLVQV4glEP5k1cK8Nh2uGiEzU+YGPFgi 33 | vIYFIO0sEEmta0gSGbohjECbEtfffu/b8neDLqLygq/OaIzXNtH5rr/qvcjFjhWfy+u5VZaDNrzd 34 | 1vC1ift+jPOJT24RX629mi57C+tIWRZW1PK1lCT87kqZ9jYohhnXJeVhtC9hEgeJwcFkrfCXlHwt 35 | SQ/YNkcPNFGVk7fMTRDtvcKcr48forcV9mwP0f+fLqmBwHYAqJNiyhAdc0crDOnVMj+0aloLWtg7 36 | 3/w2mwfwDNIwnJDPy2hJMEG/7+3+1yOCwBqF3HFhUlQb9y3EgG5CuvwKhTOa3a5nF5dbpgKpU7yj 37 | 49eDTYE2dVA5keE5ekGTUWznX0AvuCj9zKxQHzM1ZBkfdMl6g3THfnC4tnvjhdoNPXwbPcnrPlJV 38 | qfoTtjd8CaRQ0jeMZHbyY9a7cyqw09aGiUGPsI0mdlscjyeDNWUdi9/R8jzhIhkWROBoHL8ZLZVw 39 | +YiqY1s1oKlT0tDBs5Qdjw9+C3HxefxJyJN35+8vJzdvr9y382DwMPgHAjzTXWgGAAA= 40 | headers: 41 | Alt-Svc: 42 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 43 | Content-Encoding: 44 | - gzip 45 | Content-Type: 46 | - application/json; charset=UTF-8 47 | Date: 48 | - Mon, 29 Sep 2025 13:52:11 GMT 49 | Server: 50 | - scaffolding on HTTPServer2 51 | Server-Timing: 52 | - gfet4t7; dur=3030 53 | Transfer-Encoding: 54 | - chunked 55 | Vary: 56 | - Origin 57 | - X-Origin 58 | - Referer 59 | X-Content-Type-Options: 60 | - nosniff 61 | X-Frame-Options: 62 | - SAMEORIGIN 63 | X-XSS-Protection: 64 | - '0' 65 | status: 66 | code: 200 67 | message: OK 68 | version: 1 69 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_anthropic/cassettes/test_bedrock_with_raw_response/test_async_anthropic_bedrock_regular_create.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "Tell me 4 | a joke about OpenTelemetry"}], "anthropic_version": "bedrock-2023-05-31"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | authorization: 11 | - AWS4-HMAC-SHA256 Credential=AKIAQEMAC2MSQDTITCKK/20250812/us-east-1/bedrock/aws4_request, 12 | 
SignedHeaders=accept;accept-encoding;content-length;content-type;host;x-amz-date;x-stainless-arch;x-stainless-lang;x-stainless-os;x-stainless-package-version;x-stainless-read-timeout;x-stainless-retry-count;x-stainless-runtime;x-stainless-runtime-version;x-stainless-timeout, 13 | Signature=7eb3fdf5c7fe741fb5066e245bf529e9b385d7667eedfe0a955bdabec62ae1aa 14 | connection: 15 | - keep-alive 16 | content-length: 17 | - '144' 18 | content-type: 19 | - application/json 20 | host: 21 | - bedrock-runtime.us-east-1.amazonaws.com 22 | user-agent: 23 | - AsyncAnthropicBedrock/Python 0.49.0 24 | x-amz-date: 25 | - 20250812T130741Z 26 | x-stainless-arch: 27 | - arm64 28 | x-stainless-lang: 29 | - python 30 | x-stainless-os: 31 | - MacOS 32 | x-stainless-package-version: 33 | - 0.49.0 34 | x-stainless-read-timeout: 35 | - '600' 36 | x-stainless-retry-count: 37 | - '0' 38 | x-stainless-runtime: 39 | - CPython 40 | x-stainless-runtime-version: 41 | - 3.11.7 42 | x-stainless-timeout: 43 | - '600' 44 | method: POST 45 | uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1:0/invoke 46 | response: 47 | body: 48 | string: '{"id":"msg_bdrk_01BpNWhDaaMSLb423BECgv49","type":"message","role":"assistant","model":"claude-3-haiku-20240307","content":[{"type":"text","text":"Here''s 49 | an OpenTelemetry-themed joke for you:\n\nWhy did the developer have trouble 50 | instrumenting their application with OpenTelemetry?\n\nBecause they were constantly 51 | getting tripped up by all the Spans!"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":17,"output_tokens":50}}' 52 | headers: 53 | Connection: 54 | - keep-alive 55 | Content-Length: 56 | - '446' 57 | Content-Type: 58 | - application/json 59 | Date: 60 | - Tue, 12 Aug 2025 13:07:42 GMT 61 | X-Amzn-Bedrock-Input-Token-Count: 62 | - '17' 63 | X-Amzn-Bedrock-Invocation-Latency: 64 | - '735' 65 | X-Amzn-Bedrock-Output-Token-Count: 66 | - '50' 67 | x-amzn-RequestId: 68 | - 20a78996-8d60-4102-935c-e7d2e2217dc8 69 | status: 70 | code: 200 71 | message: OK 72 | version: 1 73 | -------------------------------------------------------------------------------- /tests/cassettes/test_litellm_anthropic/test_litellm_anthropic_basic.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "claude-3-5-haiku-latest", "messages": [{"role": "user", "content": 4 | [{"type": "text", "text": "What is the capital of France?"}]}], "max_tokens": 5 | 4096}' 6 | headers: 7 | accept: 8 | - application/json 9 | accept-encoding: 10 | - gzip, deflate, zstd 11 | anthropic-version: 12 | - '2023-06-01' 13 | connection: 14 | - keep-alive 15 | content-length: 16 | - '161' 17 | content-type: 18 | - application/json 19 | host: 20 | - api.anthropic.com 21 | user-agent: 22 | - litellm/1.72.6 23 | method: POST 24 | uri: https://api.anthropic.com/v1/messages 25 | response: 26 | body: 27 | string: !!binary | 28 | H4sIAAAAAAAAAwAAAP//ZJBBSwMxEIX/yvLOWdmuVTF37cFLQcGDSBiTaRt2O1mTiSil/1222EPx 29 | NPC+9x7DOyAGWOzL1nWLZ/lYqT4N6W41DPH2oa6n1/sVDPRn4tnFpdCWYZDTOAtUSixKojDYp8Aj 30 | LPxINXB73d60O4pDbfuuXy66voeBT6IsCvt2OHcqf8/p07F42XHjaYpKY5M2zWMm8dzE0qwpx3KF 31 | 47tB0TS5zFSSwIIlOK1Z8AcKf1YWz7BSx9Ggnj62B0SZqjpNA0uBXSwNPPkdO5+ZNCZxl4buzDNT 32 | +M9S1Yu6zqBw/oqenUbOsJh3CZQDjsdfAAAA//8DAMv8gQVlAQAA 33 | headers: 34 | CF-RAY: 35 | - 9512b2d1ca5f5781-LHR 36 | Connection: 37 | - keep-alive 38 | Content-Encoding: 39 | - gzip 40 | Content-Type: 41 | - application/json 42 | Date: 43 | 
- Tue, 17 Jun 2025 12:54:59 GMT 44 | Server: 45 | - cloudflare 46 | Transfer-Encoding: 47 | - chunked 48 | X-Robots-Tag: 49 | - none 50 | anthropic-organization-id: 51 | - 04aa8588-6567-40cb-9042-a54b20ebaf4f 52 | anthropic-ratelimit-input-tokens-limit: 53 | - '400000' 54 | anthropic-ratelimit-input-tokens-remaining: 55 | - '400000' 56 | anthropic-ratelimit-input-tokens-reset: 57 | - '2025-06-17T12:54:59Z' 58 | anthropic-ratelimit-output-tokens-limit: 59 | - '80000' 60 | anthropic-ratelimit-output-tokens-remaining: 61 | - '80000' 62 | anthropic-ratelimit-output-tokens-reset: 63 | - '2025-06-17T12:54:59Z' 64 | anthropic-ratelimit-requests-limit: 65 | - '4000' 66 | anthropic-ratelimit-requests-remaining: 67 | - '3999' 68 | anthropic-ratelimit-requests-reset: 69 | - '2025-06-17T12:54:57Z' 70 | anthropic-ratelimit-tokens-limit: 71 | - '480000' 72 | anthropic-ratelimit-tokens-remaining: 73 | - '480000' 74 | anthropic-ratelimit-tokens-reset: 75 | - '2025-06-17T12:54:59Z' 76 | cf-cache-status: 77 | - DYNAMIC 78 | request-id: 79 | - req_011CQDpzPJ7wVMiXMe93TVBF 80 | strict-transport-security: 81 | - max-age=31536000; includeSubDomains; preload 82 | via: 83 | - 1.1 google 84 | status: 85 | code: 200 86 | message: OK 87 | version: 1 88 | -------------------------------------------------------------------------------- /tests/cassettes/test_litellm_anthropic/test_litellm_anthropic_text_block.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "claude-3-5-haiku-latest", "messages": [{"role": "user", "content": 4 | [{"type": "text", "text": "What is the capital of France?"}]}], "max_tokens": 5 | 4096}' 6 | headers: 7 | accept: 8 | - application/json 9 | accept-encoding: 10 | - gzip, deflate, zstd 11 | anthropic-version: 12 | - '2023-06-01' 13 | connection: 14 | - keep-alive 15 | content-length: 16 | - '161' 17 | content-type: 18 | - application/json 19 | host: 20 | - api.anthropic.com 21 | user-agent: 22 | - litellm/1.72.6 23 | method: POST 24 | uri: https://api.anthropic.com/v1/messages 25 | response: 26 | body: 27 | string: !!binary | 28 | H4sIAAAAAAAAAwAAAP//ZJBBSwMxEIX/yvLOWdluK0iOgl4sUrEXEQlDMrah22SbmYil9L/LFnsQ 29 | TwPve+8xvBNigMVeNq6bLdfy9BL18HbUh6U+vx7v7lfzDAM9jjy5WIQ2DIOSh0kgkShKSWGwz4EH 30 | WPiBauB23t62W4q72vZdv5h1fQ8Dn5NyUtj307VT+XtKX47FesuNpzEqDU3+bB4LJc9NlGZFJcoN 31 | zh8Gonl0hUlyggWn4LSWhF8gfKicPMOmOgwG9fKxPSGmsarTvOMksLOFgSe/ZecLk8ac3F9Dd+WF 32 | Kfxnueqfus5AuHxFz04jF1hMuwQqAefzDwAAAP//AwAJeZViZQEAAA== 33 | headers: 34 | CF-RAY: 35 | - 9512ba646dd0cda2-LHR 36 | Connection: 37 | - keep-alive 38 | Content-Encoding: 39 | - gzip 40 | Content-Type: 41 | - application/json 42 | Date: 43 | - Tue, 17 Jun 2025 13:00:11 GMT 44 | Server: 45 | - cloudflare 46 | Transfer-Encoding: 47 | - chunked 48 | X-Robots-Tag: 49 | - none 50 | anthropic-organization-id: 51 | - 04aa8588-6567-40cb-9042-a54b20ebaf4f 52 | anthropic-ratelimit-input-tokens-limit: 53 | - '400000' 54 | anthropic-ratelimit-input-tokens-remaining: 55 | - '400000' 56 | anthropic-ratelimit-input-tokens-reset: 57 | - '2025-06-17T13:00:11Z' 58 | anthropic-ratelimit-output-tokens-limit: 59 | - '80000' 60 | anthropic-ratelimit-output-tokens-remaining: 61 | - '80000' 62 | anthropic-ratelimit-output-tokens-reset: 63 | - '2025-06-17T13:00:11Z' 64 | anthropic-ratelimit-requests-limit: 65 | - '4000' 66 | anthropic-ratelimit-requests-remaining: 67 | - '3999' 68 | anthropic-ratelimit-requests-reset: 69 | - '2025-06-17T13:00:07Z' 70 | 
anthropic-ratelimit-tokens-limit:
71 |       - '480000'
72 |       anthropic-ratelimit-tokens-remaining:
73 |       - '480000'
74 |       anthropic-ratelimit-tokens-reset:
75 |       - '2025-06-17T13:00:11Z'
76 |       cf-cache-status:
77 |       - DYNAMIC
78 |       request-id:
79 |       - req_011CQDqPFKnWiE9NWyqU8z5a
80 |       strict-transport-security:
81 |       - max-age=31536000; includeSubDomains; preload
82 |       via:
83 |       - 1.1 google
84 |     status:
85 |       code: 200
86 |       message: OK
87 | version: 1
88 |
--------------------------------------------------------------------------------
/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py:
--------------------------------------------------------------------------------
1 | from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
2 | from opentelemetry.instrumentation.utils import unwrap
3 | from opentelemetry.trace import TraceFlags, SpanContext
4 | from typing import Collection
5 | from wrapt import wrap_function_wrapper
6 | import logging
7 |
8 |
9 | def _wrap_span_context(fn, instance, args, kwargs):
10 |     """
11 |     DataDog patches the OpenTelemetry contexts, so that when any code
12 |     tries to access the current active span, it returns a non-recording span.
13 |
14 |     There is nothing wrong with that per se, but they create their
15 |     NonRecordingSpan from an invalid SpanContext, because they don't
16 |     wrap the trace flags int/bitmap into a TraceFlags object.
17 |
18 |     It is an easy-to-miss bug, because `TraceFlags.SAMPLED` looks like an
19 |     instance of `TraceFlags`, but is actually just an integer 1, and the
20 |     proper way to create it is actually
21 |     `TraceFlags(TraceFlags.SAMPLED)` or `TraceFlags(0x1)`.
22 |
23 |     This is a problem because the trace flags are used to determine if a span
24 |     is sampled or not. If the trace flags are not wrapped, then the check
25 |     for sampling will fail, causing any span creation to fail, and sometimes
26 |     breaking the entire application.
27 |
28 |     Issue: https://github.com/DataDog/dd-trace-py/issues/12585
29 |     PR: https://github.com/DataDog/dd-trace-py/pull/12596
30 |     The PR only fixed the issue in one place, but it is still present in other places:
31 |     https://github.com/DataDog/dd-trace-py/pull/12596#issuecomment-2718239507
32 |
33 |     https://github.com/DataDog/dd-trace-py/blob/a8419a40fe9e73e0a84c4cab53094c384480a5a6/ddtrace/internal/opentelemetry/context.py#L83
34 |
35 |     We patch the `get_span_context` method to return a valid SpanContext.
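
    A minimal illustration (hypothetical values) of the difference:

        flags = TraceFlags.SAMPLED   # just the plain int 1, not a TraceFlags
        TraceFlags(flags).sampled    # True; the wrapper exposes `.sampled`
        # accessing `.sampled` on the bare int raises AttributeError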
36 | """ 37 | res = fn(*args, **kwargs) 38 | 39 | new_span_context = SpanContext( 40 | trace_id=res.trace_id, 41 | span_id=res.span_id, 42 | is_remote=res.is_remote, 43 | trace_state=res.trace_state, 44 | trace_flags=TraceFlags(res.trace_flags), 45 | ) 46 | 47 | return new_span_context 48 | 49 | 50 | class OpentelemetryInstrumentor(BaseInstrumentor): 51 | def __init__(self): 52 | super().__init__() 53 | 54 | def instrumentation_dependencies(self) -> Collection[str]: 55 | return ("opentelemetry-api>=1.0.0",) 56 | 57 | def _instrument(self, **kwargs): 58 | try: 59 | wrap_function_wrapper( 60 | "opentelemetry.trace.span", 61 | "NonRecordingSpan.get_span_context", 62 | _wrap_span_context, 63 | ) 64 | 65 | except Exception as e: 66 | logging.debug(f"Error wrapping SpanContext: {e}") 67 | 68 | def _uninstrument(self, **kwargs): 69 | unwrap("opentelemetry.trace.span", "NonRecordingSpan.get_span_context") 70 | -------------------------------------------------------------------------------- /tests/cassettes/test_google_genai/test_google_genai_reasoning_tokens_with_include_thoughts.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"contents": [{"parts": [{"text": "How many times does the letter ''r'' 4 | appear in the word strawberry?"}], "role": "user"}], "systemInstruction": {"parts": 5 | [{"text": "Think deep and thoroughly step by step."}], "role": "user"}, "generationConfig": 6 | {"thinkingConfig": {"includeThoughts": true, "thinkingBudget": 512}}}' 7 | headers: 8 | accept: 9 | - '*/*' 10 | accept-encoding: 11 | - gzip, deflate, zstd 12 | connection: 13 | - keep-alive 14 | content-length: 15 | - '315' 16 | content-type: 17 | - application/json 18 | host: 19 | - generativelanguage.googleapis.com 20 | user-agent: 21 | - google-genai-sdk/1.34.0 gl-python/3.13.5 22 | x-goog-api-client: 23 | - google-genai-sdk/1.34.0 gl-python/3.13.5 24 | method: POST 25 | uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-lite:generateContent 26 | response: 27 | body: 28 | string: !!binary | 29 | H4sIAAAAAAAC/4VUUW+bMBB+z6+48hIJAdrabOr6VrWbVnVrq4ZNlZY9OHAEK2Az2xFlVf/77gxJ 30 | aDptkYLBd/7uu+/u/DQBCDKhcpkLhzY4gx+0A/Dkn2zTyqFyZNhu0WYjjNv79r+n0Tu5OHzkQ0EY 31 | piXC3BnRLtGYDqb3U7jQG+XCcKEW6rwyclW6CCp0UwtLg2INrpQWct2qBPj0xqKBVihnwWlYK91C 32 | Sf9aqA6crJG2yYsAHPlNzRRE06AwFqTyllabHBaB3ZFYBAnMZd1UCKj0ZlUm8EkaSyyuQCHmHOY3 33 | Gs0AusdYYwdYYU1a2LM9qnQWqwLiA3jaIE29m20wk4XMBn4RtKXMSqAEiWkCN7qNfBIc03bWYS2c 34 | zERVdWCpMLtQCas1d6S8VCsojK69aYkrqRRtMfeV3qqw7Ia3hJl5Qko3yF/Of1knq4r2EjgvI0Yy 35 | SMkQrSMvecFy9AzTUnBltKLTF9QNUm18uEUgFgEvbb8s+wVJ3CThYBHsn76QBglHKM3RGO/IY1Md 36 | 11zClrhcIzaUBeETxqBgf4p7wuRHcIemwMz1aujIa8Tp73RiHTLuLy5jaZDSUtYJlVEMXRx0ykCr 37 | 0Aa9CMTOtmSS4xoflJYnQhDmq6ajCjpRcZCTvi2JZBAdjEXJ7caT4cwGR7bn6P+z9MWPiE+Og9dn 38 | LIKNXRyGJgxjEbfxMsbhq3927JL+fTrC8CQMhwH6x6QEY5a79597voHRFTK/WudYbd13CQWFVNKW 39 | 9yisVuw2T2/vdroEUuX4SNtvJtsAHjrYWLHCr+gEXUxid/0EDTV+41K9RuVvEbIcv+vBRvfYC/vs 40 | w2D39Tk4ehq9wrWXFFVW4wtudPdRkqKSruNM0o8P6ajCFOAF9laJyUiwbQe8pPj2dDYZJOtV/I7G 41 | yl6uFdYkYHycvIuLStgypujoowYGbaOVxaucHe9nqRDXl/L8/cOvdf7tztx8vp2d62DyPPkDFZMx 42 | XucFAAA= 43 | headers: 44 | Alt-Svc: 45 | - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 46 | Content-Encoding: 47 | - gzip 48 | Content-Type: 49 | - application/json; charset=UTF-8 50 | Date: 51 | - Mon, 29 Sep 2025 13:06:15 GMT 52 | Server: 53 | - scaffolding on HTTPServer2 54 | Server-Timing: 55 | - gfet4t7; dur=2264 56 | Transfer-Encoding: 57 | - chunked 58 | Vary: 
59 | - Origin 60 | - X-Origin 61 | - Referer 62 | X-Content-Type-Options: 63 | - nosniff 64 | X-Frame-Options: 65 | - SAMEORIGIN 66 | X-XSS-Protection: 67 | - '0' 68 | status: 69 | code: 200 70 | message: OK 71 | version: 1 72 | -------------------------------------------------------------------------------- /tests/cassettes/test_litellm_anthropic/test_litellm_anthropic_with_metadata.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model": "claude-3-5-haiku-latest", "messages": [{"role": "user", "content": 4 | [{"type": "text", "text": "What is the capital of France?"}]}], "max_tokens": 5 | 4096, "metadata": {"user_id": "test_user_id"}}' 6 | headers: 7 | accept: 8 | - application/json 9 | accept-encoding: 10 | - gzip, deflate, zstd 11 | anthropic-version: 12 | - '2023-06-01' 13 | connection: 14 | - keep-alive 15 | content-length: 16 | - '202' 17 | content-type: 18 | - application/json 19 | host: 20 | - api.anthropic.com 21 | user-agent: 22 | - litellm/1.76.2 23 | method: POST 24 | uri: https://api.anthropic.com/v1/messages 25 | response: 26 | body: 27 | string: !!binary | 28 | H4sIAAAAAAAAAwAAAP//dJBPSwMxFMS/yjLnrOyurWDOUj0WFTyIhGfy7MZmkzV/pLXsd5ctFlHx 29 | 9OD9ZoZhDrAGEkPaqKbtLtpnXt3t9uF1ff1w2X/c3F7FHQTyfuRZxSnRhiEQg5sflJJNmXyGwBAM 30 | O0hoR8VwfV4v657sttRd0y3apusgoIPP7DPk4+GUmXk3u49H4r7nStNoM7kqvFSrSF5zZVO1pmjT 31 | GaYngZTDqCJTCh4S7I3KJXp8gcRvhb1mSF+cEyjHxvIA68eSVQ5b9gmyXQho0j0rHZmyDV79FDQn 32 | HpnMf+zknfN57HngSE4th7/6b9r2v+kkEEr+0a4RSBzfrWaVLUdIzDMbigbT9AkAAP//AwDOppGy 33 | tAEAAA== 34 | headers: 35 | CF-RAY: 36 | - 97b6f0ed9f8f0050-LHR 37 | Connection: 38 | - keep-alive 39 | Content-Encoding: 40 | - gzip 41 | Content-Type: 42 | - application/json 43 | Date: 44 | - Sun, 07 Sep 2025 14:36:56 GMT 45 | Server: 46 | - cloudflare 47 | Transfer-Encoding: 48 | - chunked 49 | X-Robots-Tag: 50 | - none 51 | anthropic-organization-id: 52 | - 04aa8588-6567-40cb-9042-a54b20ebaf4f 53 | anthropic-ratelimit-input-tokens-limit: 54 | - '400000' 55 | anthropic-ratelimit-input-tokens-remaining: 56 | - '400000' 57 | anthropic-ratelimit-input-tokens-reset: 58 | - '2025-09-07T14:36:55Z' 59 | anthropic-ratelimit-output-tokens-limit: 60 | - '80000' 61 | anthropic-ratelimit-output-tokens-remaining: 62 | - '80000' 63 | anthropic-ratelimit-output-tokens-reset: 64 | - '2025-09-07T14:36:56Z' 65 | anthropic-ratelimit-requests-limit: 66 | - '4000' 67 | anthropic-ratelimit-requests-remaining: 68 | - '3999' 69 | anthropic-ratelimit-requests-reset: 70 | - '2025-09-07T14:36:55Z' 71 | anthropic-ratelimit-tokens-limit: 72 | - '480000' 73 | anthropic-ratelimit-tokens-remaining: 74 | - '480000' 75 | anthropic-ratelimit-tokens-reset: 76 | - '2025-09-07T14:36:55Z' 77 | cf-cache-status: 78 | - DYNAMIC 79 | request-id: 80 | - req_011CSuD6S5s3683v4pXtKAC5 81 | strict-transport-security: 82 | - max-age=31536000; includeSubDomains; preload 83 | via: 84 | - 1.1 google 85 | x-envoy-upstream-service-time: 86 | - '817' 87 | status: 88 | code: 200 89 | message: OK 90 | version: 1 91 | -------------------------------------------------------------------------------- /tests/cassettes/test_litellm_anthropic/test_async_litellm_anthropic_with_computer_tools.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"model":"claude-sonnet-4-20250514","messages":[{"role":"user","content":[{"type":"text","text":"What 4 | is the capital of 
France?"}]}],"tools":[{"type":"computer_20250124","name":"computer","display_width_px":1024,"display_height_px":768,"display_number":1},{"type":"text_editor_20250124","name":"str_replace_editor"},{"type":"bash_20250124","name":"bash"}],"max_tokens":4096}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate, zstd 10 | anthropic-beta: 11 | - computer-use-2024-10-22,computer-use-2025-01-24 12 | anthropic-version: 13 | - '2023-06-01' 14 | connection: 15 | - keep-alive 16 | content-length: 17 | - '374' 18 | content-type: 19 | - application/json 20 | host: 21 | - api.anthropic.com 22 | user-agent: 23 | - litellm/1.76.2 24 | method: POST 25 | uri: https://api.anthropic.com/v1/messages 26 | response: 27 | body: 28 | string: !!binary | 29 | H4sIAAAAAAAAAwAAAP//dJDNTsMwEIRfJZqzi5Ko4eAHQBW9oILggJC1OAsxdezU3vBX5d1RKir+ 30 | xGml+WZ2R7uHa6HR50dTVucbos26213fNNv37f3TqlqtXy6hIG8Dzy7OmR4ZCin6WaCcXRYKAoU+ 31 | tuyhYT2NLS9yDIFlsVzUZd2UTbWEgo1BOAj07f64Uvh1Dh+GxlXHhaXBCfkiPhRniYLlwuXigpLL 32 | J5juFLLEwSSmHAM0OLRGxhTwCTLvRg6WocPovcJ4KKz3cGEYxUjccsjQ9WlZK1iyHRubmMTFYH5a 33 | yiNPTO1/7JidL/DQcc+JvGn6v/4vWnW/6aQQR/kuVaVC5vTsLBtxnKAx/7ml1GKaPgAAAP//AwDE 34 | lyQStQEAAA== 35 | headers: 36 | CF-RAY: 37 | - 97bea654bdf494b4-LHR 38 | Connection: 39 | - keep-alive 40 | Content-Encoding: 41 | - gzip 42 | Content-Type: 43 | - application/json 44 | Date: 45 | - Mon, 08 Sep 2025 13:04:09 GMT 46 | Server: 47 | - cloudflare 48 | Transfer-Encoding: 49 | - chunked 50 | Via: 51 | - 1.1 google 52 | X-Robots-Tag: 53 | - none 54 | anthropic-organization-id: 55 | - 04aa8588-6567-40cb-9042-a54b20ebaf4f 56 | anthropic-ratelimit-input-tokens-limit: 57 | - '2000000' 58 | anthropic-ratelimit-input-tokens-remaining: 59 | - '1999000' 60 | anthropic-ratelimit-input-tokens-reset: 61 | - '2025-09-08T13:04:08Z' 62 | anthropic-ratelimit-output-tokens-limit: 63 | - '400000' 64 | anthropic-ratelimit-output-tokens-remaining: 65 | - '400000' 66 | anthropic-ratelimit-output-tokens-reset: 67 | - '2025-09-08T13:04:08Z' 68 | anthropic-ratelimit-tokens-limit: 69 | - '2400000' 70 | anthropic-ratelimit-tokens-remaining: 71 | - '2399000' 72 | anthropic-ratelimit-tokens-reset: 73 | - '2025-09-08T13:04:08Z' 74 | cf-cache-status: 75 | - DYNAMIC 76 | request-id: 77 | - req_011CSvypociTM6a9PtaCycTy 78 | strict-transport-security: 79 | - max-age=31536000; includeSubDomains; preload 80 | x-envoy-upstream-service-time: 81 | - '3045' 82 | status: 83 | code: 200 84 | message: OK 85 | version: 1 86 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/synchronous/resources/evaluators.py: -------------------------------------------------------------------------------- 1 | """Evaluators resource for creating evaluator scores.""" 2 | 3 | import uuid 4 | from typing import Any 5 | 6 | from lmnr.sdk.client.synchronous.resources.base import BaseResource 7 | from lmnr.sdk.utils import format_id 8 | 9 | 10 | class Evaluators(BaseResource): 11 | """Resource for creating evaluator scores.""" 12 | 13 | def score( 14 | self, 15 | *, 16 | name: str, 17 | trace_id: str | int | uuid.UUID | None = None, 18 | span_id: str | int | uuid.UUID | None = None, 19 | metadata: dict[str, Any] | None = None, 20 | score: float, 21 | ) -> None: 22 | """Create a score for a span. 
23 | 24 | Args: 25 | name (str): Name of the score 26 | trace_id (str | int | uuid.UUID | None, optional): The trace ID to score (will be attached to root span) 27 | span_id (str | int | uuid.UUID | None, optional): The span ID to score 28 | metadata (dict[str, Any] | None, optional): Additional metadata. Defaults to None. 29 | score (float): The score value (float) 30 | 31 | Raises: 32 | ValueError: If there's an error creating the score. 33 | 34 | Example: 35 | Score by trace ID (will attach to root span): 36 | 37 | >>> laminar_client.evaluators.score( 38 | ... name="quality", 39 | ... trace_id="trace-id-here", 40 | ... score=0.95, 41 | ... metadata={"model": "gpt-4"} 42 | ... ) 43 | 44 | Score by span ID: 45 | 46 | >>> laminar_client.evaluators.score( 47 | ... name="relevance", 48 | ... span_id="span-id-here", 49 | ... score=0.87 50 | ... ) 51 | """ 52 | if trace_id is not None and span_id is not None: 53 | raise ValueError("Cannot provide both trace_id and span_id. Please provide only one.") 54 | if trace_id is None and span_id is None: 55 | raise ValueError("Either 'trace_id' or 'span_id' must be provided.") 56 | 57 | if trace_id is not None: 58 | formatted_trace_id = format_id(trace_id) 59 | payload = { 60 | "name": name, 61 | "traceId": formatted_trace_id, 62 | "metadata": metadata, 63 | "score": score, 64 | "source": "Code", 65 | } 66 | else: 67 | formatted_span_id = format_id(span_id) 68 | payload = { 69 | "name": name, 70 | "spanId": formatted_span_id, 71 | "metadata": metadata, 72 | "score": score, 73 | "source": "Code", 74 | } 75 | 76 | response = self._client.post( 77 | self._base_url + "/v1/evaluators/score", 78 | json=payload, 79 | headers=self._headers(), 80 | ) 81 | 82 | if response.status_code != 200: 83 | if response.status_code == 401: 84 | raise ValueError("Unauthorized. Please check your project API key.") 85 | raise ValueError(f"Error creating evaluator score: {response.text}") -------------------------------------------------------------------------------- /src/lmnr/sdk/datasets/file_utils.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Any 3 | import csv 4 | import orjson 5 | 6 | from lmnr.sdk.log import get_default_logger 7 | 8 | LOG = get_default_logger(__name__, verbose=False) 9 | 10 | 11 | def _is_supported_file(file: Path) -> bool: 12 | """Check if a file is supported.""" 13 | return file.suffix in [".json", ".csv", ".jsonl"] 14 | 15 | 16 | def _collect_files(paths: list[Path], recursive: bool = False) -> list[Path]: 17 | """ 18 | Collect all supported files from the given paths. 19 | 20 | Handles both files and directories. If a path is a directory, 21 | collects all supported files within it (recursively if specified). 
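
    For example (illustrative paths): given `paths=[Path("data/")]` containing
    `a.json`, `b.csv`, and `notes.txt`, this returns the two supported files;
    an unsupported file found inside a directory is skipped silently, while an
    unsupported file passed directly as a path logs a warning.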
22 | """ 23 | collected_files = [] 24 | 25 | for path in paths: 26 | if path.is_file(): 27 | if _is_supported_file(path): 28 | collected_files.append(path) 29 | else: 30 | LOG.warning(f"Skipping unsupported file type: {path}") 31 | elif path.is_dir(): 32 | for item in path.iterdir(): 33 | if item.is_file() and _is_supported_file(item): 34 | collected_files.append(item) 35 | elif recursive and item.is_dir(): 36 | # Recursively collect files from subdirectories 37 | collected_files.extend(_collect_files([item], recursive=True)) 38 | else: 39 | LOG.warning(f"Path does not exist or is not accessible: {path}") 40 | 41 | return collected_files 42 | 43 | 44 | def _read_file(file: Path) -> list[dict[str, Any]]: 45 | """Read data from a single file and return as a list of dictionaries.""" 46 | if file.suffix == ".json": 47 | result = orjson.loads(file.read_bytes()) 48 | if isinstance(result, list): 49 | return result 50 | else: 51 | return [result] 52 | elif file.suffix == ".csv": 53 | return [dict(row) for row in csv.DictReader(file.read_text().splitlines())] 54 | elif file.suffix == ".jsonl": 55 | return [ 56 | orjson.loads(line) for line in file.read_text().splitlines() if line.strip() 57 | ] 58 | else: 59 | raise ValueError(f"Unsupported file type: {file.suffix}") 60 | 61 | 62 | def load_from_paths(paths: list[Path], recursive: bool = False) -> list[dict[str, Any]]: 63 | """ 64 | Load data from all files in the specified paths. 65 | 66 | First collects all file paths, then reads each file's data. 67 | """ 68 | files = _collect_files(paths, recursive) 69 | 70 | if not files: 71 | LOG.warning("No supported files found in the specified paths") 72 | return [] 73 | 74 | LOG.info(f"Found {len(files)} file(s) to read") 75 | 76 | result = [] 77 | for file in files: 78 | try: 79 | data = _read_file(file) 80 | result.extend(data) 81 | LOG.info(f"Read {len(data)} record(s) from {file}") 82 | except Exception as e: 83 | LOG.error(f"Error reading file {file}: {e}") 84 | raise 85 | 86 | return result 87 | 88 | 89 | def parse_paths(paths: list[str]) -> list[Path]: 90 | """Parse paths.""" 91 | return [Path(path) for path in paths] 92 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_azure/test_chat.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "openllmetry-testing"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '113' 14 | content-type: 15 | - application/json 16 | host: 17 | - traceloop-stg.openai.azure.com 18 | user-agent: 19 | - AzureOpenAI/Python 1.14.3 20 | x-stainless-arch: 21 | - arm64 22 | x-stainless-async: 23 | - 'false' 24 | x-stainless-lang: 25 | - python 26 | x-stainless-os: 27 | - MacOS 28 | x-stainless-package-version: 29 | - 1.14.3 30 | x-stainless-runtime: 31 | - CPython 32 | x-stainless-runtime-version: 33 | - 3.12.1 34 | method: POST 35 | uri: https://traceloop-stg.openai.azure.com/openai/deployments/openllmetry-testing/chat/completions?api-version=2024-02-01 36 | response: 37 | body: 38 | string: 
'{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"content":"Why 39 | did the Opentelemetry developer refuse to share their candy?\n\nBecause they 40 | wanted to keep all the traces for themselves!","role":"assistant"}}],"created":1714036973,"id":"chatcmpl-9HpbZPf84KZFiQG6fdY0KVtIwHyIa","model":"gpt-35-turbo","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":24,"prompt_tokens":15,"total_tokens":39}} 41 | 42 | ' 43 | headers: 44 | Cache-Control: 45 | - no-cache, must-revalidate 46 | Content-Length: 47 | - '921' 48 | Content-Type: 49 | - application/json 50 | Date: 51 | - Thu, 25 Apr 2024 09:22:53 GMT 52 | Strict-Transport-Security: 53 | - max-age=31536000; includeSubDomains; preload 54 | access-control-allow-origin: 55 | - '*' 56 | apim-request-id: 57 | - 8a3ae7c6-2eb5-45ea-947e-b4218c7ca1a5 58 | azureml-model-session: 59 | - d060-20240328183338 60 | cmp-upstream-response-duration: 61 | - '544' 62 | x-accel-buffering: 63 | - 'no' 64 | x-content-type-options: 65 | - nosniff 66 | x-ms-client-request-id: 67 | - 8a3ae7c6-2eb5-45ea-947e-b4218c7ca1a5 68 | x-ms-rai-invoked: 69 | - 'true' 70 | x-ms-region: 71 | - East US 2 72 | x-ratelimit-remaining-requests: 73 | - '119' 74 | x-ratelimit-remaining-tokens: 75 | - '119984' 76 | x-request-id: 77 | - 0f63a0ff-049a-4f46-88e1-5e5c2e4c87c0 78 | status: 79 | code: 200 80 | message: OK 81 | version: 1 82 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_azure/test_chat_content_filtering.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: 4 | '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 5 | "model": "openllmetry-testing"}' 6 | headers: 7 | accept: 8 | - application/json 9 | accept-encoding: 10 | - gzip, deflate 11 | connection: 12 | - keep-alive 13 | content-length: 14 | - "113" 15 | content-type: 16 | - application/json 17 | host: 18 | - traceloop-stg.openai.azure.com 19 | user-agent: 20 | - AzureOpenAI/Python 1.14.3 21 | x-stainless-arch: 22 | - arm64 23 | x-stainless-async: 24 | - "false" 25 | x-stainless-lang: 26 | - python 27 | x-stainless-os: 28 | - MacOS 29 | x-stainless-package-version: 30 | - 1.14.3 31 | x-stainless-runtime: 32 | - CPython 33 | x-stainless-runtime-version: 34 | - 3.12.1 35 | method: POST 36 | uri: https://traceloop-stg.openai.azure.com/openai/deployments/openllmetry-testing/chat/completions?api-version=2024-02-01 37 | response: 38 | body: 39 | string: 40 | 
'{"choices":[{"content_filter_results":{"hate":{"filtered":true,"severity":"high"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"content_filter","index":0,"logprobs":null}],"created":1714038380,"id":"chatcmpl-9HpyGSWv1hoKdGaUaiFhfxzTEVlZo","model":"gpt-35-turbo","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":19,"prompt_tokens":15,"total_tokens":34}} 41 | 42 | ' 43 | headers: 44 | Cache-Control: 45 | - no-cache, must-revalidate 46 | Content-Length: 47 | - "882" 48 | Content-Type: 49 | - application/json 50 | Date: 51 | - Thu, 25 Apr 2024 09:46:20 GMT 52 | Strict-Transport-Security: 53 | - max-age=31536000; includeSubDomains; preload 54 | access-control-allow-origin: 55 | - "*" 56 | apim-request-id: 57 | - 6624fbc0-526e-4c5f-b98c-fb96daf3c582 58 | azureml-model-session: 59 | - d061-20240328190738 60 | x-accel-buffering: 61 | - "no" 62 | x-content-type-options: 63 | - nosniff 64 | x-ms-client-request-id: 65 | - 6624fbc0-526e-4c5f-b98c-fb96daf3c582 66 | x-ms-rai-invoked: 67 | - "true" 68 | x-ms-region: 69 | - East US 2 70 | x-ratelimit-remaining-requests: 71 | - "119" 72 | x-ratelimit-remaining-tokens: 73 | - "119984" 74 | x-request-id: 75 | - cc6ffd40-d056-43b3-9813-19c43abd66d6 76 | status: 77 | code: 200 78 | message: OK 79 | version: 1 80 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_azure/test_chat_with_events_with_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "openllmetry-testing"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '113' 14 | content-type: 15 | - application/json 16 | host: 17 | - traceloop-stg.openai.azure.com 18 | user-agent: 19 | - AzureOpenAI/Python 1.14.3 20 | x-stainless-arch: 21 | - arm64 22 | x-stainless-async: 23 | - 'false' 24 | x-stainless-lang: 25 | - python 26 | x-stainless-os: 27 | - MacOS 28 | x-stainless-package-version: 29 | - 1.14.3 30 | x-stainless-runtime: 31 | - CPython 32 | x-stainless-runtime-version: 33 | - 3.12.1 34 | method: POST 35 | uri: https://traceloop-stg.openai.azure.com/openai/deployments/openllmetry-testing/chat/completions?api-version=2024-02-01 36 | response: 37 | body: 38 | string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"content":"Why 39 | did the Opentelemetry developer refuse to share their candy?\n\nBecause they 40 | wanted to keep all the traces for 
themselves!","role":"assistant"}}],"created":1714036973,"id":"chatcmpl-9HpbZPf84KZFiQG6fdY0KVtIwHyIa","model":"gpt-35-turbo","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":24,"prompt_tokens":15,"total_tokens":39}} 41 | 42 | ' 43 | headers: 44 | Cache-Control: 45 | - no-cache, must-revalidate 46 | Content-Length: 47 | - '921' 48 | Content-Type: 49 | - application/json 50 | Date: 51 | - Thu, 25 Apr 2024 09:22:53 GMT 52 | Strict-Transport-Security: 53 | - max-age=31536000; includeSubDomains; preload 54 | access-control-allow-origin: 55 | - '*' 56 | apim-request-id: 57 | - 8a3ae7c6-2eb5-45ea-947e-b4218c7ca1a5 58 | azureml-model-session: 59 | - d060-20240328183338 60 | cmp-upstream-response-duration: 61 | - '544' 62 | x-accel-buffering: 63 | - 'no' 64 | x-content-type-options: 65 | - nosniff 66 | x-ms-client-request-id: 67 | - 8a3ae7c6-2eb5-45ea-947e-b4218c7ca1a5 68 | x-ms-rai-invoked: 69 | - 'true' 70 | x-ms-region: 71 | - East US 2 72 | x-ratelimit-remaining-requests: 73 | - '119' 74 | x-ratelimit-remaining-tokens: 75 | - '119984' 76 | x-request-id: 77 | - 0f63a0ff-049a-4f46-88e1-5e5c2e4c87c0 78 | status: 79 | code: 200 80 | message: OK 81 | version: 1 82 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_azure/test_chat_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 4 | "model": "openllmetry-testing"}' 5 | headers: 6 | accept: 7 | - application/json 8 | accept-encoding: 9 | - gzip, deflate 10 | connection: 11 | - keep-alive 12 | content-length: 13 | - '113' 14 | content-type: 15 | - application/json 16 | host: 17 | - traceloop-stg.openai.azure.com 18 | user-agent: 19 | - AzureOpenAI/Python 1.14.3 20 | x-stainless-arch: 21 | - arm64 22 | x-stainless-async: 23 | - 'false' 24 | x-stainless-lang: 25 | - python 26 | x-stainless-os: 27 | - MacOS 28 | x-stainless-package-version: 29 | - 1.14.3 30 | x-stainless-runtime: 31 | - CPython 32 | x-stainless-runtime-version: 33 | - 3.12.1 34 | method: POST 35 | uri: https://traceloop-stg.openai.azure.com/openai/deployments/openllmetry-testing/chat/completions?api-version=2024-02-01 36 | response: 37 | body: 38 | string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"content":"Why 39 | did the Opentelemetry developer refuse to share their candy?\n\nBecause they 40 | wanted to keep all the traces for 
themselves!","role":"assistant"}}],"created":1714036973,"id":"chatcmpl-9HpbZPf84KZFiQG6fdY0KVtIwHyIa","model":"gpt-35-turbo","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":24,"prompt_tokens":15,"total_tokens":39}} 41 | 42 | ' 43 | headers: 44 | Cache-Control: 45 | - no-cache, must-revalidate 46 | Content-Length: 47 | - '921' 48 | Content-Type: 49 | - application/json 50 | Date: 51 | - Thu, 25 Apr 2024 09:22:53 GMT 52 | Strict-Transport-Security: 53 | - max-age=31536000; includeSubDomains; preload 54 | access-control-allow-origin: 55 | - '*' 56 | apim-request-id: 57 | - 8a3ae7c6-2eb5-45ea-947e-b4218c7ca1a5 58 | azureml-model-session: 59 | - d060-20240328183338 60 | cmp-upstream-response-duration: 61 | - '544' 62 | x-accel-buffering: 63 | - 'no' 64 | x-content-type-options: 65 | - nosniff 66 | x-ms-client-request-id: 67 | - 8a3ae7c6-2eb5-45ea-947e-b4218c7ca1a5 68 | x-ms-rai-invoked: 69 | - 'true' 70 | x-ms-region: 71 | - East US 2 72 | x-ratelimit-remaining-requests: 73 | - '119' 74 | x-ratelimit-remaining-tokens: 75 | - '119984' 76 | x-request-id: 77 | - 0f63a0ff-049a-4f46-88e1-5e5c2e4c87c0 78 | status: 79 | code: 200 80 | message: OK 81 | version: 1 82 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_azure/test_chat_content_filtering_with_events_with_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: 4 | '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 5 | "model": "openllmetry-testing"}' 6 | headers: 7 | accept: 8 | - application/json 9 | accept-encoding: 10 | - gzip, deflate 11 | connection: 12 | - keep-alive 13 | content-length: 14 | - "113" 15 | content-type: 16 | - application/json 17 | host: 18 | - traceloop-stg.openai.azure.com 19 | user-agent: 20 | - AzureOpenAI/Python 1.14.3 21 | x-stainless-arch: 22 | - arm64 23 | x-stainless-async: 24 | - "false" 25 | x-stainless-lang: 26 | - python 27 | x-stainless-os: 28 | - MacOS 29 | x-stainless-package-version: 30 | - 1.14.3 31 | x-stainless-runtime: 32 | - CPython 33 | x-stainless-runtime-version: 34 | - 3.12.1 35 | method: POST 36 | uri: https://traceloop-stg.openai.azure.com/openai/deployments/openllmetry-testing/chat/completions?api-version=2024-02-01 37 | response: 38 | body: 39 | string: 40 | '{"choices":[{"content_filter_results":{"hate":{"filtered":true,"severity":"high"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"content_filter","index":0,"logprobs":null}],"created":1714038380,"id":"chatcmpl-9HpyGSWv1hoKdGaUaiFhfxzTEVlZo","model":"gpt-35-turbo","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":19,"prompt_tokens":15,"total_tokens":34}} 41 | 42 | ' 43 | headers: 44 | Cache-Control: 45 | - no-cache, must-revalidate 46 | 
Content-Length: 47 | - "882" 48 | Content-Type: 49 | - application/json 50 | Date: 51 | - Thu, 25 Apr 2024 09:46:20 GMT 52 | Strict-Transport-Security: 53 | - max-age=31536000; includeSubDomains; preload 54 | access-control-allow-origin: 55 | - "*" 56 | apim-request-id: 57 | - 6624fbc0-526e-4c5f-b98c-fb96daf3c582 58 | azureml-model-session: 59 | - d061-20240328190738 60 | x-accel-buffering: 61 | - "no" 62 | x-content-type-options: 63 | - nosniff 64 | x-ms-client-request-id: 65 | - 6624fbc0-526e-4c5f-b98c-fb96daf3c582 66 | x-ms-rai-invoked: 67 | - "true" 68 | x-ms-region: 69 | - East US 2 70 | x-ratelimit-remaining-requests: 71 | - "119" 72 | x-ratelimit-remaining-tokens: 73 | - "119984" 74 | x-request-id: 75 | - cc6ffd40-d056-43b3-9813-19c43abd66d6 76 | status: 77 | code: 200 78 | message: OK 79 | version: 1 80 | -------------------------------------------------------------------------------- /tests/test_instrumentations/test_openai/traces/cassettes/test_azure/test_chat_content_filtering_with_events_with_no_content.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: 4 | '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}], 5 | "model": "openllmetry-testing"}' 6 | headers: 7 | accept: 8 | - application/json 9 | accept-encoding: 10 | - gzip, deflate 11 | connection: 12 | - keep-alive 13 | content-length: 14 | - "113" 15 | content-type: 16 | - application/json 17 | host: 18 | - traceloop-stg.openai.azure.com 19 | user-agent: 20 | - AzureOpenAI/Python 1.14.3 21 | x-stainless-arch: 22 | - arm64 23 | x-stainless-async: 24 | - "false" 25 | x-stainless-lang: 26 | - python 27 | x-stainless-os: 28 | - MacOS 29 | x-stainless-package-version: 30 | - 1.14.3 31 | x-stainless-runtime: 32 | - CPython 33 | x-stainless-runtime-version: 34 | - 3.12.1 35 | method: POST 36 | uri: https://traceloop-stg.openai.azure.com/openai/deployments/openllmetry-testing/chat/completions?api-version=2024-02-01 37 | response: 38 | body: 39 | string: 40 | '{"choices":[{"content_filter_results":{"hate":{"filtered":true,"severity":"high"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"content_filter","index":0,"logprobs":null}],"created":1714038380,"id":"chatcmpl-9HpyGSWv1hoKdGaUaiFhfxzTEVlZo","model":"gpt-35-turbo","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":19,"prompt_tokens":15,"total_tokens":34}} 41 | 42 | ' 43 | headers: 44 | Cache-Control: 45 | - no-cache, must-revalidate 46 | Content-Length: 47 | - "882" 48 | Content-Type: 49 | - application/json 50 | Date: 51 | - Thu, 25 Apr 2024 09:46:20 GMT 52 | Strict-Transport-Security: 53 | - max-age=31536000; includeSubDomains; preload 54 | access-control-allow-origin: 55 | - "*" 56 | apim-request-id: 57 | - 6624fbc0-526e-4c5f-b98c-fb96daf3c582 58 | azureml-model-session: 59 | - d061-20240328190738 60 | x-accel-buffering: 61 | - "no" 62 | x-content-type-options: 63 | - nosniff 64 | x-ms-client-request-id: 65 | - 6624fbc0-526e-4c5f-b98c-fb96daf3c582 66 | x-ms-rai-invoked: 67 | - "true" 68 | x-ms-region: 69 | - East US 2 70 | x-ratelimit-remaining-requests: 71 | - "119" 72 | 
x-ratelimit-remaining-tokens: 73 | - "119984" 74 | x-request-id: 75 | - cc6ffd40-d056-43b3-9813-19c43abd66d6 76 | status: 77 | code: 200 78 | message: OK 79 | version: 1 80 | -------------------------------------------------------------------------------- /src/lmnr/sdk/client/asynchronous/resources/evaluators.py: -------------------------------------------------------------------------------- 1 | """Evaluators resource for creating evaluator scores.""" 2 | 3 | import uuid 4 | from typing import Any 5 | 6 | from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource 7 | from lmnr.sdk.utils import format_id 8 | 9 | 10 | class AsyncEvaluators(BaseAsyncResource): 11 | """Resource for creating evaluator scores.""" 12 | 13 | async def score( 14 | self, 15 | *, 16 | name: str, 17 | trace_id: str | int | uuid.UUID | None = None, 18 | span_id: str | int | uuid.UUID | None = None, 19 | metadata: dict[str, Any] | None = None, 20 | score: float, 21 | ) -> None: 22 | """Create a score for a span. 23 | 24 | Args: 25 | name (str): Name of the score 26 | trace_id (str | int | uuid.UUID | None, optional): The trace ID to score (will be attached to root span) 27 | span_id (str | int | uuid.UUID | None, optional): The span ID to score 28 | metadata (dict[str, Any] | None, optional): Additional metadata. Defaults to None. 29 | score (float): The score value (float) 30 | 31 | Raises: 32 | ValueError: If there's an error creating the score. 33 | 34 | Example: 35 | Score by trace ID (will attach to root span): 36 | 37 | >>> await laminar_client.evaluators.score( 38 | ... name="quality", 39 | ... trace_id="trace-id-here", 40 | ... score=0.95, 41 | ... metadata={"model": "gpt-4"} 42 | ... ) 43 | 44 | Score by span ID: 45 | 46 | >>> await laminar_client.evaluators.score( 47 | ... name="relevance", 48 | ... span_id="span-id-here", 49 | ... score=0.87 50 | ... ) 51 | """ 52 | if trace_id is not None and span_id is not None: 53 | raise ValueError("Cannot provide both trace_id and span_id. Please provide only one.") 54 | if trace_id is None and span_id is None: 55 | raise ValueError("Either 'trace_id' or 'span_id' must be provided.") 56 | 57 | if trace_id is not None: 58 | formatted_trace_id = format_id(trace_id) 59 | payload = { 60 | "name": name, 61 | "traceId": formatted_trace_id, 62 | "metadata": metadata, 63 | "score": score, 64 | "source": "Code", 65 | } 66 | else: 67 | formatted_span_id = format_id(span_id) 68 | payload = { 69 | "name": name, 70 | "spanId": formatted_span_id, 71 | "metadata": metadata, 72 | "score": score, 73 | "source": "Code", 74 | } 75 | 76 | response = await self._client.post( 77 | self._base_url + "/v1/evaluators/score", 78 | json=payload, 79 | headers=self._headers(), 80 | ) 81 | 82 | if response.status_code != 200: 83 | if response.status_code == 401: 84 | raise ValueError("Unauthorized. 
Please check your project API key.") 85 | raise ValueError(f"Error creating evaluator score: {response.text}") -------------------------------------------------------------------------------- /tests/test_context.py: -------------------------------------------------------------------------------- 1 | from opentelemetry.trace.span import INVALID_SPAN_ID 2 | from lmnr import Laminar, observe 3 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter 4 | 5 | 6 | def test_clear_context_observe(span_exporter: InMemorySpanExporter): 7 | @observe() 8 | def inner(): 9 | Laminar.set_trace_user_id("test_user_id_2") 10 | return "inner" 11 | 12 | @observe() 13 | def outer(): 14 | Laminar.set_trace_user_id("test_user_id_1") 15 | Laminar.force_flush() 16 | return inner() 17 | 18 | outer() 19 | spans = span_exporter.get_finished_spans() 20 | assert len(spans) == 2 21 | outer_span = [s for s in spans if s.name == "outer"][0] 22 | inner_span = [s for s in spans if s.name == "inner"][0] 23 | assert ( 24 | outer_span.attributes["lmnr.association.properties.user_id"] == "test_user_id_1" 25 | ) 26 | assert ( 27 | inner_span.attributes["lmnr.association.properties.user_id"] == "test_user_id_2" 28 | ) 29 | 30 | assert inner_span.parent is None or inner_span.parent.span_id == INVALID_SPAN_ID 31 | assert ( 32 | inner_span.get_span_context().trace_id != outer_span.get_span_context().trace_id 33 | ) 34 | 35 | 36 | def test_clear_context_start_as_current_span(span_exporter: InMemorySpanExporter): 37 | with Laminar.start_as_current_span("outer"): 38 | Laminar.set_trace_user_id("test_user_id_1") 39 | Laminar.force_flush() 40 | with Laminar.start_as_current_span("inner"): 41 | Laminar.set_trace_user_id("test_user_id_2") 42 | pass 43 | 44 | spans = span_exporter.get_finished_spans() 45 | assert len(spans) == 2 46 | outer_span = [s for s in spans if s.name == "outer"][0] 47 | inner_span = [s for s in spans if s.name == "inner"][0] 48 | assert ( 49 | outer_span.attributes["lmnr.association.properties.user_id"] == "test_user_id_1" 50 | ) 51 | assert ( 52 | inner_span.attributes["lmnr.association.properties.user_id"] == "test_user_id_2" 53 | ) 54 | 55 | assert inner_span.parent is None or inner_span.parent.span_id == INVALID_SPAN_ID 56 | assert ( 57 | inner_span.get_span_context().trace_id != outer_span.get_span_context().trace_id 58 | ) 59 | 60 | 61 | def test_clear_context_start_active_span(span_exporter: InMemorySpanExporter): 62 | span = Laminar.start_active_span("outer") 63 | Laminar.set_trace_user_id("test_user_id_1") 64 | Laminar.force_flush() 65 | span2 = Laminar.start_active_span("inner") 66 | Laminar.set_trace_user_id("test_user_id_2") 67 | span2.end() 68 | span.end() 69 | 70 | spans = span_exporter.get_finished_spans() 71 | assert len(spans) == 2 72 | outer_span = [s for s in spans if s.name == "outer"][0] 73 | inner_span = [s for s in spans if s.name == "inner"][0] 74 | assert ( 75 | outer_span.attributes["lmnr.association.properties.user_id"] == "test_user_id_1" 76 | ) 77 | assert ( 78 | inner_span.attributes["lmnr.association.properties.user_id"] == "test_user_id_2" 79 | ) 80 | 81 | assert inner_span.parent is None or inner_span.parent.span_id == INVALID_SPAN_ID 82 | assert ( 83 | inner_span.get_span_context().trace_id != outer_span.get_span_context().trace_id 84 | ) 85 | -------------------------------------------------------------------------------- 
/tests/test_instrumentations/test_anthropic/cassettes/test_messages/test_async_anthropic_message_create_with_events_with_content.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 |     body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "Tell me
4 |       a joke about OpenTelemetry"}], "model": "claude-3-opus-20240229"}'
5 |     headers:
6 |       accept:
7 |       - application/json
8 |       accept-encoding:
9 |       - gzip, deflate
10 |       anthropic-version:
11 |       - '2023-06-01'
12 |       connection:
13 |       - keep-alive
14 |       content-length:
15 |       - '136'
16 |       content-type:
17 |       - application/json
18 |       host:
19 |       - api.anthropic.com
20 |       user-agent:
21 |       - AsyncAnthropic/Python 0.21.3
22 |       x-stainless-arch:
23 |       - other:amd64
24 |       x-stainless-async:
25 |       - async:asyncio
26 |       x-stainless-lang:
27 |       - python
28 |       x-stainless-os:
29 |       - Windows
30 |       x-stainless-package-version:
31 |       - 0.21.3
32 |       x-stainless-runtime:
33 |       - CPython
34 |       x-stainless-runtime-version:
35 |       - 3.9.13
36 |     method: POST
37 |     uri: https://api.anthropic.com/v1/messages
38 |   response:
39 |     body:
40 |       string: !!binary |
41 |         H4sIAAAAAAAAA3xTXW/UQAz8K2ZfClJatQUEzQuiKgge+BJFCHGo8mWdZNuNHdbeS0PV/4427VEV
42 |         JJ4uWs+M7fHclQve1W7Q7mz/4Mvrk5Nzfc1Pjz9+4vb9NOg7fvnNVc7mkQqKVLEjV7kksTygalBD
43 |         Nle5RtiIzdXfr7Z4o8tSWX5q9zknqqCnRDsKCOdyQYBryQYfRuJTijSQpble8Yq/9jP44MF6Ak8b
44 |         ijJSAk9N8AQmkJXus14U1jE1WCrW0wwTspEvYEvYLI8hAY5jDA1aEN5RoA2lGQbZ0IPCf3U5RuSl
45 |         WK/4nj4EBWSQkXhXJaeGQNZKaYPrEIPN0CYcaJJ0AdajwZhkEzyVRYtBHpOHCecyTkdMCY0qaCRG
46 |         aqwCZA90OUoysD8dPRrCw/IdGq0gSqc3yGUffQStJPBBLYV1LqvqrEaD7sFbA4xRJr3zTkvnwGop
47 |         D8T2rxu6SHcYuMBC15tC4OJeT7CmHjdB0oIZKbWSBuTiQXurtO1dbDzt6ea6Y8RZQXjRmCR5WLll
48 |         +GrlYOpD00OPxSIveR0JBkIO3C0Z2YO3N7wlV5dWWt27SLUVWzlI1N6uWBjbk9ye/mKZGhnj/GuJ
49 |         AbRRpqKX6GcmNQXrk+SuL5P84+cevJGpBKVauMtifR4kSdY4g+auu9VA+yuvJYL6/wSu3F0Giyuo
50 |         ENqbLe5wS/iK7yoMayoWNVGUYuFxMEnkQRK0Uq5Ofs9d/6jcIJ6iq10TMXvafbwrY9bdw/3DJ/uH
51 |         h0eucmoyniVCFXa1I/ZnlhNvC1rM4YZndLOLhcUSV/39q9fucBjtjOTC2J19cGzykm2e0/Pj66vfwMA
52 |         AP//AwDpcnXwWQQAAA==
53 |     headers:
54 |       CF-Cache-Status:
55 |       - DYNAMIC
56 |       CF-RAY:
57 |       - 87084fd0e8292b96-LAX
58 |       Connection:
59 |       - keep-alive
60 |       Content-Encoding:
61 |       - gzip
62 |       Content-Type:
63 |       - application/json
64 |       Date:
65 |       - Sun, 07 Apr 2024 07:30:30 GMT
66 |       Server:
67 |       - cloudflare
68 |       Transfer-Encoding:
69 |       - chunked
70 |       anthropic-ratelimit-requests-limit:
71 |       - '5'
72 |       anthropic-ratelimit-requests-remaining:
73 |       - '3'
74 |       anthropic-ratelimit-requests-reset:
75 |       - '2024-04-07T07:31:00Z'
76 |       anthropic-ratelimit-tokens-limit:
77 |       - '10000'
78 |       anthropic-ratelimit-tokens-remaining:
79 |       - '8000'
80 |       anthropic-ratelimit-tokens-reset:
81 |       - '2024-04-07T07:31:00Z'
82 |       request-id:
83 |       - req_01HBJrHQNkCXVEy7ctxYUHLu
84 |       via:
85 |       - 1.1 google
86 |       x-cloud-trace-context:
87 |       - 7576a88f449dc1524e86b032fde2dc7f
88 |     status:
89 |       code: 200
90 |       message: OK
91 | version: 1
92 | 
--------------------------------------------------------------------------------
/tests/test_instrumentations/test_anthropic/cassettes/test_messages/test_async_anthropic_message_create_with_events_with_no_content.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 |     body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "Tell me
4 |       a joke about OpenTelemetry"}], "model": "claude-3-opus-20240229"}'
5 |     headers:
6 |       accept:
7 |       - application/json
8 |       accept-encoding:
9 |       - gzip, deflate
10 |       anthropic-version:
11 |       - '2023-06-01'
12 |       connection:
13 |       - keep-alive
14 |       content-length:
15 |       - '136'
16 |       content-type:
17 |       - application/json
18 |       host:
19 |       - api.anthropic.com
20 |       user-agent:
21 |       - AsyncAnthropic/Python 0.21.3
22 |       x-stainless-arch:
23 |       - other:amd64
24 |       x-stainless-async:
25 |       - async:asyncio
26 |       x-stainless-lang:
27 |       - python
28 |       x-stainless-os:
29 |       - Windows
30 |       x-stainless-package-version:
31 |       - 0.21.3
32 |       x-stainless-runtime:
33 |       - CPython
34 |       x-stainless-runtime-version:
35 |       - 3.9.13
36 |     method: POST
37 |     uri: https://api.anthropic.com/v1/messages
38 |   response:
39 |     body:
40 |       string: !!binary |
41 |         H4sIAAAAAAAAA3xTXW/UQAz8K2ZfClJatQUEzQuiKgge+BJFCHGo8mWdZNuNHdbeS0PV/4427VEV
42 |         JJ4uWs+M7fHclQve1W7Q7mz/4Mvrk5Nzfc1Pjz9+4vb9NOg7fvnNVc7mkQqKVLEjV7kksTygalBD
43 |         Nle5RtiIzdXfr7Z4o8tSWX5q9zknqqCnRDsKCOdyQYBryQYfRuJTijSQpble8Yq/9jP44MF6Ak8b
44 |         ijJSAk9N8AQmkJXus14U1jE1WCrW0wwTspEvYEvYLI8hAY5jDA1aEN5RoA2lGQbZ0IPCf3U5RuSl
45 |         WK/4nj4EBWSQkXhXJaeGQNZKaYPrEIPN0CYcaJJ0AdajwZhkEzyVRYtBHpOHCecyTkdMCY0qaCRG
46 |         aqwCZA90OUoysD8dPRrCw/IdGq0gSqc3yGUffQStJPBBLYV1LqvqrEaD7sFbA4xRJr3zTkvnwGop
47 |         D8T2rxu6SHcYuMBC15tC4OJeT7CmHjdB0oIZKbWSBuTiQXurtO1dbDzt6ea6Y8RZQXjRmCR5WLll
48 |         +GrlYOpD00OPxSIveR0JBkIO3C0Z2YO3N7wlV5dWWt27SLUVWzlI1N6uWBjbk9ye/mKZGhnj/GuJ
49 |         AbRRpqKX6GcmNQXrk+SuL5P84+cevJGpBKVauMtifR4kSdY4g+auu9VA+yuvJYL6/wSu3F0Giyuo
50 |         ENqbLe5wS/iK7yoMayoWNVGUYuFxMEnkQRK0Uq5Ofs9d/6jcIJ6iq10TMXvafbwrY9bdw/3DJ/uH
51 |         h0eucmoyniVCFXa1I/ZnlhNvC1rM4YZndLOLhcUSV/39q9fucBjtjOTC2J19cGzykm2e0/Pj66vfwMA
52 |         AP//AwDpcnXwWQQAAA==
53 |     headers:
54 |       CF-Cache-Status:
55 |       - DYNAMIC
56 |       CF-RAY:
57 |       - 87084fd0e8292b96-LAX
58 |       Connection:
59 |       - keep-alive
60 |       Content-Encoding:
61 |       - gzip
62 |       Content-Type:
63 |       - application/json
64 |       Date:
65 |       - Sun, 07 Apr 2024 07:30:30 GMT
66 |       Server:
67 |       - cloudflare
68 |       Transfer-Encoding:
69 |       - chunked
70 |       anthropic-ratelimit-requests-limit:
71 |       - '5'
72 |       anthropic-ratelimit-requests-remaining:
73 |       - '3'
74 |       anthropic-ratelimit-requests-reset:
75 |       - '2024-04-07T07:31:00Z'
76 |       anthropic-ratelimit-tokens-limit:
77 |       - '10000'
78 |       anthropic-ratelimit-tokens-remaining:
79 |       - '8000'
80 |       anthropic-ratelimit-tokens-reset:
81 |       - '2024-04-07T07:31:00Z'
82 |       request-id:
83 |       - req_01HBJrHQNkCXVEy7ctxYUHLu
84 |       via:
85 |       - 1.1 google
86 |       x-cloud-trace-context:
87 |       - 7576a88f449dc1524e86b032fde2dc7f
88 |     status:
89 |       code: 200
90 |       message: OK
91 | version: 1
92 | 
--------------------------------------------------------------------------------
/src/lmnr/sdk/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from pathlib import Path
3 | 
4 | import uuid
5 | 
6 | from lmnr.sdk.client.synchronous.sync_client import LaminarClient
7 | from lmnr.sdk.datasets.file_utils import load_from_paths
8 | from lmnr.sdk.log import get_default_logger
9 | from lmnr.sdk.types import Datapoint
10 | 
11 | DEFAULT_FETCH_SIZE = 25
12 | LOG = get_default_logger(__name__, verbose=False)
13 | 
14 | 
15 | class EvaluationDataset(ABC):
16 |     @abstractmethod
17 |     def __init__(self, *args, **kwargs):
18 |         pass
19 | 
20 |     @abstractmethod
21 |     def __len__(self) -> int:
22 |         pass
23 | 
24 |     @abstractmethod
25 |     def __getitem__(self, idx) -> Datapoint:
26 |         pass
27 | 
28 |     def slice(self, start: int, end: int):
29 |         return [self[i] for i in range(max(start, 0), min(end, len(self)))]
30 | 
31 | 
32 | class LaminarDataset(EvaluationDataset):
33 |     client: LaminarClient
34 |     id: uuid.UUID | None = None
35 | 
36 |     def __init__(
37 |         self,
38 |         name: str | None = None,
39 |         id: uuid.UUID | None = None,
40 |         fetch_size: int = DEFAULT_FETCH_SIZE,
41 |     ):
42 |         self.name = name
43 |         self.id = id
44 |         if name is None and id is None:
45 |             raise ValueError("Either name or id must be provided")
46 |         if name is not None and id is not None:
47 |             raise ValueError("Only one of name or id must be provided")
48 |         self._len = None
49 |         self._fetched_items = []
50 |         self._offset = 0
51 |         self._fetch_size = fetch_size
52 |         self._logger = get_default_logger(self.__class__.__name__)
53 | 
54 |     def _fetch_batch(self):
55 |         self._logger.debug(
56 |             f"dataset name: {self.name}, id: {self.id}. Fetching batch from {self._offset} to "
57 |             + f"{self._offset + self._fetch_size}"
58 |         )
59 |         identifier = {"id": self.id} if self.id is not None else {"name": self.name}
60 |         resp = self.client.datasets.pull(
61 |             **identifier,
62 |             offset=self._offset,
63 |             limit=self._fetch_size,
64 |         )
65 |         self._fetched_items += resp.items
66 |         self._offset = len(self._fetched_items)
67 |         if self._len is None:
68 |             self._len = resp.total_count
69 | 
70 |     def __len__(self) -> int:
71 |         if self._len is None:
72 |             self._fetch_batch()
73 |         return self._len
74 | 
75 |     def __getitem__(self, idx) -> Datapoint:
76 |         # A single fetch may not reach the requested index, so keep fetching
77 |         # batches until it is available (or the dataset is exhausted).
78 |         while idx >= len(self._fetched_items):
79 |             if self._len is not None and len(self._fetched_items) >= self._len:
80 |                 raise IndexError(f"Index {idx} is out of range")
81 |             self._fetch_batch()
82 |         return self._fetched_items[idx]
83 | 
84 |     def set_client(self, client: LaminarClient):
85 |         self.client = client
86 | 
87 |     def push(self, paths: str | list[str], recursive: bool = False):
88 |         paths = [paths] if isinstance(paths, str) else paths
89 |         paths = [Path(path) for path in paths]
90 |         data = load_from_paths(paths, recursive)
91 |         if len(data) == 0:
92 |             LOG.warning("No data to push. Skipping")
93 |             return
94 |         identifier = {"id": self.id} if self.id is not None else {"name": self.name}
95 |         self.client.datasets.push(data, **identifier)
96 |         LOG.info(
97 |             f"Successfully pushed {len(data)} datapoints to dataset [{identifier}]"
98 |         )
99 | 
--------------------------------------------------------------------------------
/tests/test_instrumentations/test_anthropic/cassettes/test_messages/test_anthropic_message_create_with_events_with_content.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 |     body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "Tell me
4 |       a joke about OpenTelemetry"}], "model": "claude-3-opus-20240229"}'
5 |     headers:
6 |       accept:
7 |       - application/json
8 |       accept-encoding:
9 |       - gzip, deflate
10 |       anthropic-version:
11 |       - '2023-06-01'
12 |       connection:
13 |       - keep-alive
14 |       content-length:
15 |       - '136'
16 |       content-type:
17 |       - application/json
18 |       host:
19 |       - api.anthropic.com
20 |       user-agent:
21 |       - Anthropic/Python 0.21.3
22 |       x-stainless-arch:
23 |       - other:amd64
24 |       x-stainless-async:
25 |       - 'false'
26 |       x-stainless-lang:
27 |       - python
28 |       x-stainless-os:
29 |       - Windows
30 |       x-stainless-package-version:
31 |       - 0.21.3
32 |       x-stainless-runtime:
33 |       - CPython
34 |       x-stainless-runtime-version:
35 |       - 3.9.13
36 |     method: POST
37 |     uri: https://api.anthropic.com/v1/messages
38 |   response:
39 |     body:
40 |       string: !!binary |
41 |         H4sIAAAAAAAAA5SUwW7cRgyGX4WZi1tAazjboml1KVIghxyMuqiBFogKY1biSvSOhhOSo7Vg+N2L
42 |         md24dpBLTwI4HPL/P3L06GhwrZt1vLt6e3vz93S44Z/v15/WP67lerpP9P6Da5ytCUsWqvoRXeOE
43 |         Qwl4VVLz0Vzjeo6G0Vz76fFLvuFDOamf1v2ZBRuYUPBCwcM9HxD8jrPB7wnjLQac0WRtu9jFv6YV
44 |         BhrAJoQBFwycUMAPnL7K/rWLv2Hvs2LJXeHoo+EAxmDi+xokATVMCj4OMKOhnKNJeBRUfVM6fnhI
45 |         wUdvxLHt4qseQOUucMK4Uc7SI/BOURa/o0C2wl78jEeWA9jkrdRdaMBiUtGA9/D+5qM2EGgnXgi1
46 |         qVKMOWhRSlFN8ozRGhgxonjDBnoOAXs75eJDYjGwZ0mDNw/fVY/aFFdC/blu4FG/hz0LDKQmtMuF
47 |         iK5qOOsl3E54Yp+CXxU4VshHlkGhc7Vi52qhzlVaTefgOFE/gReEA67Qc+wxmQLF19O4LCQ30Lnb
48 |         r+F3DgT3KNXwS12lIcWxObeguHBYUEGwZxkojlVe8jYVkB4EP2dUA69AVm4vWChOwnmcCvHqs+TO
49 |         1AuXMVGPennSdf2N8b+S9kzyP8efs49G5o0WhBm9ZsEyLD0JOjW8UEgoe5bZxx4rvh1OfiGWCuWZ
50 |         uuZxRDU9rco39huH10yLqtFThB1aEb+Q0nnvSpccB5TyBisr3p/N+ZQC9XWdL/RZSr3xQmcD91kN
51 |         Ah0QJj4CR4SZxsn+x+spSyDoAwTa46V7+qdxMw8YXOv64POAmx82nLJutlfbH6+2219c49Q43Ql6
52 |         5ehah3G4syzxy4GWEcceXRtzCI3L9afTPjqKKdud8QGjuvbtu8Zxtpeh7fbq6elfAAAA//8DALAq
53 |         KXrTBAAA
54 |     headers:
55 |       CF-Cache-Status:
56 |       - DYNAMIC
57 |       CF-RAY:
58 |       - 87084f30df5f31f7-LAX
59 |       Connection:
60 |       - keep-alive
61 |       Content-Encoding:
62 |       - gzip
63 |       Content-Type:
64 |       - application/json
65 |       Date:
66 |       - Sun, 07 Apr 2024 07:30:05 GMT
67 |       Server:
68 |       - cloudflare
69 |       Transfer-Encoding:
70 |       - chunked
71 |       anthropic-ratelimit-requests-limit:
72 |       - '5'
73 |       anthropic-ratelimit-requests-remaining:
74 |       - '5'
75 |       anthropic-ratelimit-requests-reset:
76 |       - '2024-04-07T07:31:00Z'
77 |       anthropic-ratelimit-tokens-limit:
78 |       - '10000'
79 |       anthropic-ratelimit-tokens-remaining:
80 |       - '10000'
81 |       anthropic-ratelimit-tokens-reset:
82 |       - '2024-04-07T07:31:00Z'
83 |       request-id:
84 |       - req_01A7u6aDNi2C6mvDawgML8jB
85 |       via:
86 |       - 1.1 google
87 |       x-cloud-trace-context:
88 |       - ef9f2bfaf4cbc96c6557e43e805aa4f1
89 |     status:
90 |       code: 200
91 |       message: OK
92 | version: 1
93 | 
--------------------------------------------------------------------------------
/tests/test_instrumentations/test_anthropic/cassettes/test_messages/test_anthropic_message_create_with_events_with_no_content.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 |     body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "Tell me
4 |       a joke about OpenTelemetry"}], "model": "claude-3-opus-20240229"}'
5 |     headers:
6 |       accept:
7 |       - application/json
8 |       accept-encoding:
9 |       - gzip, deflate
10 |       anthropic-version:
11 |       - '2023-06-01'
12 |       connection:
13 |       - keep-alive
14 |       content-length:
15 |       - '136'
16 |       content-type:
17 |       - application/json
18 |       host:
19 |       - api.anthropic.com
20 |       user-agent:
21 |       - Anthropic/Python 0.21.3
22 |       x-stainless-arch:
23 |       - other:amd64
24 |       x-stainless-async:
25 |       - 'false'
26 |       x-stainless-lang:
27 |       - python
28 |       x-stainless-os:
29 |       - Windows
30 |       x-stainless-package-version:
31 |       - 0.21.3
32 |       x-stainless-runtime:
33 |       - CPython
34 |       x-stainless-runtime-version:
35 |       - 3.9.13
36 |     method: POST
37 |     uri: https://api.anthropic.com/v1/messages
38 |   response:
39 |     body:
40 |       string: !!binary |
41 |         H4sIAAAAAAAAA5SUwW7cRgyGX4WZi1tAazjboml1KVIghxyMuqiBFogKY1biSvSOhhOSo7Vg+N2L
42 |         md24dpBLTwI4HPL/P3L06GhwrZt1vLt6e3vz93S44Z/v15/WP67lerpP9P6Da5ytCUsWqvoRXeOE
43 |         Qwl4VVLz0Vzjeo6G0Vz76fFLvuFDOamf1v2ZBRuYUPBCwcM9HxD8jrPB7wnjLQac0WRtu9jFv6YV
44 |         BhrAJoQBFwycUMAPnL7K/rWLv2Hvs2LJXeHoo+EAxmDi+xokATVMCj4OMKOhnKNJeBRUfVM6fnhI
45 |         wUdvxLHt4qseQOUucMK4Uc7SI/BOURa/o0C2wl78jEeWA9jkrdRdaMBiUtGA9/D+5qM2EGgnXgi1
46 |         qVKMOWhRSlFN8ozRGhgxonjDBnoOAXs75eJDYjGwZ0mDNw/fVY/aFFdC/blu4FG/hz0LDKQmtMuF
47 |         iK5qOOsl3E54Yp+CXxU4VshHlkGhc7Vi52qhzlVaTefgOFE/gReEA67Qc+wxmQLF19O4LCQ30Lnb
48 |         r+F3DgT3KNXwS12lIcWxObeguHBYUEGwZxkojlVe8jYVkB4EP2dUA69AVm4vWChOwnmcCvHqs+TO
49 |         1AuXMVGPennSdf2N8b+S9kzyP8efs49G5o0WhBm9ZsEyLD0JOjW8UEgoe5bZxx4rvh1OfiGWCuWZ
50 |         uuZxRDU9rco39huH10yLqtFThB1aEb+Q0nnvSpccB5TyBisr3p/N+ZQC9XWdL/RZSr3xQmcD91kN
51 |         Ah0QJj4CR4SZxsn+x+spSyDoAwTa46V7+qdxMw8YXOv64POAmx82nLJutlfbH6+2219c49Q43Ql6
52 |         5ehah3G4syzxy4GWEcceXRtzCI3L9afTPjqKKdud8QGjuvbtu8Zxtpeh7fbq6elfAAAA//8DALAq
53 |         KXrTBAAA
54 |     headers:
55 |       CF-Cache-Status:
56 |       - DYNAMIC
57 |       CF-RAY:
58 |       - 87084f30df5f31f7-LAX
59 |       Connection:
60 |       - keep-alive
61 |       Content-Encoding:
62 |       - gzip
63 |       Content-Type:
64 |       - application/json
65 |       Date:
66 |       - Sun, 07 Apr 2024 07:30:05 GMT
67 |       Server:
68 |       - cloudflare
69 |       Transfer-Encoding:
70 |       - chunked
71 |       anthropic-ratelimit-requests-limit:
72 |       - '5'
73 |       anthropic-ratelimit-requests-remaining:
74 |       - '5'
75 |       anthropic-ratelimit-requests-reset:
76 |       - '2024-04-07T07:31:00Z'
77 |       anthropic-ratelimit-tokens-limit:
78 |       - '10000'
79 |       anthropic-ratelimit-tokens-remaining:
80 |       - '10000'
81 |       anthropic-ratelimit-tokens-reset:
82 |       - '2024-04-07T07:31:00Z'
83 |       request-id:
84 |       - req_01A7u6aDNi2C6mvDawgML8jB
85 |       via:
86 |       - 1.1 google
87 |       x-cloud-trace-context:
88 |       - ef9f2bfaf4cbc96c6557e43e805aa4f1
89 |     status:
90 |       code: 200
91 |       message: OK
92 | version: 1
93 | 
--------------------------------------------------------------------------------
/tests/test_instrumentations/test_openai/traces/cassettes/test_completions/test_completion.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 |     body: '{"model": "davinci-002", "prompt": "Tell me a joke about opentelemetry"}'
4 |     headers:
5 |       accept:
6 |       - application/json
7 |       accept-encoding:
8 |       - gzip, deflate
9 |       connection:
10 |       - keep-alive
11 |       content-length:
12 |       - '72'
13 |       content-type:
14 |       - application/json
15 |       host:
16 |       - api.openai.com
17 |       user-agent:
18 |       - OpenAI/Python 1.12.0
19 |       x-stainless-arch:
20 |       - arm64
21 |       x-stainless-async:
22 |       - 'false'
23 |       x-stainless-lang:
24 |       - python
25 |       x-stainless-os:
26 |       - MacOS
27 |       x-stainless-package-version:
28 |       - 1.12.0
29 |       x-stainless-runtime:
30 |       - CPython
31 |       x-stainless-runtime-version:
32 |       - 3.9.5
33 |     method: POST
34 |     uri: https://api.openai.com/v1/completions
35 |   response:
36 |     body:
37 |       string: !!binary |
38 |         H4sIAAAAAAAAA0SQMU/DMBSE9/yKJ89YcpJCQ1ZQxcbQBQoocuxHYnD8gu3Soqr/HTmJ2sXDnb+7
39 |         0ztlAMxoVgNTw2h5dfhZFY/5lpSMyub+Yehed8+bl7Xe7A7sJv2m9gtVTETEY2wUDaPFaMjNtvIo
40 |         I6bEfC3uRVlWt2IyBtJoE6blr3HKcCGKBenJKAyshrcMAOA0vTAXJIJ3BNa0Xvq/d7fleggBJRSi
41 |         EFxUPC9reKJ53YQZp/HIahAXxVI3empThdtbe9E/jTOhbzzKQC41WXRd7NnknzOAj2ngPsgOWb0M
42 |         Y6OnYYxNpG90KbKa89j1FFcvv1vMSFHaq16sstRwzv4BAAD//wMAdLAgAoIBAAA=
43 |     headers:
44 |       CF-Cache-Status:
45 |       - DYNAMIC
46 |       CF-RAY:
47 |       - 85c022da9a47bb14-MXP
48 |       Cache-Control:
49 |       - no-cache, must-revalidate
50 |       Connection:
51 |       - keep-alive
52 |       Content-Encoding:
53 |       - gzip
54 |       Content-Type:
55 |       - application/json
56 |       Date:
57 |       - Tue, 27 Feb 2024 11:37:30 GMT
58 |       Server:
59 |       - cloudflare
60 |       Set-Cookie:
61 |       - __cf_bm=8L78ss5z_LU7MemkD1pN_f_BDP7v_lYCqYqih9Zvicg-1709033850-1.0-AWIUdFiaopi5JT1nAItlYRWgcI6QDySDIp81VWqC+Jk/H93vqmteXVRKn7I1MiaEbV+LEP50bUYOxo25WKbMTaU=;
62 |         path=/; expires=Tue, 27-Feb-24 12:07:30 GMT; domain=.api.openai.com; HttpOnly;
63 |         Secure; SameSite=None
64 |       - _cfuvid=2D2znqizYnNBE.eWDDC23lZ7TacISDzNSSkQWwb4cZQ-1709033850735-0.0-604800000;
65 |         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
66 |       Transfer-Encoding:
67 |       - chunked
68 |       access-control-allow-origin:
69 |       - '*'
70 |       alt-svc:
71 |       - h3=":443"; ma=86400
72 |       openai-model:
73 |       - davinci-002
74 |       openai-organization:
75 |       - traceloop
76 |       openai-processing-ms:
77 |       - '358'
78 |       openai-version:
79 |       - '2020-10-01'
80 |       strict-transport-security:
81 |       - max-age=15724800; includeSubDomains
82 |       x-ratelimit-limit-requests:
83 |       - '3000'
84 |       x-ratelimit-limit-tokens:
85 |       - '250000'
86 |       x-ratelimit-remaining-requests:
87 |       - '2999'
88 |       x-ratelimit-remaining-tokens:
89 |       - '249975'
90 |       x-ratelimit-reset-requests:
91 |       - 20ms
92 |       x-ratelimit-reset-tokens:
93 |       - 5ms
94 |       x-request-id:
95 |       - req_a7de5715969ad94860f4bf724e99032e
96 |     status:
97 |       code: 200
98 |       message: OK
99 | version: 1
100 | 
--------------------------------------------------------------------------------
/tests/cassettes/test_litellm_anthropic/test_litellm_anthropic_with_computer_tools.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 |     body: '{"model": "claude-sonnet-4-20250514", "messages": [{"role": "user", "content":
4 |       [{"type": "text", "text": "What is the capital of France?"}]}], "tools": [{"type":
5 |       "computer_20250124", "name": "computer", "display_width_px": 1024, "display_height_px":
6 |       768, "display_number": 1}, {"type": "text_editor_20250124", "name": "str_replace_editor"},
7 |       {"type": "bash_20250124", "name": "bash"}], "max_tokens": 4096}'
8 |     headers:
9 |       accept:
10 |       - application/json
11 |       accept-encoding:
12 |       - gzip, deflate, zstd
13 |       anthropic-beta:
14 |       - computer-use-2025-01-24,computer-use-2024-10-22
15 |       anthropic-version:
16 |       - '2023-06-01'
17 |       connection:
18 |       - keep-alive
19 |       content-length:
20 |       - '404'
21 |       content-type:
22 |       - application/json
23 |       host:
24 |       - api.anthropic.com
25 |       user-agent:
26 |       - litellm/1.76.2
27 |     method: POST
28 |     uri: https://api.anthropic.com/v1/messages
29 |   response:
30 |     body:
31 |       string: !!binary |
32 |         H4sIAAAAAAAAAwAAAP//dJJbjxMxDIX/iuWXfUmrttruZd4WdpG4CthKgBAamcTTCc0ks46zbKn6
33 |         31HMUlFAvNiSz3eOI8U79A4r7PK6ns3f3axf/vAXD7e+2T65/Pii+3A3//YJDeq254HinGnNaFBS
34 |         GAaUs89KUdFglxwHrNAGKo4nOcXIOjmdLGaL5Ww5P0WDNkXlqFh93h0ilR8G89gqXLUMlnqvFCA1
35 |         8EwoWgaf4S2Jz9PHBi1l+MocQY9w63V75Ml+qJcX53B1DRTdEDLiqUSV7UmGQLLmrKPRQEiWlB34
36 |         x9SYRNuJ5ahCAXoSPcqmkOJ6xG7ZR4b3/p5lCs/1JENDXSoZmiQQKLqOZJMh+A2P/I1vGg6wSt9Z
37 |         DLxK5V4YXpfMpTPwJqnw5Jo6hqekLTuhYMa3X4kFx7ASn7q+5SnuvxjMmvpamHKKWCFHV2uRiL+E
38 |         zHeFo2WsYgnBYBk/rtqhj33RWtOGY8ZqcTZbGLRkW66tMKlPsf4TmR10YXL/0w7eYQP3LXcsFOpl
39 |         9y//W523f6t7g6no8ej8zGBmufeWa/UsWOFwb47E4X7/EwAA//8DAD8Ou8i9AgAA
40 |     headers:
41 |       CF-RAY:
42 |       - 97be96372b076559-LHR
43 |       Connection:
44 |       - keep-alive
45 |       Content-Encoding:
46 |       - gzip
47 |       Content-Type:
48 |       - application/json
49 |       Date:
50 |       - Mon, 08 Sep 2025 12:53:08 GMT
51 |       Server:
52 |       - cloudflare
53 |       Transfer-Encoding:
54 |       - chunked
55 |       X-Robots-Tag:
56 |       - none
57 |       anthropic-organization-id:
58 |       - 04aa8588-6567-40cb-9042-a54b20ebaf4f
59 |       anthropic-ratelimit-input-tokens-limit:
60 |       - '2000000'
61 |       anthropic-ratelimit-input-tokens-remaining:
62 |       - '1999000'
63 |       anthropic-ratelimit-input-tokens-reset:
64 |       - '2025-09-08T12:53:06Z'
65 |       anthropic-ratelimit-output-tokens-limit:
66 |       - '400000'
67 |       anthropic-ratelimit-output-tokens-remaining:
68 |       - '400000'
69 |       anthropic-ratelimit-output-tokens-reset:
70 |       - '2025-09-08T12:53:07Z'
71 |       anthropic-ratelimit-tokens-limit:
72 |       - '2400000'
73 |       anthropic-ratelimit-tokens-remaining:
74 |       - '2399000'
75 |       anthropic-ratelimit-tokens-reset:
76 |       - '2025-09-08T12:53:06Z'
77 |       cf-cache-status:
78 |       - DYNAMIC
79 |       request-id:
80 |       - req_011CSvxz9Jmb3r7LCFhiQXum
81 |       strict-transport-security:
82 |       - max-age=31536000; includeSubDomains; preload
83 |       via:
84 |       - 1.1 google
85 |       x-envoy-upstream-service-time:
86 |       - '2140'
87 |     status:
88 |       code: 200
89 |       message: OK
90 | version: 1
91 | 
--------------------------------------------------------------------------------