├── .cz.toml
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.yml
│ └── feature_request.yml
├── PULL_REQUEST_TEMPLATE.md
├── dependabot.yml
└── workflows
│ ├── ci.yml
│ └── release.yml
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── GOVERNANCE.md
├── LICENSE
├── MAINTAINERS.md
├── README.md
├── SECURITY.md
├── img
├── logo-dark.png
└── logo-light.png
├── nx.json
├── package-lock.json
├── package.json
├── packages
├── .gitkeep
├── opentelemetry-instrumentation-alephalpha
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── alephalpha
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ ├── pytest.ini
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ └── test_completion
│ │ │ └── test_alephalpha_completion.yaml
│ │ ├── conftest.py
│ │ └── test_completion.py
├── opentelemetry-instrumentation-anthropic
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── anthropic
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── streaming.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_completion
│ │ │ └── test_anthropic_completion.yaml
│ │ ├── test_messages
│ │ │ ├── test_anthropic_async_multi_modal.yaml
│ │ │ ├── test_anthropic_message_create.yaml
│ │ │ ├── test_anthropic_message_streaming.yaml
│ │ │ ├── test_anthropic_multi_modal.yaml
│ │ │ ├── test_anthropic_tools.yaml
│ │ │ ├── test_async_anthropic_message_create.yaml
│ │ │ ├── test_async_anthropic_message_streaming.yaml
│ │ │ └── test_with_asyncio_run.yaml
│ │ ├── test_prompt_caching
│ │ │ ├── test_anthropic_prompt_caching.yaml
│ │ │ ├── test_anthropic_prompt_caching_async.yaml
│ │ │ ├── test_anthropic_prompt_caching_async_stream.yaml
│ │ │ └── test_anthropic_prompt_caching_stream.yaml
│ │ └── test_thinking
│ │ │ ├── test_anthropic_thinking.yaml
│ │ │ ├── test_anthropic_thinking_streaming.yaml
│ │ │ ├── test_async_anthropic_thinking.yaml
│ │ │ └── test_async_anthropic_thinking_streaming.yaml
│ │ ├── conftest.py
│ │ ├── data
│ │ ├── 1024+tokens.txt
│ │ └── logo.jpg
│ │ ├── test_completion.py
│ │ ├── test_messages.py
│ │ ├── test_prompt_caching.py
│ │ ├── test_thinking.py
│ │ └── utils.py
├── opentelemetry-instrumentation-bedrock
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── bedrock
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── guardrail.py
│ │ │ ├── prompt_caching.py
│ │ │ ├── reusable_streaming_body.py
│ │ │ ├── streaming_wrapper.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── metrics
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ │ ├── test_bedrock_guardrails_metrics
│ │ │ │ ├── test_titan_converse_guardrail.yaml
│ │ │ │ ├── test_titan_converse_stream_guardrail.yaml
│ │ │ │ ├── test_titan_invoke_model_guardrail.yaml
│ │ │ │ └── test_titan_invoke_stream_guardrail.yaml
│ │ │ ├── test_bedrock_metrics
│ │ │ │ └── test_invoke_model_metrics.yaml
│ │ │ └── test_bedrock_prompt_caching_metrics
│ │ │ │ └── test_prompt_cache.yaml
│ │ ├── test_bedrock_guardrails_metrics.py
│ │ ├── test_bedrock_metrics.py
│ │ └── test_bedrock_prompt_caching_metrics.py
│ │ └── traces
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_ai21
│ │ │ └── test_ai21_j2_completion_string_content.yaml
│ │ ├── test_anthropic
│ │ │ ├── test_anthropic_2_completion.yaml
│ │ │ ├── test_anthropic_3_completion_complex_content.yaml
│ │ │ ├── test_anthropic_3_completion_streaming.yaml
│ │ │ ├── test_anthropic_3_completion_string_content.yaml
│ │ │ ├── test_anthropic_cross_region.yaml
│ │ │ └── test_prompt_cache.yaml
│ │ ├── test_cohere
│ │ │ └── test_cohere_completion.yaml
│ │ ├── test_imported_model
│ │ │ └── test_imported_model_completion.yaml
│ │ ├── test_meta
│ │ │ ├── test_meta_converse.yaml
│ │ │ ├── test_meta_converse_stream.yaml
│ │ │ ├── test_meta_llama2_completion_string_content.yaml
│ │ │ └── test_meta_llama3_completion.yaml
│ │ ├── test_nova
│ │ │ ├── test_nova_completion.yaml
│ │ │ ├── test_nova_converse.yaml
│ │ │ ├── test_nova_converse_stream.yaml
│ │ │ ├── test_nova_cross_region_invoke.yaml
│ │ │ └── test_nova_invoke_stream.yaml
│ │ └── test_titan
│ │ │ ├── test_titan_completion.yaml
│ │ │ ├── test_titan_converse.yaml
│ │ │ ├── test_titan_converse_stream.yaml
│ │ │ └── test_titan_invoke_stream.yaml
│ │ ├── test_ai21.py
│ │ ├── test_anthropic.py
│ │ ├── test_cohere.py
│ │ ├── test_imported_model.py
│ │ ├── test_meta.py
│ │ ├── test_nova.py
│ │ └── test_titan.py
├── opentelemetry-instrumentation-chromadb
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── chromadb
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ ├── version.py
│ │ │ └── wrapper.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ └── test_query.py
├── opentelemetry-instrumentation-cohere
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── cohere
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_chat
│ │ │ └── test_cohere_chat.yaml
│ │ ├── test_completion
│ │ │ └── test_cohere_completion.yaml
│ │ └── test_rerank
│ │ │ └── test_cohere_rerank.yaml
│ │ ├── conftest.py
│ │ ├── test_chat.py
│ │ ├── test_completion.py
│ │ └── test_rerank.py
├── opentelemetry-instrumentation-crewai
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── crewai
│ │ │ ├── __init__.py
│ │ │ ├── crewai_span_attributes.py
│ │ │ ├── instrumentation.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ └── test_crewai_instrumentation.py
├── opentelemetry-instrumentation-google-generativeai
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── google_generativeai
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ └── test_generate_content.py
├── opentelemetry-instrumentation-groq
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── groq
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── data
│ │ └── logo.jpg
│ │ └── traces
│ │ ├── cassettes
│ │ └── test_chat_tracing
│ │ │ ├── test_async_chat.yaml
│ │ │ ├── test_chat.yaml
│ │ │ └── test_chat_streaming.yaml
│ │ ├── conftest.py
│ │ └── test_chat_tracing.py
├── opentelemetry-instrumentation-haystack
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── haystack
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ ├── version.py
│ │ │ ├── wrap_node.py
│ │ │ ├── wrap_openai.py
│ │ │ └── wrap_pipeline.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ └── test_simple_pipeline
│ │ │ └── test_haystack.yaml
│ │ ├── conftest.py
│ │ ├── test_placeholder.py
│ │ └── test_simple_pipeline.py
├── opentelemetry-instrumentation-lancedb
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── lancedb
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ ├── version.py
│ │ │ └── wrapper.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ └── test_query.py
├── opentelemetry-instrumentation-langchain
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── langchain
│ │ │ ├── __init__.py
│ │ │ ├── callback_handler.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_agents
│ │ │ └── test_agents.yaml
│ │ ├── test_chains
│ │ │ ├── test_asequential_chain.yaml
│ │ │ ├── test_astream.yaml
│ │ │ ├── test_sequential_chain.yaml
│ │ │ └── test_stream.yaml
│ │ ├── test_documents_chains
│ │ │ └── test_sequential_chain.yaml
│ │ ├── test_lcel
│ │ │ ├── test_async_invoke.yaml
│ │ │ ├── test_async_lcel.yaml
│ │ │ ├── test_invoke.yaml
│ │ │ ├── test_lcel_with_datetime.yaml
│ │ │ ├── test_simple_lcel.yaml
│ │ │ └── test_stream.yaml
│ │ ├── test_llms
│ │ │ ├── test_anthropic.yaml
│ │ │ ├── test_bedrock.yaml
│ │ │ ├── test_custom_llm.yaml
│ │ │ ├── test_openai.yaml
│ │ │ ├── test_openai_functions.yaml
│ │ │ ├── test_trace_propagation[ChatOpenAI].yaml
│ │ │ ├── test_trace_propagation[OpenAI].yaml
│ │ │ ├── test_trace_propagation[VLLMOpenAI].yaml
│ │ │ ├── test_trace_propagation_async[ChatOpenAI].yaml
│ │ │ ├── test_trace_propagation_async[OpenAI].yaml
│ │ │ ├── test_trace_propagation_async[VLLMOpenAI].yaml
│ │ │ ├── test_trace_propagation_stream[ChatOpenAI].yaml
│ │ │ ├── test_trace_propagation_stream[OpenAI].yaml
│ │ │ ├── test_trace_propagation_stream[VLLMOpenAI].yaml
│ │ │ ├── test_trace_propagation_stream_async[ChatOpenAI].yaml
│ │ │ ├── test_trace_propagation_stream_async[OpenAI].yaml
│ │ │ └── test_trace_propagation_stream_async[VLLMOpenAI].yaml
│ │ ├── test_structured_output
│ │ │ └── test_structured_output.yaml
│ │ └── test_tool_calls
│ │ │ ├── test_parallel_tool_calls.yaml
│ │ │ ├── test_tool_calls.yaml
│ │ │ ├── test_tool_calls_anthropic_text_block.yaml
│ │ │ ├── test_tool_calls_anthropic_text_block_and_history.yaml
│ │ │ └── test_tool_calls_with_history.yaml
│ │ ├── conftest.py
│ │ ├── metrics
│ │ ├── cassettes
│ │ │ └── test_langchain_metrics
│ │ │ │ ├── test_llm_chain_metrics.yaml
│ │ │ │ └── test_llm_chain_streaming_metrics.yaml
│ │ └── test_langchain_metrics.py
│ │ ├── test_agents.py
│ │ ├── test_chains.py
│ │ ├── test_documents_chains.py
│ │ ├── test_lcel.py
│ │ ├── test_llms.py
│ │ ├── test_structured_output.py
│ │ └── test_tool_calls.py
├── opentelemetry-instrumentation-llamaindex
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── data
│ │ └── paul_graham
│ │ │ └── paul_graham_essay.txt
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── llamaindex
│ │ │ ├── __init__.py
│ │ │ ├── base_agent_instrumentor.py
│ │ │ ├── base_embedding_instrumentor.py
│ │ │ ├── base_retriever_instrumentor.py
│ │ │ ├── base_synthesizer_instrumentor.py
│ │ │ ├── base_tool_instrumentor.py
│ │ │ ├── config.py
│ │ │ ├── custom_llm_instrumentor.py
│ │ │ ├── dispatcher_wrapper.py
│ │ │ ├── query_pipeline_instrumentor.py
│ │ │ ├── retriever_query_engine_instrumentor.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_agents
│ │ │ ├── test_agent_with_multiple_tools.yaml
│ │ │ ├── test_agent_with_query_tool.yaml
│ │ │ └── test_agents_and_tools.yaml
│ │ ├── test_chroma_vector_store
│ │ │ └── test_rag_with_chroma.yaml
│ │ └── test_query_pipeline
│ │ │ └── test_query_pipeline.yaml
│ │ ├── conftest.py
│ │ ├── test_agents.py
│ │ ├── test_chroma_vector_store.py
│ │ ├── test_instrumentation.py
│ │ └── test_query_pipeline.py
├── opentelemetry-instrumentation-marqo
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── marqo
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ ├── version.py
│ │ │ └── wrapper.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ └── test_query
│ │ │ ├── test_marqo_add_documents.yaml
│ │ │ ├── test_marqo_delete_documents.yaml
│ │ │ └── test_marqo_search.yaml
│ │ ├── conftest.py
│ │ └── test_query.py
├── opentelemetry-instrumentation-mcp
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── mcp
│ │ │ ├── __init__.py
│ │ │ ├── instrumentation.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ └── test_mcp_instrumentation.py
├── opentelemetry-instrumentation-milvus
│ ├── .flake8
│ ├── .gitignore
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── milvus
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ ├── version.py
│ │ │ └── wrapper.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_hybrid_search.py
│ │ ├── test_query.py
│ │ └── test_search.py
├── opentelemetry-instrumentation-mistralai
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── mistralai
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_chat
│ │ │ ├── test_mistralai_async_chat.yaml
│ │ │ ├── test_mistralai_async_streaming_chat.yaml
│ │ │ ├── test_mistralai_chat.yaml
│ │ │ └── test_mistralai_streaming_chat.yaml
│ │ └── test_embeddings
│ │ │ ├── test_mistral_async_embeddings.yaml
│ │ │ └── test_mistral_embeddings.yaml
│ │ ├── conftest.py
│ │ ├── test_chat.py
│ │ └── test_embeddings.py
├── opentelemetry-instrumentation-ollama
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── ollama
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_chat
│ │ │ ├── test_ollama_async_chat.yaml
│ │ │ ├── test_ollama_async_streaming_chat.yaml
│ │ │ ├── test_ollama_chat.yaml
│ │ │ ├── test_ollama_chat_tool_calls.yaml
│ │ │ └── test_ollama_streaming_chat.yaml
│ │ ├── test_embeddings
│ │ │ └── test_ollama_embeddings.yaml
│ │ ├── test_generation
│ │ │ ├── test_ollama_async_generation.yaml
│ │ │ ├── test_ollama_async_streaming_generation.yaml
│ │ │ ├── test_ollama_generation.yaml
│ │ │ └── test_ollama_streaming_generation.yaml
│ │ └── test_ollama_metrics
│ │ │ └── test_ollama_streaming_metrics.yaml
│ │ ├── conftest.py
│ │ ├── test_chat.py
│ │ ├── test_embeddings.py
│ │ ├── test_generation.py
│ │ └── test_ollama_metrics.py
├── opentelemetry-instrumentation-openai
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── openai
│ │ │ ├── __init__.py
│ │ │ ├── shared
│ │ │ ├── __init__.py
│ │ │ ├── chat_wrappers.py
│ │ │ ├── completion_wrappers.py
│ │ │ ├── config.py
│ │ │ ├── embeddings_wrappers.py
│ │ │ └── image_gen_wrappers.py
│ │ │ ├── utils.py
│ │ │ ├── v0
│ │ │ └── __init__.py
│ │ │ ├── v1
│ │ │ ├── __init__.py
│ │ │ ├── assistant_wrappers.py
│ │ │ └── event_handler_wrapper.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ ├── pytest.ini
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── data
│ │ └── 1024+tokens.txt
│ │ ├── metrics
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ │ └── test_openai_metrics
│ │ │ │ ├── test_chat_completion_metrics.yaml
│ │ │ │ ├── test_chat_completion_metrics_stream.yaml
│ │ │ │ ├── test_chat_parsed_completion_metrics.yaml
│ │ │ │ ├── test_chat_streaming_metrics.yaml
│ │ │ │ ├── test_embeddings_metrics.yaml
│ │ │ │ └── test_image_gen_metrics.yaml
│ │ ├── conftest.py
│ │ └── test_openai_metrics.py
│ │ └── traces
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_assistant
│ │ │ ├── test_existing_assistant.yaml
│ │ │ ├── test_new_assistant.yaml
│ │ │ ├── test_new_assistant_with_polling.yaml
│ │ │ ├── test_streaming_existing_assistant.yaml
│ │ │ └── test_streaming_new_assistant.yaml
│ │ ├── test_azure
│ │ │ ├── test_chat.yaml
│ │ │ ├── test_chat_async_streaming.yaml
│ │ │ ├── test_chat_content_filtering.yaml
│ │ │ ├── test_chat_streaming.yaml
│ │ │ └── test_prompt_content_filtering.yaml
│ │ ├── test_chat
│ │ │ ├── test_chat.yaml
│ │ │ ├── test_chat_async_context_propagation.yaml
│ │ │ ├── test_chat_async_streaming.yaml
│ │ │ ├── test_chat_context_propagation.yaml
│ │ │ ├── test_chat_pydantic_based_tool_calls.yaml
│ │ │ ├── test_chat_streaming.yaml
│ │ │ ├── test_chat_tool_calls.yaml
│ │ │ ├── test_chat_tools.yaml
│ │ │ ├── test_chat_tools_async_streaming.yaml
│ │ │ ├── test_chat_tools_streaming.yaml
│ │ │ └── test_with_asyncio_run.yaml
│ │ ├── test_completions
│ │ │ ├── test_async_completion.yaml
│ │ │ ├── test_async_completion_context_propagation.yaml
│ │ │ ├── test_async_completion_streaming.yaml
│ │ │ ├── test_completion.yaml
│ │ │ ├── test_completion_context_propagation.yaml
│ │ │ ├── test_completion_langchain_style.yaml
│ │ │ └── test_completion_streaming.yaml
│ │ ├── test_embeddings
│ │ │ ├── test_async_embeddings_context_propagation.yaml
│ │ │ ├── test_azure_openai_embeddings.yaml
│ │ │ ├── test_embeddings.yaml
│ │ │ ├── test_embeddings_context_propagation.yaml
│ │ │ └── test_embeddings_with_raw_response.yaml
│ │ ├── test_exceptions
│ │ │ └── test_exception_in_instrumentation_suppressed.yaml
│ │ ├── test_functions
│ │ │ ├── test_open_ai_function_calls.yaml
│ │ │ ├── test_open_ai_function_calls_tools.yaml
│ │ │ ├── test_open_ai_function_calls_tools_parallel.yaml
│ │ │ ├── test_open_ai_function_calls_tools_streaming.yaml
│ │ │ └── test_open_ai_function_calls_tools_streaming_parallel.yaml
│ │ ├── test_prompt_caching
│ │ │ ├── test_openai_prompt_caching.yaml
│ │ │ └── test_openai_prompt_caching_async.yaml
│ │ ├── test_structured
│ │ │ ├── test_async_parsed_completion.yaml
│ │ │ ├── test_async_parsed_refused_completion.yaml
│ │ │ ├── test_parsed_completion.yaml
│ │ │ └── test_parsed_refused_completion.yaml
│ │ └── test_vision
│ │ │ ├── test_vision.yaml
│ │ │ └── test_vision_base64.yaml
│ │ ├── conftest.py
│ │ ├── test_assistant.py
│ │ ├── test_azure.py
│ │ ├── test_chat.py
│ │ ├── test_completions.py
│ │ ├── test_embedding_metrics_handler.py
│ │ ├── test_embeddings.py
│ │ ├── test_exceptions.py
│ │ ├── test_functions.py
│ │ ├── test_prompt_caching.py
│ │ ├── test_structured.py
│ │ ├── test_vision.py
│ │ └── utils.py
├── opentelemetry-instrumentation-pinecone
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── pinecone
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── query_handlers.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ └── test_query
│ │ │ └── test_pinecone_retrieval.yaml
│ │ ├── conftest.py
│ │ └── test_query.py
├── opentelemetry-instrumentation-qdrant
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── qdrant
│ │ │ ├── __init__.py
│ │ │ ├── async_qdrant_client_methods.json
│ │ │ ├── config.py
│ │ │ ├── qdrant_client_methods.json
│ │ │ ├── utils.py
│ │ │ ├── version.py
│ │ │ └── wrapper.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ └── test_qdrant_instrumentation.py
├── opentelemetry-instrumentation-replicate
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── replicate
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_image_generation
│ │ │ ├── test_replicate_image_generation.yaml
│ │ │ └── test_replicate_image_generation_predictions.yaml
│ │ └── test_llama
│ │ │ └── test_replicate_llama_stream.yaml
│ │ ├── conftest.py
│ │ ├── test_image_generation.py
│ │ └── test_llama.py
├── opentelemetry-instrumentation-sagemaker
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── sagemaker
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── reusable_streaming_body.py
│ │ │ ├── streaming_wrapper.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ └── test_invocation
│ │ │ └── test_sagemaker_completion_string_content.yaml
│ │ ├── conftest.py
│ │ └── test_invocation.py
├── opentelemetry-instrumentation-together
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── together
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ ├── pytest.ini
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_chat
│ │ │ └── test_together_chat.yaml
│ │ └── test_completion
│ │ │ └── test_together_completion.yaml
│ │ ├── conftest.py
│ │ ├── test_chat.py
│ │ └── test_completion.py
├── opentelemetry-instrumentation-transformers
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── transformers
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── text_generation_pipeline_wrapper.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ └── test_placeholder.py
├── opentelemetry-instrumentation-vertexai
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── vertexai
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── disabled_test_bison.py
│ │ ├── disabled_test_gemini.py
│ │ └── test_placeholder.py
├── opentelemetry-instrumentation-watsonx
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── watsonx
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── metrics
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ │ └── test_watsonx_metrics
│ │ │ │ ├── test_generate_metrics.yaml
│ │ │ │ └── test_generate_stream_metrics.yaml
│ │ ├── conftest.py
│ │ └── test_watsonx_metrics.py
│ │ └── traces
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ └── test_generate
│ │ │ ├── test_generate.yaml
│ │ │ └── test_generate_text_stream.yaml
│ │ ├── conftest.py
│ │ └── test_generate.py
├── opentelemetry-instrumentation-weaviate
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── instrumentation
│ │ │ └── weaviate
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── utils.py
│ │ │ ├── version.py
│ │ │ └── wrapper.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── cassettes
│ │ ├── test_weaviate_instrumentation
│ │ │ ├── test_weaviate_create_batch.yaml
│ │ │ ├── test_weaviate_create_collection.yaml
│ │ │ ├── test_weaviate_create_collection_from_dict.yaml
│ │ │ ├── test_weaviate_delete_all.yaml
│ │ │ ├── test_weaviate_delete_collection.yaml
│ │ │ ├── test_weaviate_get_collection.yaml
│ │ │ ├── test_weaviate_insert_data.yaml
│ │ │ ├── test_weaviate_query_aggregate.yaml
│ │ │ └── test_weaviate_query_raw.yaml
│ │ └── test_weaviate_instrumentation_v3
│ │ │ ├── test_weaviate_create_batch.yaml
│ │ │ ├── test_weaviate_create_data_object.yaml
│ │ │ ├── test_weaviate_create_schema.yaml
│ │ │ ├── test_weaviate_create_schemas.yaml
│ │ │ ├── test_weaviate_delete_all.yaml
│ │ │ ├── test_weaviate_delete_schema.yaml
│ │ │ ├── test_weaviate_get_schema.yaml
│ │ │ ├── test_weaviate_query_aggregate.yaml
│ │ │ ├── test_weaviate_query_get.yaml
│ │ │ └── test_weaviate_query_raw.yaml
│ │ ├── conftest.py
│ │ ├── test_weaviate_instrumentation.py
│ │ └── test_weaviate_instrumentation_v3.py
├── opentelemetry-semantic-conventions-ai
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── opentelemetry
│ │ └── semconv_ai
│ │ │ ├── __init__.py
│ │ │ ├── utils.py
│ │ │ └── version.py
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ └── test_placeholder.py
├── sample-app
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── data
│ │ ├── paul_graham
│ │ │ └── paul_graham_essay.txt
│ │ ├── scifact
│ │ │ ├── scifact_claims.jsonl
│ │ │ └── scifact_corpus.jsonl
│ │ ├── sherlock
│ │ │ └── firstchapter.txt
│ │ └── vision
│ │ │ └── elephant.jpeg
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ ├── sample_app
│ │ ├── __init__.py
│ │ ├── anthropic_joke_example.py
│ │ ├── anthropic_joke_streaming_example.py
│ │ ├── anthropic_vision_base64_example.py
│ │ ├── async_anthropic_example.py
│ │ ├── async_anthropic_joke_streaming.py
│ │ ├── async_methods_decorated_app.py
│ │ ├── azure_openai.py
│ │ ├── bedrock_example_app.py
│ │ ├── chroma_app.py
│ │ ├── chroma_sentence_transformer_app.py
│ │ ├── classes_decorated_app.py
│ │ ├── cohere_example.py
│ │ ├── crewai_example.py
│ │ ├── gemini.py
│ │ ├── groq_example.py
│ │ ├── haystack_app.py
│ │ ├── langchain_agent.py
│ │ ├── langchain_app.py
│ │ ├── langchain_lcel.py
│ │ ├── langchain_watsonx.py
│ │ ├── langgraph_example.py
│ │ ├── litellm_example.py
│ │ ├── llama_index_chroma_app.py
│ │ ├── llama_index_chroma_huggingface_app.py
│ │ ├── llama_index_workflow_app.py
│ │ ├── mcp_sonnet_example.py
│ │ ├── methods_decorated_app.py
│ │ ├── ollama_streaming.py
│ │ ├── openai_assistant.py
│ │ ├── openai_functions.py
│ │ ├── openai_streaming.py
│ │ ├── openai_streaming_assistant.py
│ │ ├── openai_structured_outputs.py
│ │ ├── openai_vision_base64_example.py
│ │ ├── pinecone_app.py
│ │ ├── pinecone_app_sentence_transformers.py
│ │ ├── prompt_registry_example_app.py
│ │ ├── prompt_registry_vision.py
│ │ ├── redis_rag_app.py
│ │ ├── replicate_functions.py
│ │ ├── replicate_streaming.py
│ │ ├── thread_pool_example.py
│ │ ├── vertexai_streaming.py
│ │ ├── watsonx-langchain.py
│ │ ├── watsonx_flow.py
│ │ ├── watsonx_generate.py
│ │ ├── weaviate_v3.py
│ │ └── weaviate_v4.py
│ └── tests
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ └── test_placeholder.py
└── traceloop-sdk
│ ├── .flake8
│ ├── .python-version
│ ├── README.md
│ ├── poetry.lock
│ ├── poetry.toml
│ ├── project.json
│ ├── pyproject.toml
│ ├── tests
│ ├── __init__.py
│ ├── __pycache__
│ │ └── conftest.cpython-311-pytest-7.4.0.pyc
│ ├── cassettes
│ │ ├── test_association_properties
│ │ │ ├── test_langchain_and_external_association_properties.yaml
│ │ │ └── test_langchain_association_properties.yaml
│ │ ├── test_manual
│ │ │ ├── test_manual_report.yaml
│ │ │ └── test_resource_attributes.yaml
│ │ ├── test_privacy_no_prompts
│ │ │ └── test_simple_workflow.yaml
│ │ ├── test_prompt_management
│ │ │ └── test_prompt_management.yaml
│ │ ├── test_sdk_initialization
│ │ │ ├── test_resource_attributes.yaml
│ │ │ └── test_span_postprocess_callback.yaml
│ │ ├── test_tasks
│ │ │ └── test_task_io_serialization_with_langchain.yaml
│ │ └── test_workflows
│ │ │ ├── test_simple_aworkflow.yaml
│ │ │ ├── test_simple_workflow.yaml
│ │ │ └── test_streaming_workflow.yaml
│ ├── conftest.py
│ ├── test_association_properties.py
│ ├── test_class_tasks.py
│ ├── test_client.py
│ ├── test_manual.py
│ ├── test_nested_tasks.py
│ ├── test_privacy_no_prompts.py
│ ├── test_prompt_management.py
│ ├── test_sdk_initialization.py
│ ├── test_tasks.py
│ ├── test_user_feedback.py
│ └── test_workflows.py
│ └── traceloop
│ └── sdk
│ ├── __init__.py
│ ├── annotation
│ ├── __init__.py
│ ├── base_annotation.py
│ └── user_feedback.py
│ ├── client
│ ├── __init__.py
│ ├── client.py
│ └── http.py
│ ├── config
│ └── __init__.py
│ ├── decorators
│ ├── __init__.py
│ └── base.py
│ ├── fetcher.py
│ ├── images
│ └── image_uploader.py
│ ├── instruments.py
│ ├── logging
│ ├── __init__.py
│ └── logging.py
│ ├── metrics
│ ├── __init__.py
│ └── metrics.py
│ ├── prompts
│ ├── __init__.py
│ ├── client.py
│ ├── model.py
│ └── registry.py
│ ├── telemetry.py
│ ├── tracing
│ ├── __init__.py
│ ├── content_allow_list.py
│ ├── context_manager.py
│ ├── manual.py
│ └── tracing.py
│ ├── utils
│ ├── __init__.py
│ ├── in_memory_span_exporter.py
│ ├── json_encoder.py
│ └── package_check.py
│ └── version.py
└── scripts
└── build-release.sh
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | - [ ] I have added tests that cover my changes.
4 | - [ ] If adding a new instrumentation or changing an existing one, I've added screenshots from an observability platform showing the change.
5 | - [ ] PR name follows conventional commits format: `feat(instrumentation): ...` or `fix(instrumentation): ...`.
6 | - [ ] (If applicable) I have updated the documentation accordingly.
7 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to OpenLLMetry
2 |
3 | Thanks for taking the time to contribute! 😃 🚀
4 |
5 | Please refer to our [Contributing Guide](https://traceloop.com/docs/openllmetry/contributing/overview) for instructions on how to contribute.
6 |
7 |
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 |
3 | This document contains a list of maintainers in this repo.
4 | If you're interested in contributing and becoming a maintainer, see [CONTRIBUTING](CONTRIBUTING.md).
5 |
6 | ## Current Maintainers
7 |
8 | | Maintainer | GitHub ID | Email |
9 | | -------------- | --------------------------------------------------- | --------------------- |
10 | | Nir Gazit | [nirga](https://github.com/nirga) | nir@traceloop.com |
11 | | Gal Kleinman | [galkleinman](https://github.com/galkleinman) | gal@traceloop.com |
12 | | Tomer Friedman | [tomer-friedman](https://github.com/tomer-friedman) | tomer@traceloop.com |
13 | | Paolo Rechia | [paolorechia](https://github.com/paolorechia) | paolorechia@gmail.com |
14 |
--------------------------------------------------------------------------------
/img/logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/img/logo-dark.png
--------------------------------------------------------------------------------
/img/logo-light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/img/logo-light.png
--------------------------------------------------------------------------------
/nx.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "nx/presets/npm.json",
3 | "$schema": "./node_modules/nx/schemas/nx-schema.json",
4 | "plugins": ["@nxlv/python"]
5 | }
6 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "openllmetry",
3 | "version": "0.0.0",
4 | "license": "MIT",
5 | "scripts": {},
6 | "private": true,
7 | "devDependencies": {
8 | "@nxlv/python": "^20.14.0",
9 | "nx": "^20.8.1"
10 | },
11 | "workspaces": [
12 | "packages/*"
13 | ]
14 | }
15 |
--------------------------------------------------------------------------------
/packages/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/.gitkeep
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Aleph Alpha Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing calls to any of Aleph Alpha's endpoints sent with the official [Aleph Alpha Client](https://github.com/Aleph-Alpha/aleph-alpha-client).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-alephalpha
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.alephalpha import AlephAlphaInstrumentor
19 |
20 | AlephAlphaInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/opentelemetry/instrumentation/alephalpha/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional callable invoked with the exception whenever code guarded by
    # @dont_throw (see utils.py) fails; None disables the callback.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/opentelemetry/instrumentation/alephalpha/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from opentelemetry.instrumentation.alephalpha.config import Config
3 | import traceback
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    On failure the wrapped function returns None, the traceback is logged at
    DEBUG level, and Config.exception_logger (if set) receives the exception.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves __name__/__doc__/etc. so the wrapped function
    # stays introspectable (previously all wrapped functions reported "wrapper").
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/opentelemetry/instrumentation/alephalpha/version.py:
--------------------------------------------------------------------------------
# Instrumentation package version string.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | asyncio_mode=auto
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import os
4 | import pytest
5 | from opentelemetry import trace
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
8 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
9 | from opentelemetry.instrumentation.alephalpha import AlephAlphaInstrumentor
10 |
11 | pytest_plugins = []
12 |
13 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter on a fresh TracerProvider and
    instrument Aleph Alpha once for the whole test session.

    Returns the InMemorySpanExporter so tests can read finished spans.
    """
    exporter = InMemorySpanExporter()
    processor = SimpleSpanProcessor(exporter)

    provider = TracerProvider()
    provider.add_span_processor(processor)
    trace.set_tracer_provider(provider)

    AlephAlphaInstrumentor().instrument()

    return exporter
26 |
27 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    """Drop previously captured spans so each test asserts only its own."""
    exporter.clear()
31 |
32 |
@pytest.fixture(autouse=True)
def environment():
    """Ensure AA_TOKEN exists so tests replayed from VCR cassettes run
    without real credentials (a real token, if set, is left untouched)."""
    if "AA_TOKEN" not in os.environ:
        os.environ["AA_TOKEN"] = "test_api_key"
37 |
38 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip auth headers from recorded cassettes; decode gzip responses so
    # recorded bodies stay human-readable.
    return {"filter_headers": ["authorization"], "decode_compressed_response": True}
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-alephalpha/tests/test_completion.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from aleph_alpha_client import Client, Prompt, CompletionRequest
4 |
5 |
@pytest.mark.vcr
def test_alephalpha_completion(exporter):
    """A completion call emits an `alephalpha.completion` span carrying the
    prompt, the returned completion, and token-usage attributes."""
    client = Client(token=os.environ.get("AA_TOKEN"))
    prompt_text = "Tell me a joke about OpenTelemetry."
    params = {
        "prompt": Prompt.from_text(prompt_text),
        "maximum_tokens": 1000,
    }
    request = CompletionRequest(**params)
    response = client.complete(request, model="luminous-base")

    spans = exporter.get_finished_spans()
    # Renamed from `together_span` — a copy-paste leftover from another package.
    alephalpha_span = spans[0]
    assert alephalpha_span.name == "alephalpha.completion"
    assert alephalpha_span.attributes.get("gen_ai.system") == "AlephAlpha"
    assert alephalpha_span.attributes.get("llm.request.type") == "completion"
    assert alephalpha_span.attributes.get("gen_ai.request.model") == "luminous-base"
    assert (
        alephalpha_span.attributes.get("gen_ai.prompt.0.content")
        == "Tell me a joke about OpenTelemetry."
    )
    assert (
        alephalpha_span.attributes.get("gen_ai.completion.0.content")
        == response.completions[0].completion
    )
    assert alephalpha_span.attributes.get("gen_ai.usage.prompt_tokens") == 9
    # Total must equal prompt + completion tokens.
    assert alephalpha_span.attributes.get(
        "llm.usage.total_tokens"
    ) == alephalpha_span.attributes.get(
        "gen_ai.usage.completion_tokens"
    ) + alephalpha_span.attributes.get(
        "gen_ai.usage.prompt_tokens"
    )
39 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Anthropic Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing Anthropic prompts and completions sent with the official [Anthropic library](https://github.com/anthropics/anthropic-sdk-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-anthropic
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
19 |
20 | AnthropicInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/config.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, Optional
2 | from typing_extensions import Coroutine
3 |
4 |
class Config:
    # When True, instrumentation performs extra work to attach token-usage
    # counts to spans — presumably via local tokenization; TODO confirm.
    enrich_token_usage = False
    # Optional callable invoked with exceptions raised while tracing; None disables it.
    exception_logger = None
    # Callable returning attributes merged into recorded metric data points.
    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
    # Optional async hook for base64-encoded images found in messages: takes
    # four string arguments and returns a string (presumably the uploaded
    # image's URL — confirm against the instrumentation that calls it).
    upload_base64_image: Optional[
        Callable[[str, str, str, str], Coroutine[None, None, str]]
    ] = None
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/version.py:
--------------------------------------------------------------------------------
# Instrumentation package version string.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/tests/data/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/opentelemetry-instrumentation-anthropic/tests/data/logo.jpg
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
3 | from opentelemetry.semconv_ai import SpanAttributes
4 |
5 | from .utils import verify_metrics
6 |
7 |
@pytest.mark.vcr
def test_anthropic_completion(exporter, reader):
    """A completion call yields an `anthropic.completion` span with the prompt,
    completion content, response id, and matching token metrics."""
    client = Anthropic()
    client.completions.create(
        prompt=f"{HUMAN_PROMPT}\nHello world\n{AI_PROMPT}",
        model="claude-instant-1.2",
        max_tokens_to_sample=2048,
        top_p=0.1,
    )
    # Deliberately invalid request: the client error is expected and swallowed;
    # the span produced for the failed call must still be named
    # "anthropic.completion" (checked by the all(...) assertion below).
    try:
        client.completions.create(
            unknown_parameter="unknown",
        )
    except Exception:
        pass

    spans = exporter.get_finished_spans()
    assert all(span.name == "anthropic.completion" for span in spans)

    # First span corresponds to the successful call above.
    anthropic_span = spans[0]
    assert (
        anthropic_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
        == f"{HUMAN_PROMPT}\nHello world\n{AI_PROMPT}"
    )
    assert anthropic_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content")
    assert anthropic_span.attributes.get("gen_ai.response.id") == "compl_01EjfrPvPEsRDRUKD6VoBxtK"

    metrics_data = reader.get_metrics_data()
    resource_metrics = metrics_data.resource_metrics

    verify_metrics(resource_metrics, "claude-instant-1.2", ignore_zero_input_tokens=True)
39 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Bedrock Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing any of AWS Bedrock's models prompts and completions sent with [Boto3](https://github.com/boto/boto3) to Bedrock.
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-bedrock
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.bedrock import BedrockInstrumentor
19 |
20 | BedrockInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/opentelemetry/instrumentation/bedrock/config.py:
--------------------------------------------------------------------------------
class Config:
    # When True, instrumentation performs extra work to attach token-usage
    # counts to spans — TODO confirm exact mechanism against __init__.py.
    enrich_token_usage = False
    # Optional callable invoked with the exception whenever code guarded by
    # @dont_throw (see utils.py) fails; None disables the callback.
    exception_logger = None
4 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/opentelemetry/instrumentation/bedrock/prompt_caching.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import trace
2 |
3 |
class CachingHeaders:
    # Bedrock response headers carrying prompt-cache input-token counts.
    READ = "x-amzn-bedrock-cache-read-input-token-count"
    WRITE = "x-amzn-bedrock-cache-write-input-token-count"
7 |
8 |
class CacheSpanAttrs:  # TODO: move it under SemConv pkg
    # Metric attribute distinguishing cache "read" vs "write" data points.
    TYPE = "gen_ai.cache.type"
    # Span attribute set when a non-zero cache read/write occurred.
    CACHED = "gen_ai.prompt_caching"
12 |
13 |
def prompt_caching_handling(headers, vendor, model, metric_params):
    """Record prompt-cache token counts reported in Bedrock response headers.

    For each cache header present, the token count is added to the
    ``metric_params.prompt_caching`` counter (tagged with the cache type), and
    when the count is non-zero the current span is tagged with the access type.
    If both read and write counts are non-zero, "write" overwrites "read" on
    the span, matching the original sequential behavior.

    @param headers: response-header mapping, possibly containing cache counts
    @param vendor: model vendor, recorded as ``gen_ai.system``
    @param model: model id, recorded as ``gen_ai.response.model``
    @param metric_params: object exposing a ``prompt_caching`` counter
    """
    base_attrs = {
        "gen_ai.system": vendor,
        "gen_ai.response.model": model,
    }
    span = trace.get_current_span()
    # READ and WRITE are handled identically apart from the cache-type tag.
    for header, cache_type in (
        (CachingHeaders.READ, "read"),
        (CachingHeaders.WRITE, "write"),
    ):
        if header in headers:
            cached_tokens = int(headers[header])
            metric_params.prompt_caching.add(
                cached_tokens,
                attributes={
                    **base_attrs,
                    CacheSpanAttrs.TYPE: cache_type,
                },
            )
            if cached_tokens > 0:
                span.set_attribute(CacheSpanAttrs.CACHED, cache_type)
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/opentelemetry/instrumentation/bedrock/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 |
4 | from opentelemetry.instrumentation.bedrock.config import Config
5 |
6 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    On failure the wrapped function returns None, the traceback is logged at
    DEBUG level, and Config.exception_logger (if set) receives the exception.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves __name__/__doc__/etc. so the wrapped function
    # stays introspectable (previously all wrapped functions reported "wrapper").
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/opentelemetry/instrumentation/bedrock/version.py:
--------------------------------------------------------------------------------
# Instrumentation package version string.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/bedrock"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-bedrock"
11 | version = "0.40.7"
12 | description = "OpenTelemetry Bedrock instrumentation"
13 | authors = [
14 | "Gal Kleinman ",
15 | "Nir Gazit ",
16 | "Tomer Friedman ",
17 | ]
18 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-bedrock"
19 | license = "Apache-2.0"
20 | readme = "README.md"
21 |
22 | [[tool.poetry.packages]]
23 | include = "opentelemetry/instrumentation/bedrock"
24 |
25 | [tool.poetry.dependencies]
26 | python = ">=3.9,<4"
27 | opentelemetry-api = "^1.28.0"
28 | opentelemetry-instrumentation = ">=0.50b0"
29 | opentelemetry-semantic-conventions = ">=0.50b0"
30 | opentelemetry-semantic-conventions-ai = "0.4.9"
31 | anthropic = ">=0.17.0"
32 | tokenizers = ">=0.13.0"
33 |
34 | [tool.poetry.group.dev.dependencies]
35 | autopep8 = "^2.2.0"
36 | flake8 = "7.0.0"
37 |
38 | [tool.poetry.group.test.dependencies]
39 | boto3 = "^1.34.120"
40 | vcrpy = "^6.0.1"
41 | pytest = "^8.2.2"
42 | pytest-sugar = "1.0.0"
43 | pytest-recording = "^0.13.1"
44 | opentelemetry-sdk = "^1.27.0"
45 |
46 | [build-system]
47 | requires = ["poetry-core"]
48 | build-backend = "poetry.core.masonry.api"
49 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/tests/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-bedrock/tests/traces/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Chroma Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing client-side calls to Chroma vector DB sent with the official [Chroma library](https://github.com/chroma-core/chroma).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-chromadb
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.chromadb import ChromaInstrumentor
19 |
20 | ChromaInstrumentor().instrument()
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/opentelemetry/instrumentation/chromadb/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional callable invoked with the exception whenever code guarded by
    # @dont_throw (see utils.py) fails; None disables the callback.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/opentelemetry/instrumentation/chromadb/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.chromadb.config import Config
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    On failure the wrapped function returns None, the traceback is logged at
    DEBUG level, and Config.exception_logger (if set) receives the exception.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves __name__/__doc__/etc. so the wrapped function
    # stays introspectable (previously all wrapped functions reported "wrapper").
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/opentelemetry/instrumentation/chromadb/version.py:
--------------------------------------------------------------------------------
# Instrumentation package version string.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/chromadb"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-chromadb"
11 | version = "0.40.7"
12 | description = "OpenTelemetry Chroma DB instrumentation"
13 | authors = [
14 | "Gal Kleinman ",
15 | "Nir Gazit ",
16 | "Tomer Friedman ",
17 | ]
18 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-chromadb"
19 | license = "Apache-2.0"
20 | readme = "README.md"
21 |
22 | [[tool.poetry.packages]]
23 | include = "opentelemetry/instrumentation/chromadb"
24 |
25 | [tool.poetry.dependencies]
26 | python = ">=3.9,<4"
27 | opentelemetry-api = "^1.28.0"
28 | opentelemetry-semantic-conventions = ">=0.50b0"
29 | opentelemetry-instrumentation = ">=0.50b0"
30 | opentelemetry-semantic-conventions-ai = "0.4.9"
31 |
32 | [tool.poetry.group.dev.dependencies]
33 | autopep8 = "^2.2.0"
34 | flake8 = "7.0.0"
35 | pytest = "^8.2.2"
36 | pytest-sugar = "1.0.0"
37 |
38 | [tool.poetry.group.test.dependencies]
39 | chromadb = "^0.5.0"
40 | pytest = "^8.2.2"
41 | pytest-sugar = "1.0.0"
42 | opentelemetry-sdk = "^1.27.0"
43 |
44 | [build-system]
45 | requires = ["poetry-core"]
46 | build-backend = "poetry.core.masonry.api"
47 |
48 | [tool.poetry.extras]
49 | instruments = ["chromadb"]
50 |
51 | [tool.poetry.plugins."opentelemetry_instrumentor"]
52 | chromadb = "opentelemetry.instrumentation.chromadb:ChromaInstrumentor"
53 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-chromadb/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.chromadb import ChromaInstrumentor
9 |
10 | pytest_plugins = []
11 |
12 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter on a fresh TracerProvider and
    instrument Chroma once for the whole test session.

    Returns the InMemorySpanExporter so tests can read finished spans.
    """
    exporter = InMemorySpanExporter()
    processor = SimpleSpanProcessor(exporter)

    provider = TracerProvider()
    provider.add_span_processor(processor)
    trace.set_tracer_provider(provider)

    ChromaInstrumentor().instrument()

    return exporter
25 |
26 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    """Drop previously captured spans so each test asserts only its own."""
    exporter.clear()
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Cohere Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing calls to any of Cohere's endpoints sent with the official [Cohere library](https://github.com/cohere-ai/cohere-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-cohere
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.cohere import CohereInstrumentor
19 |
20 | CohereInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/opentelemetry/instrumentation/cohere/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional callable invoked with the exception whenever code guarded by
    # @dont_throw (see utils.py) fails; None disables the callback.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/opentelemetry/instrumentation/cohere/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.cohere.config import Config
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    On failure the wrapped function returns None, the traceback is logged at
    DEBUG level, and Config.exception_logger (if set) receives the exception.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves __name__/__doc__/etc. so the wrapped function
    # stays introspectable (previously all wrapped functions reported "wrapper").
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/opentelemetry/instrumentation/cohere/version.py:
--------------------------------------------------------------------------------
# Instrumentation package version string.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import os
4 | import pytest
5 | from opentelemetry import trace
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
8 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
9 | from opentelemetry.instrumentation.cohere import CohereInstrumentor
10 |
11 | pytest_plugins = []
12 |
13 |
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter with Cohere instrumentation installed."""
    memory_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(memory_exporter))
    trace.set_tracer_provider(tracer_provider)

    CohereInstrumentor().instrument()

    return memory_exporter
26 |
27 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Reset captured spans before every test so assertions only see
    # spans produced by the current test.
    exporter.clear()
31 |
32 |
@pytest.fixture(autouse=True)
def environment():
    """Guarantee a COHERE_API_KEY is present so the client can be constructed."""
    os.environ.setdefault("COHERE_API_KEY", "test_api_key")
37 |
38 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip the Authorization header from recorded cassettes so real
    # API keys are never committed.
    return {"filter_headers": ["authorization"]}
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/tests/test_chat.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import cohere
4 | import pytest
5 | from opentelemetry.semconv_ai import SpanAttributes
6 |
7 |
@pytest.mark.vcr
def test_cohere_chat(exporter):
    """Call cohere.Client.chat and verify the span emitted by the instrumentation."""
    co = cohere.Client(os.environ.get("COHERE_API_KEY"))
    res = co.chat(model="command", message="Tell me a joke, pirate style")

    spans = exporter.get_finished_spans()
    cohere_span = spans[0]
    assert cohere_span.name == "cohere.chat"
    assert cohere_span.attributes.get(SpanAttributes.LLM_SYSTEM) == "Cohere"
    assert cohere_span.attributes.get(SpanAttributes.LLM_REQUEST_TYPE) == "chat"
    assert cohere_span.attributes.get(SpanAttributes.LLM_REQUEST_MODEL) == "command"
    assert (
        cohere_span.attributes.get(f"{SpanAttributes.LLM_PROMPTS}.0.content")
        == "Tell me a joke, pirate style"
    )
    assert (
        cohere_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content")
        == res.text
    )
    # Token counts and ids below are pinned to the recorded VCR cassette.
    assert cohere_span.attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) == 58
    # The reported total must equal prompt tokens + completion tokens.
    assert cohere_span.attributes.get(
        SpanAttributes.LLM_USAGE_TOTAL_TOKENS
    ) == cohere_span.attributes.get(
        SpanAttributes.LLM_USAGE_COMPLETION_TOKENS
    ) + cohere_span.attributes.get(
        SpanAttributes.LLM_USAGE_PROMPT_TOKENS
    )
    assert cohere_span.attributes.get("gen_ai.response.id") == "440f51f4-3e47-44b6-a5d7-5ba33edcfc58"
36 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-cohere/tests/test_completion.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import cohere
4 | import pytest
5 | from opentelemetry.semconv_ai import SpanAttributes
6 |
7 |
@pytest.mark.vcr
def test_cohere_completion(exporter):
    """Call cohere.Client.generate and verify the span emitted by the instrumentation."""
    co = cohere.Client(os.environ.get("COHERE_API_KEY"))
    res = co.generate(model="command", prompt="Tell me a joke, pirate style")

    spans = exporter.get_finished_spans()
    cohere_span = spans[0]
    assert cohere_span.name == "cohere.completion"
    assert cohere_span.attributes.get(SpanAttributes.LLM_SYSTEM) == "Cohere"
    assert cohere_span.attributes.get(SpanAttributes.LLM_REQUEST_TYPE) == "completion"
    assert cohere_span.attributes.get(SpanAttributes.LLM_REQUEST_MODEL) == "command"
    assert (
        cohere_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content")
        == res.generations[0].text
    )
    # Response ids are pinned to the recorded VCR cassette.
    assert cohere_span.attributes.get("gen_ai.response.id") == "64c671fc-c536-41fc-adbd-5f7c81177371"
    assert cohere_span.attributes.get("gen_ai.response.0.id") == "13255d0a-eef8-47fc-91f7-d2607d228fbf"
25 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-crewai/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-crewai/.python-version:
--------------------------------------------------------------------------------
1 | 3.10
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-crewai/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry CrewAI Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing agentic workflows implemented with the [crewAI framework](https://github.com/crewAIInc/crewAI).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-crewai
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.crewai import CrewAIInstrumentor
19 |
20 | CrewAIInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-crewai/opentelemetry/instrumentation/crewai/__init__.py:
--------------------------------------------------------------------------------
1 | """OpenTelemetry CrewAI instrumentation"""
2 | from opentelemetry.instrumentation.crewai.version import __version__
3 | from opentelemetry.instrumentation.crewai.instrumentation import CrewAIInstrumentor
4 |
5 | __all__ = ["CrewAIInstrumentor", "__version__"]
6 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-crewai/opentelemetry/instrumentation/crewai/version.py:
--------------------------------------------------------------------------------
# Version of the opentelemetry-instrumentation-crewai package.
__version__ = "0.36.0"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-crewai/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-crewai/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/crewai"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-crewai"
11 | version = "0.40.7"
12 | description = "OpenTelemetry crewAI instrumentation"
13 | authors = ["Gal Kleinman ", "Nir Gazit "]
14 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-crewai"
15 | license = "Apache-2.0"
16 | readme = "README.md"
17 |
18 | [[tool.poetry.packages]]
19 | include = "opentelemetry/instrumentation/crewai"
20 |
21 | [tool.poetry.dependencies]
22 | python = ">=3.10,<4"
23 | opentelemetry-api = "^1.28.0"
24 | opentelemetry-instrumentation = ">=0.50b0"
25 | opentelemetry-semantic-conventions = ">=0.50b0"
26 | opentelemetry-semantic-conventions-ai = "0.4.9"
27 |
28 | [tool.poetry.group.dev.dependencies]
29 | autopep8 = "^2.2.0"
30 | flake8 = "7.1.1"
31 | pytest = "^8.2.2"
32 | pytest-sugar = "1.0.0"
33 |
34 | [tool.poetry.group.test.dependencies]
35 | crewai = { version = "^0.80.0", python = ">=3.10,<=3.13" }
36 | pytest = "^8.2.2"
37 | pytest-sugar = "1.0.0"
38 | pytest-recording = "^0.13.1"
39 | opentelemetry-sdk = "^1.27.0"
40 |
41 | [build-system]
42 | requires = ["poetry-core"]
43 | build-backend = "poetry.core.masonry.api"
44 |
45 | [tool.poetry.extras]
46 | instruments = ["crewai"]
47 |
48 | [tool.poetry.plugins."opentelemetry_instrumentor"]
49 | crewai = "opentelemetry.instrumentation.crewai:CrewAIInstrumentor"
50 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Google Generative AI Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing Google Gemini prompts and completions sent with the official [Google Generative AI library](https://github.com/google-gemini/generative-ai-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-google-generativeai
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.google_generativeai import GoogleGenerativeAiInstrumentor
19 |
20 | GoogleGenerativeAiInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/config.py:
--------------------------------------------------------------------------------
class Config:
    """Mutable, instrumentation-wide settings."""

    # Optional callable invoked with exceptions the instrumentation
    # swallows (see utils.dont_throw); None disables external reporting.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/utils.py:
--------------------------------------------------------------------------------
import functools
import logging
import traceback

from opentelemetry.instrumentation.google_generativeai.config import Config
5 |
6 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves the wrapped function's metadata
    # (__name__, __doc__, __module__) so introspection and debug
    # output stay accurate for decorated functions.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Tracing must never break the instrumented application:
            # log at debug level and optionally report to the
            # user-configured exception logger.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/opentelemetry/instrumentation/google_generativeai/version.py:
--------------------------------------------------------------------------------
# Version of the opentelemetry-instrumentation-google-generativeai package.
__version__ = "0.21.5"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-google-generativeai/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import os
4 | import pytest
5 | from opentelemetry import trace
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
8 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
9 | from opentelemetry.instrumentation.google_generativeai import (
10 | GoogleGenerativeAiInstrumentor,
11 | )
12 |
13 | pytest_plugins = []
14 |
15 |
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter with Google Generative AI instrumentation installed."""
    memory_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(memory_exporter))
    trace.set_tracer_provider(tracer_provider)

    GoogleGenerativeAiInstrumentor().instrument()

    return memory_exporter
28 |
29 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Reset captured spans before every test so assertions only see
    # spans produced by the current test.
    exporter.clear()
33 |
34 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip the Authorization header from recorded cassettes so real
    # API keys are never committed.
    return {"filter_headers": ["authorization"]}
38 |
39 |
@pytest.fixture(autouse=True)
def environment():
    """Guarantee a GOOGLE_API_KEY is present so the client can be constructed."""
    os.environ.setdefault("GOOGLE_API_KEY", "test_api_key")
44 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Groq Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing Groq prompts and completions sent with the official [Groq SDK](https://github.com/groq/groq-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-groq
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.groq import GroqInstrumentor
19 |
20 | GroqInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/config.py:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 |
3 |
class Config:
    """Mutable, instrumentation-wide settings for the Groq instrumentation."""

    # When True, enables enrichment of spans with token-usage data —
    # presumably computed client-side when the API response lacks it;
    # TODO confirm against the instrumentor.
    enrich_token_usage = False
    # Optional callable invoked with exceptions the instrumentation
    # suppresses; None disables external reporting.
    exception_logger = None
    # Zero-arg callable returning extra attributes merged into emitted
    # metrics; defaults to adding none.
    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
8 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/version.py:
--------------------------------------------------------------------------------
# Version of the opentelemetry-instrumentation-groq package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/groq"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-groq"
11 | version = "0.40.7"
12 | description = "OpenTelemetry Groq instrumentation"
13 | authors = ["Gal Kleinman ", "Nir Gazit "]
14 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-groq"
15 | license = "Apache-2.0"
16 | readme = "README.md"
17 |
18 | [[tool.poetry.packages]]
19 | include = "opentelemetry/instrumentation/groq"
20 |
21 | [tool.poetry.dependencies]
22 | python = ">=3.9,<4"
23 | opentelemetry-api = "^1.28.0"
24 | opentelemetry-instrumentation = ">=0.50b0"
25 | opentelemetry-semantic-conventions = ">=0.50b0"
26 | opentelemetry-semantic-conventions-ai = "0.4.9"
27 |
28 | [tool.poetry.group.dev.dependencies]
29 | autopep8 = "^2.2.0"
30 | flake8 = "7.0.0"
31 | pytest = "^8.2.2"
32 | pytest-sugar = "1.0.0"
33 |
34 | [tool.poetry.group.test.dependencies]
35 | groq = ">=0.18.0"
36 | pytest = "^8.2.2"
37 | pytest-sugar = "1.0.0"
38 | vcrpy = "^6.0.1"
39 | pytest-recording = "^0.13.1"
40 | opentelemetry-sdk = "^1.27.0"
41 | pytest-asyncio = "^0.23.7"
42 |
43 | [build-system]
44 | requires = ["poetry-core"]
45 | build-backend = "poetry.core.masonry.api"
46 |
47 | [tool.poetry.extras]
48 | instruments = ["groq"]
49 |
50 | [tool.poetry.plugins."opentelemetry_instrumentor"]
51 | groq = "opentelemetry.instrumentation.groq:GroqInstrumentor"
52 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-groq/tests/data/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/opentelemetry-instrumentation-groq/tests/data/logo.jpg
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Haystack Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing complete LLM applications built with [Haystack](https://github.com/deepset-ai/haystack).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-haystack
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.haystack import HaystackInstrumentor
19 |
20 | HaystackInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/config.py:
--------------------------------------------------------------------------------
class Config:
    """Mutable, instrumentation-wide settings."""

    # Optional callable invoked with exceptions the instrumentation
    # catches; None disables external reporting.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/version.py:
--------------------------------------------------------------------------------
# Version of the opentelemetry-instrumentation-haystack package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_node.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from opentelemetry import context as context_api
3 | from opentelemetry.context import attach, set_value
4 | from opentelemetry.instrumentation.utils import (
5 | _SUPPRESS_INSTRUMENTATION_KEY,
6 | )
7 | from opentelemetry.instrumentation.haystack.utils import with_tracer_wrapper
8 | from opentelemetry.semconv_ai import SpanAttributes, TraceloopSpanKindValues
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
@with_tracer_wrapper
def wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
    """Trace a single Haystack node invocation as a TASK span named '<node>.task'."""
    # Honor upstream suppression so no span is created when instrumentation
    # is disabled for this context.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)
    name = instance.name
    # Publish the workflow name into the OTel context for downstream spans.
    attach(set_value("workflow_name", name))
    with tracer.start_as_current_span(f"{name}.task") as span:
        span.set_attribute(
            SpanAttributes.TRACELOOP_SPAN_KIND,
            TraceloopSpanKindValues.TASK.value,
        )
        span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_NAME, name)

        # Invoke the wrapped node inside the span so its duration and any
        # child spans are attributed to this task.
        response = wrapped(*args, **kwargs)

    return response
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_pipeline.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from opentelemetry import context as context_api
3 | from opentelemetry.context import attach, set_value
4 | from opentelemetry.instrumentation.utils import (
5 | _SUPPRESS_INSTRUMENTATION_KEY,
6 | )
7 | from opentelemetry.instrumentation.haystack.utils import (
8 | with_tracer_wrapper,
9 | process_request,
10 | process_response,
11 | )
12 | from opentelemetry.semconv_ai import SpanAttributes, TraceloopSpanKindValues
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 |
@with_tracer_wrapper
def wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
    """Trace a Haystack pipeline run as a WORKFLOW span named 'haystack_pipeline.workflow'."""
    # Honor upstream suppression so no span is created when instrumentation
    # is disabled for this context.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)
    name = "haystack_pipeline"
    # Publish the workflow name into the OTel context for downstream spans.
    attach(set_value("workflow_name", name))
    with tracer.start_as_current_span(f"{name}.workflow") as span:
        span.set_attribute(
            SpanAttributes.TRACELOOP_SPAN_KIND,
            TraceloopSpanKindValues.WORKFLOW.value,
        )
        span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_NAME, name)
        # Record request/response payloads on the span (content capture is
        # controlled inside these helpers).
        process_request(span, args, kwargs)
        response = wrapped(*args, **kwargs)
        process_response(span, response)

    return response
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import os
4 | import pytest
5 | from opentelemetry import trace
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
8 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
9 | from opentelemetry.instrumentation.haystack import HaystackInstrumentor
10 |
11 | pytest_plugins = []
12 |
13 |
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter with Haystack instrumentation installed."""
    memory_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(memory_exporter))
    trace.set_tracer_provider(tracer_provider)

    HaystackInstrumentor().instrument()

    return memory_exporter
26 |
27 |
@pytest.fixture(autouse=True)
def environment():
    """Pin the environment variables the tests rely on to deterministic values."""
    os.environ.update(
        {
            "OPENAI_API_KEY": "test_api_key",
            "TRACELOOP_TRACE_CONTENT": "true",
        }
    )
32 |
33 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Reset captured spans before every test so assertions only see
    # spans produced by the current test.
    exporter.clear()
37 |
38 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip the Authorization header from recorded cassettes so real
    # API keys are never committed.
    return {"filter_headers": ["authorization"]}
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-haystack/tests/test_placeholder.py:
--------------------------------------------------------------------------------
def test_placeholder():
    # Placeholder so the package has at least one collectible test.
    pass
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry LanceDB Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing client-side calls to LanceDB sent with the official [LanceDB library](https://github.com/lancedb/lancedb).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-lancedb
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.lancedb import LanceInstrumentor
19 |
20 | LanceInstrumentor().instrument()
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/opentelemetry/instrumentation/lancedb/config.py:
--------------------------------------------------------------------------------
class Config:
    """Mutable, instrumentation-wide settings."""

    # Optional callable invoked with exceptions the instrumentation
    # swallows (see utils.dont_throw); None disables external reporting.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/opentelemetry/instrumentation/lancedb/utils.py:
--------------------------------------------------------------------------------
import functools
import logging
import traceback

from opentelemetry.instrumentation.lancedb.config import Config
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves the wrapped function's metadata
    # (__name__, __doc__, __module__) so introspection and debug
    # output stay accurate for decorated functions.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Tracing must never break the instrumented application:
            # log at debug level and optionally report to the
            # user-configured exception logger.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/opentelemetry/instrumentation/lancedb/version.py:
--------------------------------------------------------------------------------
# Version of the opentelemetry-instrumentation-lancedb package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-lancedb/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.lancedb import LanceInstrumentor
9 |
10 | pytest_plugins = []
11 |
12 |
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter with LanceDB instrumentation installed."""
    memory_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(memory_exporter))
    trace.set_tracer_provider(tracer_provider)

    LanceInstrumentor().instrument()

    return memory_exporter
25 |
26 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Runs before every test: drop spans captured by earlier tests so each
    # test asserts only on the spans it produced itself.
    exporter.clear()
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/.python-version:
--------------------------------------------------------------------------------
1 | 3.11
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Langchain Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing complete LLM applications built with [Langchain](https://github.com/langchain-ai/langchain).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-langchain
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.langchain import LangchainInstrumentor
19 |
20 | LangchainInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional exception-reporting callback used by the instrumentation;
    # None disables it. (NOTE(review): set by the instrumentor, consumed
    # elsewhere in this package — usage is not visible in this module.)
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/tests/cassettes/test_llms/test_bedrock.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 | body: '{"anthropic_version": "bedrock-2023-05-31", "messages": [{"role": "user",
4 | "content": "tell me a short joke"}], "system": "You are helpful assistant",
5 | "max_tokens": 1024}'
6 | headers:
7 | Accept:
8 | - !!binary |
9 | YXBwbGljYXRpb24vanNvbg==
10 | Content-Length:
11 | - '169'
12 | Content-Type:
13 | - !!binary |
14 | YXBwbGljYXRpb24vanNvbg==
15 | method: POST
16 | uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/invoke
17 | response:
18 | body:
19 | string: '{"id":"msg_tMvfcauJlAgxHmTMKY79Mk7s","type":"message","role":"assistant","content":[{"type":"text","text":"Here''s
20 | a short joke for you:\n\nWhat do you call a bear with no teeth? A gummy bear!"}],"model":"claude-3-haiku-48k-20240307","stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":16,"output_tokens":27}}'
21 | headers:
22 | Connection:
23 | - keep-alive
24 | Content-Length:
25 | - '326'
26 | Content-Type:
27 | - application/json
28 | Date:
29 | - Tue, 09 Apr 2024 03:53:50 GMT
30 | X-Amzn-Bedrock-Input-Token-Count:
31 | - '16'
32 | X-Amzn-Bedrock-Invocation-Latency:
33 | - '695'
34 | X-Amzn-Bedrock-Output-Token-Count:
35 | - '27'
36 | x-amzn-RequestId:
37 | - 6de9702e-400c-4562-a2ba-47bb4506c7b2
38 | status:
39 | code: 200
40 | message: OK
41 | version: 1
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from typing import List
4 | from langchain.schema import HumanMessage
5 | from langchain_openai import ChatOpenAI
6 | from opentelemetry.semconv_ai import SpanAttributes
7 | from pydantic import BaseModel, Field
8 |
9 |
class FoodAnalysis(BaseModel):
    """Structured-output schema the LLM is asked to populate for a food item."""

    name: str = Field(description="The name of the food item")
    healthy: bool = Field(description="Whether the food is good for you")
    calories: int = Field(description="Estimated calories per serving")
    taste_profile: List[str] = Field(description="List of taste characteristics")
15 |
16 |
@pytest.mark.vcr
def test_structured_output(exporter):
    """Check prompt/completion span attributes for structured-output chat calls."""
    prompt = "Analyze the following food item: avocado"
    chat_model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    structured_model = chat_model.with_structured_output(FoodAnalysis)
    analysis = structured_model.invoke([HumanMessage(content=prompt)])

    finished_spans = exporter.get_finished_spans()

    # The chat span must be present among whatever other spans were emitted.
    assert {"ChatOpenAI.chat"}.issubset({s.name for s in finished_spans})

    chat_span = next(s for s in finished_spans if s.name == "ChatOpenAI.chat")

    assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"] == prompt
    assert (
        chat_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
        == analysis.model_dump_json()
    )
36 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-llamaindex/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-llamaindex/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-llamaindex/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry LlamaIndex Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing complete LLM applications built with [LlamaIndex](https://github.com/run-llama/llama_index).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-llamaindex
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.llamaindex import LlamaIndexInstrumentor
19 |
20 | LlamaIndexInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-llamaindex/opentelemetry/instrumentation/llamaindex/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional exception-reporting callback used by the instrumentation;
    # None disables it. (NOTE(review): set by the instrumentor, consumed
    # elsewhere in this package — usage is not visible in this module.)
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-llamaindex/opentelemetry/instrumentation/llamaindex/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-llamaindex/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-llamaindex/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Marqo Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing client-side calls to Marqo vector DB sent with the official [Marqo library](https://github.com/marqo-ai/marqo).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-marqo
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.marqo import MarqoInstrumentor
19 |
20 | MarqoInstrumentor().instrument()
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/opentelemetry/instrumentation/marqo/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional callable invoked with any exception swallowed by this
    # package's ``dont_throw`` wrapper; None disables forwarding.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/opentelemetry/instrumentation/marqo/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.marqo.config import Config
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    Instrumentation must never crash the host application, so any exception
    raised by ``func`` is logged at DEBUG level (and optionally forwarded to
    ``Config.exception_logger``) and the wrapper returns ``None`` instead.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # Preserve the wrapped function's metadata (__name__, __doc__, ...) so
    # logs, debuggers, and introspection still see the original function.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/opentelemetry/instrumentation/marqo/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-marqo/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.marqo import MarqoInstrumentor
9 |
10 | pytest_plugins = []
11 |
12 |
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter wired into a fresh tracer provider.

    Also instruments Marqo once for the whole test session so every test
    records spans into the returned exporter.
    """
    span_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter))
    trace.set_tracer_provider(tracer_provider)

    MarqoInstrumentor().instrument()

    return span_exporter
25 |
26 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Runs before every test: drop spans captured by earlier tests so each
    # test asserts only on the spans it produced itself.
    exporter.clear()
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mcp/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mcp/.python-version:
--------------------------------------------------------------------------------
1 | 3.12.6
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mcp/README.md:
--------------------------------------------------------------------------------
# OpenTelemetry MCP Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing of agentic workflows implemented with MCP framework [mcp python sdk](https://github.com/modelcontextprotocol/python-sdk).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-mcp
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.mcp import McpInstrumentor
19 |
20 | McpInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application tool usage is working, and can make it easy to debug and evaluate the tool usage.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mcp/opentelemetry/instrumentation/mcp/__init__.py:
--------------------------------------------------------------------------------
1 | from opentelemetry.instrumentation.mcp.version import __version__
2 | from opentelemetry.instrumentation.mcp.instrumentation import McpInstrumentor
3 |
4 | __all__ = ["McpInstrumentor", "__version__"]
5 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mcp/opentelemetry/instrumentation/mcp/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mcp/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mcp/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/mcp"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-mcp"
11 | version = "0.40.7"
12 | description = "OpenTelemetry mcp instrumentation"
13 | authors = ["Felix George "]
14 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-mcp"
15 | license = "Apache-2.0"
16 | readme = "README.md"
17 |
18 | [[tool.poetry.packages]]
19 | include = "opentelemetry/instrumentation/mcp"
20 |
21 | [tool.poetry.dependencies]
22 | python = ">=3.10,<4"
23 | opentelemetry-api = "^1.28.0"
24 | opentelemetry-instrumentation = ">=0.50b0"
25 | opentelemetry-semantic-conventions = ">=0.50b0"
26 | opentelemetry-semantic-conventions-ai = "0.4.9"
27 |
28 | [tool.poetry.group.dev.dependencies]
29 | autopep8 = "^2.2.0"
30 | flake8 = "7.1.1"
31 | pytest = "^8.2.2"
32 | pytest-sugar = "1.0.0"
33 |
34 | [tool.poetry.group.test.dependencies]
35 | mcp = { version = "^1.3.0", python = ">=3.10,<=3.13" }
36 | pytest = "^8.2.2"
37 | pytest-sugar = "1.0.0"
38 | pytest-recording = "^0.13.1"
39 | opentelemetry-sdk = "^1.27.0"
40 |
41 | [build-system]
42 | requires = ["poetry-core"]
43 | build-backend = "poetry.core.masonry.api"
44 |
45 | [tool.poetry.extras]
46 | instruments = ["mcp"]
47 |
48 | [tool.poetry.plugins."opentelemetry_instrumentor"]
49 | mcp = "opentelemetry.instrumentation.mcp:McpInstrumentor"
50 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/.gitignore:
--------------------------------------------------------------------------------
1 | *.db
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Milvus Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing client-side calls to Milvus vector DB sent with the official [Milvus library](https://github.com/milvus-io/milvus).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-milvus
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.milvus import MilvusInstrumentor
19 |
20 | MilvusInstrumentor().instrument()
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/opentelemetry/instrumentation/milvus/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional callable invoked with any exception swallowed by this
    # package's ``dont_throw`` wrapper; None disables forwarding.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/opentelemetry/instrumentation/milvus/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.milvus.config import Config
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    Instrumentation must never crash the host application, so any exception
    raised by ``func`` is logged at DEBUG level (and optionally forwarded to
    ``Config.exception_logger``) and the wrapper returns ``None`` instead.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # Preserve the wrapped function's metadata (__name__, __doc__, ...) so
    # logs, debuggers, and introspection still see the original function.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/opentelemetry/instrumentation/milvus/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-milvus/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.milvus import MilvusInstrumentor
9 |
10 | pytest_plugins = []
11 |
12 |
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter wired into a fresh tracer provider.

    Also instruments Milvus once for the whole test session so every test
    records spans into the returned exporter.
    """
    span_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter))
    trace.set_tracer_provider(tracer_provider)

    MilvusInstrumentor().instrument()

    return span_exporter
25 |
26 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Runs before every test: drop spans captured by earlier tests so each
    # test asserts only on the spans it produced itself.
    exporter.clear()
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Mistral AI Instrumentation
2 |
3 |
4 |
5 |
6 |
This library allows tracing calls to any of Mistral AI's endpoints sent with the official [Mistral AI Python client](https://github.com/mistralai/client-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-mistralai
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor
19 |
20 | MistralAiInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/opentelemetry/instrumentation/mistralai/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional callable invoked with any exception swallowed by this
    # package's ``dont_throw`` wrapper; None disables forwarding.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/opentelemetry/instrumentation/mistralai/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.mistralai.config import Config
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    Instrumentation must never crash the host application, so any exception
    raised by ``func`` is logged at DEBUG level (and optionally forwarded to
    ``Config.exception_logger``) and the wrapper returns ``None`` instead.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # Preserve the wrapped function's metadata (__name__, __doc__, ...) so
    # logs, debuggers, and introspection still see the original function.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/opentelemetry/instrumentation/mistralai/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-mistralai/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import os
4 | import pytest
5 | from opentelemetry import trace
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
8 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
9 | from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor
10 |
11 | pytest_plugins = []
12 |
13 |
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter wired into a fresh tracer provider.

    Also instruments Mistral AI once for the whole test session so every test
    records spans into the returned exporter.
    """
    span_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter))
    trace.set_tracer_provider(tracer_provider)

    MistralAiInstrumentor().instrument()

    return span_exporter
26 |
27 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Runs before every test: drop spans captured by earlier tests so each
    # test asserts only on the spans it produced itself.
    exporter.clear()
31 |
32 |
@pytest.fixture(autouse=True)
def environment():
    """Ensure MISTRAL_API_KEY is set (dummy value suffices for replayed cassettes)."""
    os.environ.setdefault("MISTRAL_API_KEY", "test_api_key")
37 |
38 |
@pytest.fixture(scope="module")
def vcr_config():
    """VCR settings: scrub the Authorization header from recorded cassettes."""
    return dict(filter_headers=["authorization"])
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Ollama Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing calls to any of Ollama's endpoints sent with the official [Ollama Python Library](https://github.com/ollama/ollama-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-ollama
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.ollama import OllamaInstrumentor
19 |
20 | OllamaInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/opentelemetry/instrumentation/ollama/config.py:
--------------------------------------------------------------------------------
class Config:
    # Optional callable invoked with any exception swallowed by this
    # package's ``dont_throw`` wrapper; None disables forwarding.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/opentelemetry/instrumentation/ollama/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.ollama.config import Config
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    Instrumentation must never crash the host application, so any exception
    raised by ``func`` is logged at DEBUG level (and optionally forwarded to
    ``Config.exception_logger``) and the wrapper returns ``None`` instead.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # Preserve the wrapped function's metadata (__name__, __doc__, ...) so
    # logs, debuggers, and introspection still see the original function.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/opentelemetry/instrumentation/ollama/version.py:
--------------------------------------------------------------------------------
# Current version of the opentelemetry-instrumentation-ollama package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-ollama/tests/test_embeddings.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import ollama
3 | from opentelemetry.semconv_ai import SpanAttributes
4 |
5 |
@pytest.mark.vcr
def test_ollama_embeddings(exporter):
    """Verify ollama.embeddings emits one span with the expected attributes."""
    ollama.embeddings(model="llama3", prompt="Tell me a joke about OpenTelemetry")

    spans = exporter.get_finished_spans()
    ollama_span = spans[0]
    assert ollama_span.name == "ollama.embeddings"
    # SpanAttributes members are already strings, so the f-string wrappers
    # the original used (f"{SpanAttributes.X}") were redundant; only the
    # prompts key below needs interpolation for its ".0.content" suffix.
    assert ollama_span.attributes.get(SpanAttributes.LLM_SYSTEM) == "Ollama"
    assert (
        ollama_span.attributes.get(SpanAttributes.LLM_REQUEST_TYPE) == "embedding"
    )
    assert not ollama_span.attributes.get(SpanAttributes.LLM_IS_STREAMING)
    assert ollama_span.attributes.get(SpanAttributes.LLM_REQUEST_MODEL) == "llama3"
    assert (
        ollama_span.attributes.get(f"{SpanAttributes.LLM_PROMPTS}.0.content")
        == "Tell me a joke about OpenTelemetry"
    )
23 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry OpenAI Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing OpenAI prompts and completions sent with the official [OpenAI library](https://github.com/openai/openai-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-openai
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.openai import OpenAIInstrumentor
19 |
20 | OpenAIInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
**By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/shared/config.py:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 |
3 |
class Config:
    """Process-wide configuration flags for the OpenAI instrumentation."""

    # Feature flags toggled by the instrumentor at instrument() time —
    # see tests/traces/conftest.py, which passes enrich_assistant,
    # enrich_token_usage and upload_base64_image.
    enrich_token_usage = False
    enrich_assistant = False
    # Optional callable that receives exceptions swallowed by the
    # instrumentation; None disables external reporting.
    exception_logger = None
    # Callable returning extra attributes to attach to metrics.
    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
    # NOTE(review): this default returns the *type* ``str`` (not a string)
    # regardless of input — presumably a placeholder that callers are
    # expected to override; confirm the intended default behavior.
    upload_base64_image: Callable[[str, str, str], str] = lambda trace_id, span_id, base64_image_url: str
    enable_trace_context_propagation: bool = True
11 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/version.py:
--------------------------------------------------------------------------------
# Current version of the opentelemetry-instrumentation-openai package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | asyncio_mode=auto
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | import os
5 | from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
6 |
7 | pytest_plugins = []
8 |
9 |
@pytest.fixture(autouse=True)
def environment():
    """Ensure API credentials exist so clients can be constructed in tests.

    Real values already present in the environment are kept; placeholders
    are substituted only when a variable is missing or empty.
    """
    fallbacks = {
        "OPENAI_API_KEY": "test_api_key",
        "AZURE_OPENAI_API_KEY": "test_azure_api_key",
        "AZURE_OPENAI_ENDPOINT": "https://traceloop-stg.openai.azure.com/",
    }
    for variable, placeholder in fallbacks.items():
        if not os.getenv(variable):
            os.environ[variable] = placeholder
18 |
19 |
@pytest.fixture
def openai_client():
    # Synchronous client; credentials come from the autouse environment fixture.
    return OpenAI()
23 |
24 |
@pytest.fixture
def vllm_openai_client():
    # OpenAI-compatible client pointed at a locally running vLLM server.
    return OpenAI(base_url="http://localhost:8000/v1")
28 |
29 |
@pytest.fixture
def azure_openai_client():
    # Endpoint and key are read from the env vars set by the environment fixture.
    return AzureOpenAI(
        api_version="2024-02-01",
    )
35 |
36 |
@pytest.fixture
def async_azure_openai_client():
    # Async variant of azure_openai_client; same implicit env-based config.
    return AsyncAzureOpenAI(
        api_version="2024-02-01",
    )
42 |
43 |
@pytest.fixture
def async_openai_client():
    # Async client; credentials come from the autouse environment fixture.
    return AsyncOpenAI()
47 |
48 |
@pytest.fixture
def async_vllm_openai_client():
    # Async OpenAI-compatible client pointed at a locally running vLLM server.
    return AsyncOpenAI(base_url="http://localhost:8000/v1")
52 |
53 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip credential headers before requests are written to cassettes.
    return {"filter_headers": ["authorization", "api-key"]}
57 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/tests/metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/opentelemetry-instrumentation-openai/tests/metrics/__init__.py
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/tests/metrics/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from opentelemetry import metrics
3 | from opentelemetry.sdk.resources import Resource
4 | from opentelemetry.sdk.metrics import MeterProvider
5 | from opentelemetry.sdk.metrics.export import InMemoryMetricReader
6 |
7 | from opentelemetry.instrumentation.openai import OpenAIInstrumentor
8 |
9 |
@pytest.fixture(scope="session")
def metrics_test_context():
    """Install an in-memory metrics pipeline and instrument OpenAI.

    Returns the (provider, reader) pair so tests can pull collected metrics.
    """
    reader = InMemoryMetricReader()
    provider = MeterProvider(
        metric_readers=[reader],
        resource=Resource.create(),
    )

    metrics.set_meter_provider(provider)

    OpenAIInstrumentor().instrument()

    return provider, reader
21 |
22 |
@pytest.fixture(scope="session", autouse=True)
def clear_metrics_test_context(metrics_test_context):
    # Shut down the session-wide metrics pipeline created by
    # metrics_test_context.
    # NOTE(review): without a `yield`, this body runs at fixture *setup*
    # (before the first test), not at session teardown — confirm whether a
    # `yield` before the shutdown calls was intended.
    provider, reader = metrics_test_context

    reader.shutdown()
    provider.shutdown()
29 |
30 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip credentials from cassettes; requests to the openaipublic blob
    # host bypass VCR recording entirely.
    return {
        "filter_headers": ["authorization"],
        "ignore_hosts": ["openaipublic.blob.core.windows.net"],
    }
37 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/tests/traces/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/opentelemetry-instrumentation-openai/tests/traces/__init__.py
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/tests/traces/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from opentelemetry import trace
3 | from opentelemetry.sdk.trace import TracerProvider
4 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
5 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
6 | from opentelemetry.instrumentation.openai import OpenAIInstrumentor
7 |
8 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter and instrument the OpenAI client."""
    span_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter))
    trace.set_tracer_provider(tracer_provider)

    async def upload_base64_image(*args):
        # Stub uploader: tests only need a stable URL-like string back.
        return "/some/url"

    OpenAIInstrumentor(
        enrich_assistant=True,
        enrich_token_usage=True,
        upload_base64_image=upload_base64_image,
    ).instrument()

    return span_exporter
28 |
29 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Drop spans collected by earlier tests so each test starts clean.
    exporter.clear()
33 |
34 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip credential headers from cassettes; requests to the openaipublic
    # blob host bypass VCR recording entirely.
    return {
        "filter_headers": ["authorization", "api-key"],
        "ignore_hosts": ["openaipublic.blob.core.windows.net"],
    }
41 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-openai/tests/traces/utils.py:
--------------------------------------------------------------------------------
1 | import httpx
2 | from opentelemetry.sdk.trace import Span
3 | from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
4 | from opentelemetry.trace.propagation import get_current_span
5 | from unittest.mock import MagicMock
6 |
7 |
8 | # from: https://stackoverflow.com/a/41599695/2749989
9 | def spy_decorator(method_to_decorate):
10 | mock = MagicMock()
11 |
12 | def wrapper(self, *args, **kwargs):
13 | mock(*args, **kwargs)
14 | return method_to_decorate(self, *args, **kwargs)
15 |
16 | wrapper.mock = mock
17 | return wrapper
18 |
19 |
def assert_request_contains_tracecontext(request: httpx.Request, expected_span: Span):
    """Assert *request* carries a W3C traceparent matching *expected_span*."""
    assert TraceContextTextMapPropagator._TRACEPARENT_HEADER_NAME in request.headers
    extracted_ctx = TraceContextTextMapPropagator().extract(request.headers)
    propagated = get_current_span(extracted_ctx).get_span_context()
    expected = expected_span.get_span_context()

    assert propagated.trace_id == expected.trace_id
    assert propagated.span_id == expected.span_id
28 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Pinecone Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing client-side calls to Pinecone vector DB sent with the official [Pinecone library](https://github.com/pinecone-io/pinecone-python-client).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-pinecone
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.pinecone import PineconeInstrumentor
19 |
20 | PineconeInstrumentor().instrument()
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/config.py:
--------------------------------------------------------------------------------
class Config:
    """Shared configuration for the Pinecone instrumentation."""

    # Optional callable that receives exceptions swallowed by the
    # ``dont_throw`` decorator in utils.py; ``None`` disables reporting.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import traceback
4 | from opentelemetry.instrumentation.pinecone.config import Config
5 |
6 |
def dont_throw(func):
    """Wrap *func* so exceptions are logged instead of propagated.

    @param func: The function to wrap
    @return: The wrapper function
    """
    # The logger is scoped to the wrapped function's defining module so
    # failures show up under the caller's module name.
    module_logger = logging.getLogger(func.__module__)

    def safe_call(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            module_logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(exc)

    return safe_call
30 |
31 |
def set_span_attribute(span, name, value):
    """Set *name* to *value* on *span*, skipping ``None`` and empty strings."""
    if value is not None and value != "":
        span.set_attribute(name, value)
37 |
38 |
def is_metrics_enabled() -> bool:
    """Return True unless TRACELOOP_METRICS_ENABLED is set to a non-"true" value.

    A missing or empty variable counts as enabled; the comparison is
    case-insensitive.
    """
    configured = os.getenv("TRACELOOP_METRICS_ENABLED")
    if not configured:
        return True
    return configured.lower() == "true"
41 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/version.py:
--------------------------------------------------------------------------------
# Current version of the opentelemetry-instrumentation-pinecone package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-pinecone/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Qdrant Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing client-side calls to Qdrant vector DB sent with the official [Qdrant client library](https://github.com/qdrant/qdrant-client).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-qdrant
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.qdrant import QdrantInstrumentor
19 |
20 | QdrantInstrumentor().instrument()
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/opentelemetry/instrumentation/qdrant/config.py:
--------------------------------------------------------------------------------
class Config:
    """Shared configuration for the Qdrant instrumentation."""

    # Optional callable that receives exceptions swallowed by the
    # ``dont_throw`` decorator in utils.py; ``None`` disables reporting.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/opentelemetry/instrumentation/qdrant/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.qdrant.config import Config
4 |
5 |
def dont_throw(func):
    """Wrap *func* so exceptions are logged instead of propagated.

    @param func: The function to wrap
    @return: The wrapper function
    """
    # The logger is scoped to the wrapped function's defining module so
    # failures show up under the caller's module name.
    module_logger = logging.getLogger(func.__module__)

    def safe_call(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            module_logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(exc)

    return safe_call
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/opentelemetry/instrumentation/qdrant/version.py:
--------------------------------------------------------------------------------
# Current version of the opentelemetry-instrumentation-qdrant package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/qdrant"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ["if TYPE_CHECKING:"]
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-qdrant"
11 | version = "0.40.7"
12 | description = "OpenTelemetry Qdrant instrumentation"
13 | authors = [
14 | "Gal Kleinman ",
15 | "Nir Gazit ",
16 | "Tomer Friedman ",
17 | ]
18 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-qdrant"
19 | license = "Apache-2.0"
20 | readme = "README.md"
21 |
22 | [[tool.poetry.packages]]
23 | include = "opentelemetry/instrumentation/qdrant"
24 |
25 | [tool.poetry.dependencies]
26 | python = ">=3.9,<4"
27 | opentelemetry-api = "^1.28.0"
28 | opentelemetry-instrumentation = ">=0.50b0"
29 | opentelemetry-semantic-conventions = ">=0.50b0"
30 | opentelemetry-semantic-conventions-ai = "0.4.9"
31 |
32 | [tool.poetry.group.dev.dependencies]
33 | autopep8 = "^2.2.0"
34 | flake8 = "7.0.0"
35 |
36 | [tool.poetry.group.test.dependencies]
37 | qdrant-client = "^1.9.1"
38 | pytest = "^8.2.2"
39 | pytest-sugar = "1.0.0"
40 | opentelemetry-sdk = "^1.27.0"
41 |
42 | [build-system]
43 | requires = ["poetry-core"]
44 | build-backend = "poetry.core.masonry.api"
45 |
46 | [tool.poetry.extras]
47 | instruments = ["qdrant-client"]
48 |
49 | [tool.poetry.plugins."opentelemetry_instrumentor"]
50 | qdrant_client = "opentelemetry.instrumentation.qdrant:QdrantInstrumentor"
51 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-qdrant/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.qdrant import QdrantInstrumentor
9 |
10 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter and instrument the Qdrant client."""
    memory_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(memory_exporter))
    trace.set_tracer_provider(tracer_provider)

    QdrantInstrumentor().instrument()

    return memory_exporter
23 |
24 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Drop spans collected by earlier tests so each test starts clean.
    exporter.clear()
28 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Replicate Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing Replicate prompts and image generation sent with the official [replicate library](https://github.com/replicate/replicate-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-replicate
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
19 |
20 | ReplicateInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
**By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/opentelemetry/instrumentation/replicate/config.py:
--------------------------------------------------------------------------------
class Config:
    """Shared configuration for the Replicate instrumentation."""

    # Optional callable that receives exceptions swallowed by the
    # ``dont_throw`` decorator in utils.py; ``None`` disables reporting.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/opentelemetry/instrumentation/replicate/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from opentelemetry.instrumentation.replicate.config import Config
4 |
5 |
def dont_throw(func):
    """Wrap *func* so exceptions are logged instead of propagated.

    @param func: The function to wrap
    @return: The wrapper function
    """
    # The logger is scoped to the wrapped function's defining module so
    # failures show up under the caller's module name.
    module_logger = logging.getLogger(func.__module__)

    def safe_call(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            module_logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(exc)

    return safe_call
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/opentelemetry/instrumentation/replicate/version.py:
--------------------------------------------------------------------------------
# Current version of the opentelemetry-instrumentation-replicate package.
__version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/replicate"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-replicate"
11 | version = "0.40.7"
12 | description = "OpenTelemetry Replicate instrumentation"
13 | authors = ["Kartik Prajapati "]
14 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-replicate"
15 | license = 'Apache-2.0'
16 | readme = 'README.md'
17 |
18 | [[tool.poetry.packages]]
19 | include = "opentelemetry/instrumentation/replicate"
20 |
21 | [tool.poetry.dependencies]
22 | python = ">=3.9,<4"
23 | opentelemetry-api = "^1.28.0"
24 | opentelemetry-instrumentation = ">=0.50b0"
25 | opentelemetry-semantic-conventions = ">=0.50b0"
26 | opentelemetry-semantic-conventions-ai = "0.4.9"
27 |
28 | [tool.poetry.group.dev.dependencies]
29 | autopep8 = "^2.2.0"
30 | flake8 = "7.0.0"
31 | pytest = "^8.2.2"
32 | pytest-sugar = "1.0.0"
33 |
34 | [tool.poetry.group.test.dependencies]
35 | pytest = "^8.2.2"
36 | pytest-sugar = "1.0.0"
37 | vcrpy = "^6.0.1"
38 | pytest-recording = "^0.13.1"
39 | opentelemetry-sdk = "^1.27.0"
40 | replicate = ">=0.23.1,<0.27.0"
41 |
42 | [build-system]
43 | requires = ["poetry-core"]
44 | build-backend = "poetry.core.masonry.api"
45 |
46 | [tool.poetry.extras]
47 | instruments = ["replicate"]
48 |
49 | [tool.poetry.plugins."opentelemetry_instrumentor"]
50 | replicate = "opentelemetry.instrumentation.replicate:ReplicateInstrumentor"
51 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
9 |
10 | pytest_plugins = []
11 |
12 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter and instrument the Replicate client."""
    memory_exporter = InMemorySpanExporter()

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(memory_exporter))
    trace.set_tracer_provider(tracer_provider)

    ReplicateInstrumentor().instrument()

    return memory_exporter
25 |
26 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Drop spans collected by earlier tests so each test starts clean.
    exporter.clear()
30 |
31 |
@pytest.fixture(scope="module")
def vcr_config():
    # Strip the authorization header before requests are written to cassettes.
    return {"filter_headers": ["authorization"]}
35 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/tests/test_image_generation.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import replicate
3 |
4 |
@pytest.mark.vcr
def test_replicate_image_generation(exporter):
    """A plain replicate.run call should emit a single 'replicate.run' span."""
    replicate.run(
        "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4",
        input={"prompt": "robots"},
    )

    span_names = [span.name for span in exporter.get_finished_spans()]
    assert span_names == ["replicate.run"]
16 |
17 |
@pytest.mark.vcr
def test_replicate_image_generation_predictions(exporter):
    """Creating a prediction directly should emit a 'replicate.predictions.create' span."""
    model = replicate.models.get("kvfrans/clipdraw")
    version = model.versions.get(
        "5797a99edc939ea0e9242d5e8c9cb3bc7d125b1eac21bda852e5cb79ede2cd9b"
    )
    replicate.predictions.create(version, input={"prompt": "robots"})

    span_names = [span.name for span in exporter.get_finished_spans()]
    assert span_names == ["replicate.predictions.create"]
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-replicate/tests/test_llama.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pytest
3 | import replicate
4 |
5 |
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9")
@pytest.mark.vcr
def test_replicate_llama_stream(exporter):
    """Consuming replicate.stream output should emit a single 'replicate.stream' span."""
    model_version = "meta/llama-2-70b-chat"
    # Drain the stream; the individual events are irrelevant to the assertion.
    for _ in replicate.stream(
        model_version,
        input={
            "prompt": "tell me a joke about opentelemetry",
        },
    ):
        pass

    span_names = [span.name for span in exporter.get_finished_spans()]
    assert span_names == ["replicate.stream"]
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry SageMaker Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing invocations of any models deployed on Amazon SageMaker that are made with [Boto3](https://github.com/boto/boto3).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-sagemaker
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.sagemaker import SageMakerInstrumentor
19 |
20 | SageMakerInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs SageMaker endpoint request bodies and responses to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/opentelemetry/instrumentation/sagemaker/config.py:
--------------------------------------------------------------------------------
class Config:
    """Global configuration flags for the SageMaker instrumentation."""

    # When True, attempt to enrich spans with token-usage counts.
    enrich_token_usage = False
    # Optional callable invoked with exceptions swallowed by @dont_throw.
    exception_logger = None
4 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/opentelemetry/instrumentation/sagemaker/streaming_wrapper.py:
--------------------------------------------------------------------------------
1 | from opentelemetry.instrumentation.sagemaker.utils import dont_throw
2 | from wrapt import ObjectProxy
3 |
4 |
class StreamingWrapper(ObjectProxy):
    """Transparent proxy around a SageMaker streaming response body.

    Passes events through unchanged while accumulating the decoded payload,
    so an optional callback can receive the full body once the stream ends.
    """

    def __init__(
        self,
        response,
        stream_done_callback=None,
    ):
        """
        @param response: the streaming response object to wrap
        @param stream_done_callback: optional callable invoked with the fully
            accumulated body string after the stream is exhausted
        """
        super().__init__(response)

        self._stream_done_callback = stream_done_callback
        self._accumulating_body = ""

    def __iter__(self):
        for event in self.__wrapped__:
            self._process_event(event)
            yield event
        # Guard: the callback is optional (defaults to None); calling it
        # unconditionally would raise TypeError when none was supplied.
        if self._stream_done_callback:
            self._stream_done_callback(self._accumulating_body)

    @dont_throw
    def _process_event(self, event):
        # Events without a PayloadPart (e.g. control frames) carry no body text.
        payload_part = event.get("PayloadPart")
        if not payload_part:
            return

        decoded_payload_part = payload_part.get("Bytes").decode()
        self._accumulating_body += decoded_payload_part
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/opentelemetry/instrumentation/sagemaker/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 |
4 | from opentelemetry.instrumentation.sagemaker.config import Config
5 |
6 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps  # local import keeps this decorator self-contained

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    @wraps(func)  # preserve func's metadata (__name__, __doc__) for introspection
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Debug level: instrumentation failures must never spam user logs.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            # Optional hook letting the host application observe swallowed errors.
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/opentelemetry/instrumentation/sagemaker/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/sagemaker"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-sagemaker"
11 | version = "0.40.7"
12 | description = "OpenTelemetry SageMaker instrumentation"
13 | authors = ["Bobby Lindsey "]
14 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-sagemaker"
15 | license = "Apache-2.0"
16 | readme = "README.md"
17 |
18 | [[tool.poetry.packages]]
19 | include = "opentelemetry/instrumentation/sagemaker"
20 |
21 | [tool.poetry.dependencies]
22 | python = ">=3.9,<4"
23 | opentelemetry-api = "^1.26.0"
24 | opentelemetry-instrumentation = ">=0.50b0"
25 | opentelemetry-semantic-conventions = ">=0.50b0"
26 | opentelemetry-semantic-conventions-ai = "0.4.9"
27 |
28 | [tool.poetry.group.dev.dependencies]
29 | autopep8 = "^2.2.0"
30 | flake8 = "7.0.0"
31 |
32 | [tool.poetry.group.test.dependencies]
33 | boto3 = "^1.34.120"
34 | vcrpy = "^6.0.1"
35 | pytest = "^8.2.2"
36 | pytest-sugar = "1.0.0"
37 | pytest-recording = "^0.13.1"
38 | opentelemetry-sdk = "^1.23.0"
39 |
40 | [build-system]
41 | requires = ["poetry-core"]
42 | build-backend = "poetry.core.masonry.api"
43 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-sagemaker/tests/test_invocation.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from opentelemetry.semconv_ai import SpanAttributes
3 |
4 | import json
5 |
6 |
@pytest.mark.vcr()
def test_sagemaker_completion_string_content(exporter, smrt):
    """Invoke a SageMaker endpoint (replayed from a VCR cassette) and verify
    the instrumentation records the endpoint name and raw request body.

    Fixtures: ``exporter`` is the in-memory span exporter; ``smrt`` is
    presumably a boto3 SageMaker-runtime client from conftest (not shown here).
    """
    endpoint_name = "my-llama2-endpoint"
    prompt = """[INST] <>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your
answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure
that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not
correct. If you don't know the answer to a question, please don't share false information.
<>

There's a llama in my garden What should I do? [/INST]"""
    # Create request body.
    body = json.dumps(
        {
            "inputs": prompt,
            "parameters": {"temperature": 0.1, "top_p": 0.9, "max_new_tokens": 128},
        }
    )

    smrt.invoke_endpoint(
        EndpointName=endpoint_name,
        Body=body,
        ContentType="application/json",
    )

    spans = exporter.get_finished_spans()

    # The span should carry the endpoint (model) name and, since content
    # tracing is on by default, the exact request body that was sent.
    meta_span = spans[0]
    assert meta_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == endpoint_name
    assert meta_span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT] == body
39 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Together AI Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing calls to any of Together AI's endpoints sent with the official [Together AI Library](https://github.com/togethercomputer/together-python).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-together
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.together import TogetherAiInstrumentor
19 |
20 | TogetherAiInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/opentelemetry/instrumentation/together/config.py:
--------------------------------------------------------------------------------
class Config:
    """Global configuration flags for the Together AI instrumentation."""

    # Optional callable invoked with exceptions swallowed by @dont_throw.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/opentelemetry/instrumentation/together/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from opentelemetry.instrumentation.together.config import Config
3 | import traceback
4 |
5 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps  # local import keeps this decorator self-contained

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    @wraps(func)  # preserve func's metadata (__name__, __doc__) for introspection
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Debug level: instrumentation failures must never spam user logs.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            # Optional hook letting the host application observe swallowed errors.
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
29 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/opentelemetry/instrumentation/together/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | asyncio_mode=auto
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import os
4 | import pytest
5 | from opentelemetry import trace
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
8 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
9 | from opentelemetry.instrumentation.together import TogetherAiInstrumentor
10 |
11 | pytest_plugins = []
12 |
13 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter and instrument the Together client
    once for the whole test session."""
    span_exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(span_exporter))
    trace.set_tracer_provider(provider)

    TogetherAiInstrumentor().instrument()

    return span_exporter
26 |
27 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    """Reset captured spans before every test so assertions only see new spans."""
    exporter.clear()
31 |
32 |
@pytest.fixture(autouse=True)
def environment():
    """Ensure TOGETHER_API_KEY is set so the client can be constructed when
    replaying VCR cassettes without real credentials."""
    os.environ.setdefault("TOGETHER_API_KEY", "test_api_key")
37 |
38 |
@pytest.fixture(scope="module")
def vcr_config():
    """VCR settings: strip auth headers and decompress recorded response bodies."""
    return {
        "filter_headers": ["authorization"],
        "decode_compressed_response": True,
    }
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/tests/test_chat.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from together import Together
4 |
5 |
@pytest.mark.vcr
def test_together_chat(exporter):
    """A Together chat completion should produce a fully-attributed span
    (request/prompt/completion/usage), replayed from a VCR cassette."""
    client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
    response = client.chat.completions.create(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=[{"role": "user", "content": "Tell me a joke about OpenTelemetry."}],
    )

    spans = exporter.get_finished_spans()
    together_span = spans[0]
    attributes = together_span.attributes
    assert together_span.name == "together.chat"
    assert attributes.get("gen_ai.system") == "TogetherAI"
    assert attributes.get("llm.request.type") == "chat"
    assert attributes.get("gen_ai.request.model") == "mistralai/Mixtral-8x7B-Instruct-v0.1"
    assert attributes.get("gen_ai.prompt.0.content") == "Tell me a joke about OpenTelemetry."
    assert (
        attributes.get("gen_ai.completion.0.content")
        == response.choices[0].message.content
    )
    assert attributes.get("gen_ai.usage.prompt_tokens") == 18
    # Total tokens must equal the sum of the prompt and completion counts.
    expected_total = attributes.get("gen_ai.usage.completion_tokens") + attributes.get(
        "gen_ai.usage.prompt_tokens"
    )
    assert attributes.get("llm.usage.total_tokens") == expected_total
    assert attributes.get("gen_ai.response.id") == "88fa668fac30bb19-MXP"
40 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-together/tests/test_completion.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from together import Together
4 |
5 |
@pytest.mark.vcr
def test_together_completion(exporter):
    """A Together text completion should produce a fully-attributed span
    (request/prompt/completion/usage), replayed from a VCR cassette."""
    client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
    response = client.completions.create(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        prompt="Tell me a joke about OpenTelemetry.",
    )

    spans = exporter.get_finished_spans()
    together_span = spans[0]
    attributes = together_span.attributes
    assert together_span.name == "together.completion"
    assert attributes.get("gen_ai.system") == "TogetherAI"
    assert attributes.get("llm.request.type") == "completion"
    assert attributes.get("gen_ai.request.model") == "mistralai/Mixtral-8x7B-Instruct-v0.1"
    assert attributes.get("gen_ai.prompt.0.content") == "Tell me a joke about OpenTelemetry."
    assert attributes.get("gen_ai.completion.0.content") == response.choices[0].text
    assert attributes.get("gen_ai.usage.prompt_tokens") == 10
    # Total tokens must equal the sum of the prompt and completion counts.
    expected_total = attributes.get("gen_ai.usage.completion_tokens") + attributes.get(
        "gen_ai.usage.prompt_tokens"
    )
    assert attributes.get("llm.usage.total_tokens") == expected_total
    assert attributes.get("gen_ai.response.id") == "88fa66988e400e83-MXP"
40 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry HuggingFace Transformers Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing text generation calls sent with the official [HuggingFace Transformers library](https://github.com/huggingface/transformers).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-transformers
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.transformers import TransformersInstrumentor
19 |
20 | TransformersInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/opentelemetry/instrumentation/transformers/config.py:
--------------------------------------------------------------------------------
class Config:
    """Global configuration flags for the Transformers instrumentation."""

    # Optional callable invoked with exceptions swallowed by @dont_throw.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/opentelemetry/instrumentation/transformers/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 |
4 | from opentelemetry.instrumentation.transformers.config import Config
5 |
6 |
7 | def _with_tracer_wrapper(func):
8 | """Helper for providing tracer for wrapper functions."""
9 |
10 | def _with_tracer(tracer, to_wrap):
11 | def wrapper(wrapped, instance, args, kwargs):
12 | return func(tracer, to_wrap, wrapped, instance, args, kwargs)
13 |
14 | return wrapper
15 |
16 | return _with_tracer
17 |
18 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps  # local import keeps this decorator self-contained

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    @wraps(func)  # preserve func's metadata (__name__, __doc__) for introspection
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Debug level: instrumentation failures must never spam user logs.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            # Optional hook letting the host application observe swallowed errors.
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
42 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/opentelemetry/instrumentation/transformers/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
3 | source = ["opentelemetry/instrumentation/transformers"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-instrumentation-transformers"
11 | version = "0.40.7"
12 | description = "OpenTelemetry transformers instrumentation"
13 | authors = [
14 | "Gal Kleinman ",
15 | "Nir Gazit ",
16 | "Tomer Friedman ",
17 | ]
18 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-transformers"
19 | license = "Apache-2.0"
20 | readme = "README.md"
21 |
22 | [[tool.poetry.packages]]
23 | include = "opentelemetry/instrumentation/transformers"
24 |
25 | [tool.poetry.dependencies]
26 | python = ">=3.9,<4"
27 | opentelemetry-api = "^1.28.0"
28 | opentelemetry-instrumentation = ">=0.50b0"
29 | opentelemetry-semantic-conventions = ">=0.50b0"
30 | opentelemetry-semantic-conventions-ai = "0.4.9"
31 |
32 | [tool.poetry.group.dev.dependencies]
33 | autopep8 = "^2.2.0"
34 | flake8 = "7.0.0"
35 | pytest = "^8.2.2"
36 | pytest-sugar = "1.0.0"
37 |
38 | [build-system]
39 | requires = ["poetry-core"]
40 | build-backend = "poetry.core.masonry.api"
41 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | pytest_plugins = []
4 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-transformers/tests/test_placeholder.py:
--------------------------------------------------------------------------------
def test_placeholder():
    """Placeholder keeping the suite non-empty until real tests are added."""
    pass
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry VertexAI Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing VertexAI prompts and completions sent with the official [VertexAI library](https://github.com/googleapis/python-aiplatform).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-vertexai
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
19 |
20 | VertexAIInstrumentor().instrument()
21 | ```
22 |
23 | ## Privacy
24 |
25 | **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26 |
27 | However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28 |
29 | To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30 |
31 | ```bash
32 | TRACELOOP_TRACE_CONTENT=false
33 | ```
34 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/config.py:
--------------------------------------------------------------------------------
class Config:
    """Global configuration flags for the VertexAI instrumentation."""

    # Optional callable invoked with exceptions swallowed by @dont_throw.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 |
4 | from opentelemetry.instrumentation.vertexai.config import Config
5 |
6 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps  # local import keeps this decorator self-contained

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    @wraps(func)  # preserve func's metadata (__name__, __doc__) for introspection
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Debug level: instrumentation failures must never spam user logs.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            # Optional hook letting the host application observe swallowed errors.
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
9 |
10 | pytest_plugins = []
11 |
12 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter and instrument VertexAI once
    for the whole test session."""
    span_exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(span_exporter))
    trace.set_tracer_provider(provider)

    VertexAIInstrumentor().instrument()

    return span_exporter
25 |
26 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    """Reset captured spans before every test so assertions only see new spans."""
    exporter.clear()
30 |
31 |
@pytest.fixture(scope="module")
def vcr_config():
    """VCR settings: strip auth headers from recorded cassettes."""
    return {"filter_headers": ["authorization"]}
35 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-vertexai/tests/test_placeholder.py:
--------------------------------------------------------------------------------
def test_placeholder():
    """Placeholder keeping the suite non-empty until real tests are added."""
    pass
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/opentelemetry/instrumentation/watsonx/config.py:
--------------------------------------------------------------------------------
class Config:
    """Global configuration flags for the IBM Watsonx instrumentation."""

    # Optional callable invoked with exceptions swallowed by @dont_throw.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/opentelemetry/instrumentation/watsonx/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 |
4 | from opentelemetry.instrumentation.watsonx.config import Config
5 |
6 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps  # local import keeps this decorator self-contained

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    @wraps(func)  # preserve func's metadata (__name__, __doc__) for introspection
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Debug level: instrumentation failures must never spam user logs.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            # Optional hook letting the host application observe swallowed errors.
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/opentelemetry/instrumentation/watsonx/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "opentelemetry-instrumentation-watsonx"
3 | version = "0.40.7"
4 | description = "OpenTelemetry IBM Watsonx Instrumentation"
5 | authors = ["Guangya Liu "]
6 | repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-watsonx"
7 | license = "Apache-2.0"
8 | readme = "README.md"
9 |
10 | [[tool.poetry.packages]]
11 | include = "opentelemetry/instrumentation/watsonx"
12 |
13 | [tool.poetry.dependencies]
14 | python = ">=3.9,<4"
15 | opentelemetry-api = "^1.28.0"
16 | opentelemetry-instrumentation = ">=0.50b0"
17 | opentelemetry-semantic-conventions = ">=0.50b0"
18 | opentelemetry-semantic-conventions-ai = "0.4.9"
19 |
20 | [tool.poetry.group.dev.dependencies]
21 | autopep8 = "^2.2.0"
22 | flake8 = "7.0.0"
23 | pytest = "^8.2.2"
24 | pytest-sugar = "1.0.0"
25 |
26 | [tool.poetry.group.test.dependencies]
27 | pytest = "^8.2.2"
28 | pytest-sugar = "1.0.0"
29 | vcrpy = "^6.0.1"
30 | pytest-recording = "^0.13.1"
31 | opentelemetry-sdk = "^1.27.0"
32 | pytest-asyncio = "^0.23.7"
33 | ibm-watson-machine-learning = "1.0.333"
34 |
35 | [build-system]
36 | requires = ["poetry-core"]
37 | build-backend = "poetry.core.masonry.api"
38 |
39 | [tool.poetry.extras]
40 | instruments = ["ibm-watson-machine-learning"]
41 |
42 | [tool.poetry.plugins."opentelemetry_instrumentor"]
43 | ibm-watson-machine-learning = "opentelemetry.instrumentation.watsonx:WatsonxInstrumentor"
44 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 |
5 |
6 | pytest_plugins = []
7 |
8 |
@pytest.fixture
def watson_ai_model():
    """Provide a Watsonx ModelInference built with dummy credentials.

    Returns None (rather than raising) when ibm_watsonx_ai is not
    installed, so dependent tests can detect the missing dependency.
    """
    # NOTE(review): pyproject's test group pins ibm-watson-machine-learning,
    # but this imports ibm_watsonx_ai — confirm which package is intended.
    try:
        # Check for required package in the env, skip test if could not found
        from ibm_watsonx_ai.foundation_models import ModelInference
    except ImportError:
        print("no supported ibm_watsonx_ai package found, model creating skipped.")
        return None

    # Dummy project id / api key: requests are expected to be replayed from
    # VCR cassettes, never sent to a live Watsonx endpoint.
    watsonx_ai_model = ModelInference(
        model_id="google/flan-ul2",
        project_id="c1234567-2222-2222-3333-444444444444",
        credentials={
            "apikey": "test_api_key",
            "url": "https://us-south.ml.cloud.ibm.com"
        },
    )
    return watsonx_ai_model
27 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/tests/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/tests/metrics/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import metrics
5 | from opentelemetry.sdk.resources import Resource
6 | from opentelemetry.sdk.metrics import MeterProvider
7 | from opentelemetry.sdk.metrics.export import InMemoryMetricReader
8 | from opentelemetry.instrumentation.watsonx import WatsonxInstrumentor
9 |
10 |
@pytest.fixture(scope="session")
def metrics_test_context():
    """Install an in-memory meter provider and instrument Watsonx once per session.

    Returns (provider, reader) so tests can collect the recorded metrics.
    """
    resource = Resource.create()
    reader = InMemoryMetricReader()
    provider = MeterProvider(metric_readers=[reader], resource=resource)

    # Global provider: must be set before any instrumented code creates meters.
    metrics.set_meter_provider(provider)

    try:
        # Check for required package in the env, skip test if could not found
        from ibm_watsonx_ai.foundation_models import ModelInference
        # to avoid lint error
        del ModelInference
        WatsonxInstrumentor().instrument()
    except ImportError:
        print("no supported ibm_watsonx_ai package found, Watsonx instrumentation skipped.")

    return provider, reader
29 |
30 |
@pytest.fixture(scope="session", autouse=True)
def clear_metrics_test_context(metrics_test_context):
    """Shut down the session meter provider after all tests finish.

    The yield turns the shutdown calls into session teardown. Without it,
    the reader and provider were shut down during fixture SETUP — i.e.
    before any test ran — which defeats the fixture's purpose.
    """
    provider, reader = metrics_test_context

    yield

    reader.shutdown()
    provider.shutdown()
37 |
38 |
@pytest.fixture(scope="module")
def vcr_config():
    """VCR options shared by the metric tests in this module."""
    options = {}
    # Never persist credentials into cassettes.
    options["filter_headers"] = ["authorization"]
    # Let a single recorded response serve repeated requests.
    options["allow_playback_repeats"] = True
    options["decode_compressed_response"] = True
    return options
46 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/tests/traces/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-watsonx/tests/traces/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | import pytest
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
7 | from opentelemetry.sdk.trace.export import SimpleSpanProcessor
8 | from opentelemetry.instrumentation.watsonx import WatsonxInstrumentor
9 |
10 |
@pytest.fixture(scope="session")
def exporter():
    """Install an in-memory span exporter and instrument Watsonx once per session.

    Returns the exporter so tests can inspect finished spans.
    """
    exporter = InMemorySpanExporter()
    # SimpleSpanProcessor exports synchronously, so spans are visible to
    # assertions immediately after the traced call returns.
    processor = SimpleSpanProcessor(exporter)

    provider = TracerProvider()
    provider.add_span_processor(processor)
    trace.set_tracer_provider(provider)

    try:
        # Check for required package in the env, skip test if could not found
        from ibm_watsonx_ai.foundation_models import ModelInference
        # to avoid lint error
        del ModelInference
        WatsonxInstrumentor().instrument()
    except ImportError:
        print("no supported ibm_watsonx_ai package found, Watsonx instrumentation skipped.")

    return exporter
30 |
31 |
@pytest.fixture(autouse=True)
def clear_exporter(exporter):
    # Runs before every test: drop spans recorded by previous tests so each
    # test only sees its own spans.
    exporter.clear()
35 |
36 |
@pytest.fixture(scope="module")
def vcr_config():
    """Shared VCR options for the trace tests in this module."""
    # Strip auth headers from cassettes; allow replays; decode gzip bodies.
    return dict(
        filter_headers=["authorization"],
        allow_playback_repeats=True,
        decode_compressed_response=True,
    )
44 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Weaviate Instrumentation
2 |
3 |
4 |
5 |
6 |
7 | This library allows tracing client-side calls to Weaviate vector DB sent with the official [Weaviate library](https://github.com/weaviate/weaviate-python-client).
8 |
9 | ## Installation
10 |
11 | ```bash
12 | pip install opentelemetry-instrumentation-weaviate
13 | ```
14 |
15 | ## Example usage
16 |
17 | ```python
18 | from opentelemetry.instrumentation.weaviate import WeaviateInstrumentor
19 |
20 | WeaviateInstrumentor().instrument()
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/opentelemetry/instrumentation/weaviate/config.py:
--------------------------------------------------------------------------------
class Config:
    # Global hook (a callable or None). When set, utils.dont_throw passes
    # any exception raised by an instrumented call to this callable.
    exception_logger = None
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/opentelemetry/instrumentation/weaviate/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 |
4 | from opentelemetry.instrumentation.weaviate.config import Config
5 |
6 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    Swallowed exceptions are logged at DEBUG level and forwarded to
    Config.exception_logger when one is configured; the wrapper then
    returns None for the failed call.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves __name__/__doc__ of the wrapped function so
    # stack traces, logs and introspection keep pointing at the real callee.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
30 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/opentelemetry/instrumentation/weaviate/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-instrumentation-weaviate/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Semantic Conventions extensions for gen-AI applications
2 |
3 |
4 |
5 |
6 |
7 | This is an extension of the standard [OpenTelemetry Semantic Conventions](https://github.com/open-telemetry/semantic-conventions) for gen AI applications. It defines additional attributes for spans that are useful for debugging and monitoring prompts, completions, token usage, etc.
8 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    A failed call is logged once at WARNING level and the wrapper returns
    None instead of raising.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps keeps the wrapped function's metadata intact.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Single warning: the former second message ("Failed to set
            # attributes for openai span") was a copy-paste from another
            # instrumentation and logged misleading, duplicated output.
            logger.warning(
                "Failed to execute %s, error: %s", func.__name__, str(e)
            )  # TODO: posthog instead

    return wrapper
26 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.4.9"
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.coverage.run]
2 | branch = true
source = ["opentelemetry/semconv_ai"]
4 |
5 | [tool.coverage.report]
6 | exclude_lines = ['if TYPE_CHECKING:']
7 | show_missing = true
8 |
9 | [tool.poetry]
10 | name = "opentelemetry-semantic-conventions-ai"
11 | version = "0.4.9"
12 | description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
13 | authors = [
14 | "Gal Kleinman ",
15 | "Nir Gazit ",
16 | "Tomer Friedman ",
17 | ]
18 | license = "Apache-2.0"
19 | readme = "README.md"
20 |
21 | [[tool.poetry.packages]]
22 | include = "opentelemetry/semconv_ai"
23 |
24 | [tool.poetry.dependencies]
25 | python = ">=3.9,<4"
26 |
27 | [tool.poetry.group.dev.dependencies]
28 | autopep8 = "^2.2.0"
29 | flake8 = "7.0.0"
30 | pytest = "^8.2.2"
31 | pytest-sugar = "1.0.0"
32 |
33 | [build-system]
34 | requires = ["poetry-core"]
35 | build-backend = "poetry.core.masonry.api"
36 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | pytest_plugins = []
4 |
--------------------------------------------------------------------------------
/packages/opentelemetry-semantic-conventions-ai/tests/test_placeholder.py:
--------------------------------------------------------------------------------
def test_placeholder():
    """Placeholder so the package's test suite always collects at least one test."""
    pass
3 |
--------------------------------------------------------------------------------
/packages/sample-app/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 |
--------------------------------------------------------------------------------
/packages/sample-app/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/sample-app/README.md:
--------------------------------------------------------------------------------
1 | # sample-app
2 |
Sample applications demonstrating OpenLLMetry instrumentations with the Traceloop SDK.
4 |
--------------------------------------------------------------------------------
/packages/sample-app/data/vision/elephant.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/sample-app/data/vision/elephant.jpeg
--------------------------------------------------------------------------------
/packages/sample-app/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/__init__.py:
--------------------------------------------------------------------------------
1 | """Sample Application using Traceloop SDK"""
2 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/anthropic_joke_example.py:
--------------------------------------------------------------------------------
1 | from anthropic import Anthropic
2 |
3 | from traceloop.sdk import Traceloop
4 | from traceloop.sdk.decorators import workflow
5 |
6 | Traceloop.init()
7 |
8 |
@workflow(name="pirate_joke_generator")
def joke_workflow():
    """Request a single OpenTelemetry joke from Anthropic, print and return the response."""
    anthropic = Anthropic()
    response = anthropic.messages.create(
        max_tokens=1024,
        messages=[
            {
                "role": "user",
                "content": "Tell me a joke about OpenTelemetry",
            }
        ],
        model="claude-3-opus-20240229",
    )
    # response.content is the list of content blocks from the Messages API.
    print(response.content)
    return response


joke_workflow()
27 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/anthropic_joke_streaming_example.py:
--------------------------------------------------------------------------------
1 | from anthropic import Anthropic
2 | from traceloop.sdk import Traceloop
3 | from traceloop.sdk.decorators import workflow
4 |
5 |
6 | Traceloop.init()
7 |
8 |
@workflow(name="pirate_joke_streaming_generator")
def joke_workflow():
    """Stream a joke from Anthropic, print it, and return the accumulated text.

    Fix: the function previously returned the `stream` object itself, but
    the loop below fully consumes it, so callers received an exhausted
    iterator. Returning the assembled text is what the function actually
    produces (the return value was unused in this sample).
    """
    anthropic = Anthropic()
    stream = anthropic.messages.create(
        max_tokens=1024,
        messages=[
            {
                "role": "user",
                "content": "Tell me a joke about OpenTelemetry",
            }
        ],
        model="claude-3-haiku-20240307",
        stream=True,
    )
    response_content = ""
    for event in stream:
        # Only text deltas carry printable content; other stream events
        # (message_start, content_block_start, ...) are skipped.
        if event.type == 'content_block_delta' and event.delta.type == 'text_delta':
            response_content += event.delta.text
    print(response_content)
    return response_content


joke_workflow()
32 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/azure_openai.py:
--------------------------------------------------------------------------------
# Minimal Azure OpenAI chat-completion sample, auto-instrumented by Traceloop.
import os
from openai import AzureOpenAI
from traceloop.sdk import Traceloop

Traceloop.init()

# Endpoint, key and deployment are all read from the environment.
client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version="2024-02-01",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
)

# On Azure, `model` takes the deployment name rather than a model id.
deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_ID")

response = client.chat.completions.create(
    model=deployment_name,
    messages=[{"role": "user", "content": "Tell me a joke about OpenTelemetry"}],
    max_tokens=50,
)
print(response.choices[0].message.content)
21 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/bedrock_example_app.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import json
3 |
4 | from traceloop.sdk import Traceloop
5 | from traceloop.sdk.decorators import task, workflow
6 |
7 | Traceloop.init(app_name="joke_generation_service")
8 | brt = boto3.client(service_name='bedrock-runtime')
9 |
10 |
@task(name="joke_creation")
def create_joke():
    """Invoke the Cohere Command model on Bedrock and return the generated joke text."""

    # Cohere-specific request body; other Bedrock model families use
    # different parameter names.
    body = json.dumps({
        "prompt": "Tell me a joke about opentelemetry",
        "max_tokens": 200,
        "temperature": 0.5,
        "p": 0.5,
    })

    response = brt.invoke_model(
        body=body,
        modelId='cohere.command-text-v14',
        accept='application/json',
        contentType='application/json'
    )

    # The response body is a streaming object; read and parse it as JSON.
    response_body = json.loads(response.get('body').read())

    return response_body.get('generations')[0].get('text')
31 |
32 |
@workflow(name="pirate_joke_generator")
def joke_workflow():
    """Top-level traced workflow: generate a joke and print it."""
    print(create_joke())


joke_workflow()
39 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/classes_decorated_app.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from openai import OpenAI
4 |
5 | from traceloop.sdk import Traceloop
6 | from traceloop.sdk.decorators import agent, workflow
7 |
8 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
9 |
10 | Traceloop.init(app_name="joke_generation_service")
11 |
12 |
@agent(name="base_joke_generator", method_name="generate_joke")
class JokeAgent:
    """Baseline joke-generating agent; `generate_joke` is the traced method."""

    def generate_joke(self):
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Tell me a joke about Donald Trump"}],
        )

        return completion.choices[0].message.content
22 |
23 |
@agent(method_name="generate_joke")
class PirateJokeAgent(JokeAgent):
    """Variant of JokeAgent that adds a pirate persona via a system message."""

    def generate_joke(self):
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a funny sarcastic pirate"},
                {"role": "user", "content": "Tell me a joke about Donald Trump"},
            ],
        )

        return completion.choices[0].message.content
36 |
37 |
@workflow(name="jokes_generation")
def joke_generator():
    """Run both agents inside one traced workflow, tagging traces with a user id."""
    # Association properties attach metadata (here a user id) to the traces.
    Traceloop.set_association_properties({"user_id": "user_12345"})

    print(f"Simple Joke: {JokeAgent().generate_joke()}")
    print(f"Pirate Joke: {PirateJokeAgent().generate_joke()}")


joke_generator()
47 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/crewai_example.py:
--------------------------------------------------------------------------------
1 | from crewai import Agent, Task, Crew
2 | from langchain_openai import ChatOpenAI
3 | from traceloop.sdk import Traceloop
4 |
Traceloop.init(app_name="crewai-example")

# Single LLM shared by both agents.
llm = ChatOpenAI(model="gpt-4o-mini")

researcher = Agent(
    role='Research Analyst',
    goal='Conduct detailed market research',
    backstory='Expert in market analysis with keen attention to detail',
    llm=llm,
    verbose=True
)

writer = Agent(
    role='Content Writer',
    goal='Create engaging content based on research',
    backstory='Experienced writer with expertise in creating compelling narratives',
    llm=llm,
    verbose=True
)

# NOTE(review): `expected_output_type` is not a documented crewai.Task
# field — confirm it is accepted by the pinned CrewAI version.
research_task = Task(
    description='Research the latest trends in AI and machine learning',
    agent=researcher,
    expected_output='A comprehensive report on current AI and machine learning trends',
    expected_output_type=str
)

writing_task = Task(
    description='Write a blog post about AI trends based on the research',
    agent=writer,
    expected_output='An engaging blog post covering the latest AI trends',
    expected_output_type=str
)

# Tasks run in listed order; the writing task builds on the research output.
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task],
    verbose=True
)

result = crew.kickoff()
print("Final Result:", result)
47 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/gemini.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 | import google.generativeai as genai
4 | from traceloop.sdk import Traceloop
5 | from traceloop.sdk.decorators import workflow
6 |
7 | Traceloop.init(app_name="gemini_example")
8 |
9 | genai.configure(api_key=os.environ.get("GENAI_API_KEY"))
10 |
11 |
@workflow("predict")
def predict_text() -> str:
    """Ideation example with a Large Language Model"""

    model = genai.GenerativeModel("gemini-1.5-pro-002")
    response = model.generate_content(
        "Give me ten interview questions for the role of program manager.",
    )

    # Convenience accessor for the first candidate's text.
    return response.text
22 |
23 |
@workflow("predict_async")
async def async_predict_text() -> str:
    """Async Ideation example with a Large Language Model"""

    model = genai.GenerativeModel("gemini-1.5-pro-002")
    # Same prompt as predict_text, but via the async client API.
    response = await model.generate_content_async(
        "Give me ten interview questions for the role of program manager.",
    )

    return response.text
34 |
35 |
@workflow("chat")
def chat() -> str:
    """Chat example with a Large Language Model.

    Sends two turns in one chat session and returns the last reply's text.
    """
    model = genai.GenerativeModel("gemini-1.5-pro-002")
    # Distinct local name avoids shadowing the function's own name.
    session = model.start_chat()
    session.send_message("Hello, how are you?")
    response = session.send_message("What is the capital of France?")

    return response.text
46 |
47 |
if __name__ == "__main__":
    # Run the sync examples first, then the async one via asyncio.run.
    print(chat())
    print(predict_text())
    print(asyncio.run(async_predict_text()))
52 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/groq_example.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from traceloop.sdk.decorators import task, workflow
4 |
5 | from groq import Groq
6 | from traceloop.sdk import Traceloop
7 |
8 | Traceloop.init(app_name="groq_example")
9 |
10 | client = Groq(
11 | api_key=os.environ.get("GROQ_API_KEY"),
12 | )
13 |
14 |
@task(name="generate_joke")
def generate_joke():
    """Ask a Llama 3 model on Groq for an OpenTelemetry joke and return the text."""
    completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": "Tell me a joke about OpenTelemetry",
            }
        ],
        model="llama3-8b-8192",
    )

    return completion.choices[0].message.content
28 |
29 |
@workflow(name="joke_generator")
def joke_generator():
    """Top-level traced workflow: generate a joke and print it."""
    joke = generate_joke()
    print(joke)


joke_generator()
37 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/langchain_agent.py:
--------------------------------------------------------------------------------
1 | from langchain.agents import AgentType
2 | from langchain.agents import initialize_agent, Tool
3 | from langchain.chains import LLMMathChain
4 | from langchain_community.llms.openai import OpenAI
5 | from langchain.utilities import DuckDuckGoSearchAPIWrapper
6 |
7 |
8 | from traceloop.sdk import Traceloop
9 |
10 | Traceloop.init(app_name="langchain_agent")
11 |
12 |
def langchain_app():
    """ReAct agent demo: search + calculator tools over an OpenAI LLM."""
    # NOTE(review): initialize_agent / AgentType are legacy LangChain APIs;
    # confirm they exist in the pinned langchain version.
    llm = OpenAI(temperature=0, streaming=True)
    search = DuckDuckGoSearchAPIWrapper()
    llm_math_chain = LLMMathChain.from_llm(llm)
    tools = [
        Tool(
            name="Search",
            func=search.run,
            description="useful for when you need to answer questions about "
            + "current events. You should ask targeted questions",
        ),
        Tool(
            name="Calculator",
            func=llm_math_chain.run,
            description="useful for when you need to answer questions about math",
        ),
    ]

    # Initialize agent
    mrkl = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )

    print(
        mrkl.run(
            "What is the full name of the artist who recently released an album called "
            + "'The Storm Before the Calm' and are they in the FooBar database? "
            + "If so, what albums of theirs are in the FooBar database?"
        )
    )


langchain_app()
46 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/langchain_app.py:
--------------------------------------------------------------------------------
1 | from langchain_core.messages import SystemMessage, HumanMessage
2 | from langchain_core.prompts import ChatPromptTemplate
3 | from langchain_openai import ChatOpenAI
4 | from langchain_core.output_parsers import StrOutputParser
5 |
6 | from traceloop.sdk import Traceloop
7 |
8 | Traceloop.init(app_name="langchain_example")
9 |
10 |
def langchain_app():
    """Generate an OpenTelemetry joke, then translate it to Sindarin.

    Fix: a HumanMessage with a literal "{subject}" placeholder is NOT
    templated by ChatPromptTemplate — message objects pass through
    verbatim — so the original code sent the raw "{subject}" text to the
    model and ignored the `invoke({"subject": ...})` input. Using a
    (role, template) tuple makes the placeholder a real template variable.
    """
    chat = ChatOpenAI(temperature=0)

    # Step 1: Get a joke about OpenTelemetry
    joke_prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a funny sarcastic nerd."),
        ("human", "Tell me a joke about {subject}.")
    ])

    # Get the joke
    subject = "OpenTelemetry"
    joke_chain = joke_prompt | chat | StrOutputParser()
    joke = joke_chain.invoke({"subject": subject})

    print(f"Generated joke: {joke}")

    # Step 2: Translate the joke to Sindarin
    # Message objects are fine here: the joke text is interpolated via the
    # f-string before the prompt is built, so no template variables remain.
    translation_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content="You are an Elf."),
        HumanMessage(content=f"Translate this joke into Sindarin language:\n{joke}")
    ])

    # Get the translation
    translation_chain = translation_prompt | chat | StrOutputParser()
    translation = translation_chain.invoke({})

    result = {
        "subject": subject,
        "joke": joke,
        "text": translation
    }

    print(result)


langchain_app()
47 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/langchain_lcel.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from langchain.prompts import ChatPromptTemplate
3 | from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
4 | from langchain_community.utils.openai_functions import (
5 | convert_pydantic_to_openai_function,
6 | )
7 | from langchain_openai import ChatOpenAI
8 | from pydantic import BaseModel, Field
9 |
10 |
11 | from traceloop.sdk import Traceloop
12 |
13 | Traceloop.init(app_name="lcel_example")
14 |
15 |
class Joke(BaseModel):
    """Joke to tell user."""

    # Field descriptions become parameter docs in the derived OpenAI
    # function schema (see convert_pydantic_to_openai_function below).
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")
20 | punchline: str = Field(description="answer to resolve the joke")
21 |
22 |
async def chain():
    """LCEL demo: prompt | function-calling model | JSON parser, invoked async.

    Returns the model's function-call arguments parsed as a dict.
    """
    openai_functions = [convert_pydantic_to_openai_function(Joke)]

    prompt = ChatPromptTemplate.from_messages(
        [("system", "You are helpful assistant"), ("user", "{input}")]
    )
    model = ChatOpenAI(model="gpt-3.5-turbo")
    output_parser = JsonOutputFunctionsParser()

    chain = prompt | model.bind(functions=openai_functions) | output_parser
    # Second argument is the run config; metadata propagates to tracing.
    return await chain.ainvoke(
        {"input": "tell me a short joke"}, {"metadata": {"user_id": "1234"}}
    )


print(asyncio.run(chain()))
39 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/langchain_watsonx.py:
--------------------------------------------------------------------------------
1 | import os
2 | from langchain_core.prompts import PromptTemplate
3 | from langchain.chains import LLMChain
4 | from langchain_ibm import WatsonxLLM
5 |
6 | from traceloop.sdk import Traceloop
7 |
8 | Traceloop.init(app_name="langchain_watsonx")
9 |
# Generation parameters passed straight through to the Watsonx model.
parameters = {
    "decoding_method": "sample",
    "max_new_tokens": 100,
    "min_new_tokens": 1,
    "temperature": 0.5,
    "top_k": 50,
    "top_p": 1,
}

# Credentials are resolved by langchain_ibm from the environment
# (WATSONX_APIKEY etc.); only the project id is read explicitly here.
watsonx_llm = WatsonxLLM(
    model_id="ibm/granite-13b-instruct-v2",
    url="https://us-south.ml.cloud.ibm.com",
    project_id=os.environ.get("WATSONX_PROJECT_ID"),
    params=parameters,
)

template = "Generate a random question about {topic}: Question: "
prompt = PromptTemplate.from_template(template)

# NOTE(review): LLMChain is a legacy LangChain construct; confirm it is
# still available in the pinned langchain version.
llm_chain = LLMChain(prompt=prompt, llm=watsonx_llm)
print(llm_chain.invoke("dog"))
31 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/litellm_example.py:
--------------------------------------------------------------------------------
1 | import litellm
2 |
3 |
def test_traceloop_logging():
    """Send one completion through LiteLLM with the Traceloop success callback.

    Best-effort demo: failures (missing API key, network) are reported but do
    not crash the sample.
    """
    try:
        litellm.success_callback = ["traceloop"]
        from traceloop.sdk import Traceloop
        from traceloop.sdk.instruments import Instruments

        # Only instrument OpenAI; LiteLLM itself reports via the callback.
        Traceloop.init(app_name="...", instruments={Instruments.OPENAI})
        litellm.set_verbose = False
        response = litellm.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "This is a test"}],
            max_tokens=10,
            temperature=0.7,
            timeout=5,
        )
        print(f"response: {response}")
    except Exception as e:
        # Previously a silent `pass`; surface the failure so the demo is
        # debuggable while keeping its best-effort behavior.
        print(f"test_traceloop_logging failed: {e}")


test_traceloop_logging()
25 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/llama_index_chroma_app.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import chromadb
4 | import os
5 |
6 | from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
7 | from llama_index.vector_stores.chroma import ChromaVectorStore
8 | from llama_index.embeddings.huggingface import HuggingFaceEmbedding
9 | from traceloop.sdk import Traceloop
10 |
# Silence the HuggingFace tokenizers fork/parallelism warning.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

Traceloop.init(app_name="llama_index_example")

# In-memory Chroma instance; data is lost when the process exits.
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")

# define embedding function
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")

# load documents
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()

# set up ChromaVectorStore and load in data
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, embed_model=embed_model
)
30 |
31 |
async def main():
    """Query the Chroma-backed index asynchronously and print the answer."""
    engine = index.as_query_engine()
    answer = await engine.aquery("What did the author do growing up?")
    print(answer)


if __name__ == "__main__":
    asyncio.run(main())
42 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/llama_index_workflow_app.py:
--------------------------------------------------------------------------------
1 | from llama_index.core.workflow import (
2 | Event,
3 | StartEvent,
4 | StopEvent,
5 | Workflow,
6 | step,
7 | )
8 | from llama_index.llms.openai import OpenAI
9 | from traceloop.sdk import Traceloop
10 |
11 |
class JokeEvent(Event):
    """Workflow event carrying the generated joke between steps."""

    # joke text produced by the generate_joke step
    joke: str
14 |
15 |
class JokeFlow(Workflow):
    """Two-step workflow: write a joke about a topic, then critique it."""

    llm = OpenAI()

    @step
    async def generate_joke(self, ev: StartEvent) -> JokeEvent:
        """Generate a joke about the topic carried by the start event."""
        joke_text = await self.llm.acomplete(f"Write your best joke about {ev.topic}.")
        return JokeEvent(joke=str(joke_text))

    @step
    async def critique_joke(self, ev: JokeEvent) -> StopEvent:
        """Critique the joke from generate_joke and stop the flow."""
        critique = await self.llm.acomplete(
            f"Give a thorough analysis and critique of the following joke: {ev.joke}"
        )
        return StopEvent(result=str(critique))
34 |
35 |
async def main():
    """Initialize tracing and run the joke workflow end to end."""
    Traceloop.init(app_name="llama_index_workflow_app")

    flow = JokeFlow(timeout=60, verbose=False)
    outcome = await flow.run(topic="pirates")
    print(str(outcome))


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
48 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/ollama_streaming.py:
--------------------------------------------------------------------------------
from traceloop.sdk import Traceloop
from ollama import chat

# Instrument the process; Ollama calls are traced automatically.
Traceloop.init()

# Small local model so the demo runs quickly.
base_model = "gemma3:1b"
7 |
8 |
def ollama_chat():
    """Stream a chat completion from the local Ollama model.

    Prints each chunk as it arrives and returns the full concatenated
    response text (previously the accumulated text was built and then
    discarded because the function returned None).
    """
    stream_response = chat(
        model=base_model,
        messages=[
            {
                'role': 'user',
                'content': 'Tell a joke about opentelemetry'
            },
        ],
        stream=True
    )

    full_response = ""
    print("Streaming response:")
    for chunk in stream_response:
        if chunk.message and chunk.message.content:
            print(chunk.message.content, end="", flush=True)
            full_response += chunk.message.content
    return full_response
27 |
28 |
def main():
    # Entry point: run the streaming chat demo once.
    ollama_chat()


if __name__ == "__main__":
    main()
35 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/openai_assistant.py:
--------------------------------------------------------------------------------
import time
from openai import OpenAI
from traceloop.sdk import Traceloop

Traceloop.init()

client = OpenAI()

# One-off assistant with the code interpreter tool enabled.
assistant = client.beta.assistants.create(
    name="Math Tutor",
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    tools=[{"type": "code_interpreter"}],
    model="gpt-4-turbo-preview",
)

thread = client.beta.threads.create()

message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
)

# Run-level instructions extend the assistant's base instructions.
run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant.id,
    instructions="Please address the user as Jane Doe. The user has a premium account.",
)

# Poll once per second until the run reaches a terminal state.
while run.status in ["queued", "in_progress", "cancelling"]:
    time.sleep(1)  # Wait for 1 second
    run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)

if run.status == "completed":
    # Print the conversation oldest-first.
    messages = client.beta.threads.messages.list(thread_id=thread.id, order="asc")
    for data in messages.data:
        print(f"{data.role}: {data.content[0].text.value}")
else:
    # Terminal but not completed (failed, cancelled, expired, ...).
    print(run.status)
40 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/openai_streaming.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 |
5 | from traceloop.sdk import Traceloop
6 | from traceloop.sdk.decorators import workflow
7 |
8 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
9 |
10 | Traceloop.init(app_name="story_service")
11 |
12 |
13 | @workflow(name="streaming_story")
14 | def joke_workflow():
15 | stream = client.chat.completions.create(
16 | model="gpt-4o-2024-05-13",
17 | messages=[{"role": "user", "content": "Tell me a story about opentelemetry"}],
18 | stream=True,
19 | )
20 |
21 | for part in stream:
22 | print(part.choices[0].delta.content or "", end="")
23 | print()
24 |
25 |
26 | joke_workflow()
27 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/openai_structured_outputs.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 | from pydantic import BaseModel
4 | from traceloop.sdk import Traceloop
5 | from traceloop.sdk.decorators import workflow, task
6 |
7 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
8 |
9 | Traceloop.init(app_name="story_service")
10 |
11 |
class StoryStructure(BaseModel):
    """Schema the model must fill when parsing a story via structured outputs."""

    setting: str
    protagonist: str
    problem: str
    resolution: str
    moral: str
18 |
19 |
@task()
def build_joke():
    """Ask the model for a story and return the raw text."""
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Tell me a story about opentelemetry"}],
    )
    return completion.choices[0].message.content
28 |
29 |
@task()
def build_joke_structure(joke: str):
    """Parse the free-form story into a StoryStructure via structured outputs."""
    parse_messages = [
        {
            "role": "system",
            "content": "Extract the story structure from the following.",
        },
        {"role": "user", "content": joke},
    ]
    parsed = client.beta.chat.completions.parse(
        model="gpt-4o-2024-08-06",
        messages=parse_messages,
        response_format=StoryStructure,
    )
    return parsed.choices[0].message.parsed
45 |
46 |
47 | @workflow(name="joke_structure")
48 | def joke_structure():
49 | joke = build_joke()
50 | return build_joke_structure(joke)
51 |
52 |
53 | structure = joke_structure()
54 | print(structure)
55 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/prompt_registry_example_app.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 |
5 | from traceloop.sdk import Traceloop
6 | from traceloop.sdk.decorators import task, workflow
7 | from traceloop.sdk.prompts import get_prompt
8 |
9 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
10 |
11 | Traceloop.init(app_name="prompt_registry_example_app")
12 |
13 |
14 | @task(name="generate_joke")
15 | def generate_pirate_joke():
16 | prompt_args = get_prompt(key="joke_generator", variables={"persona": "pirate"})
17 | completion = client.chat.completions.create(**prompt_args)
18 |
19 | return completion.choices[0].message.content
20 |
21 |
22 | @workflow(name="joke_generation_using_prompt_registry")
23 | def generate_joke():
24 | print(generate_pirate_joke())
25 |
26 |
27 | generate_joke()
28 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/prompt_registry_vision.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 |
5 | from traceloop.sdk import Traceloop
6 | from traceloop.sdk.decorators import task, workflow
7 | from traceloop.sdk.prompts import get_prompt
8 |
9 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
10 |
11 | Traceloop.init(app_name="prompt_registry_vision")
12 |
13 |
14 | @task(name="describe_picture")
15 | def describe_picture():
16 | prompt_args = get_prompt(key="vision", variables={"words": 2})
17 | completion = client.chat.completions.create(**prompt_args)
18 |
19 | return completion.choices[0].message.content
20 |
21 |
22 | @workflow(name="picture_description")
23 | def picture_description():
24 | print(describe_picture())
25 |
26 |
27 | picture_description()
28 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/replicate_functions.py:
--------------------------------------------------------------------------------
1 | import replicate
2 |
3 | from traceloop.sdk import Traceloop
4 | from traceloop.sdk.decorators import task, workflow
5 |
6 | Traceloop.init(app_name="image_generation_service")
7 |
8 |
9 | @task(name="image_generation")
10 | def generate_image():
11 | images = replicate.run(
12 | "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4",
13 | input={"prompt": "tiny robot"},
14 | )
15 | return images
16 |
17 |
18 | @workflow(name="robot_image_generator")
19 | def image_workflow():
20 | print(generate_image())
21 |
22 |
23 | image_workflow()
24 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/replicate_streaming.py:
--------------------------------------------------------------------------------
1 | import replicate
2 |
3 | from traceloop.sdk import Traceloop
4 | from traceloop.sdk.decorators import task, workflow
5 |
6 | Traceloop.init(app_name="chat_stream_generation_service")
7 |
8 |
9 | @task(name="chat_stream_generation")
10 | def generate_chat_stream():
11 | model_version = "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
12 | chat_stream = replicate.stream(model_version, input={"prompt": "tiny robot"})
13 | for event in chat_stream:
14 | print(str(event), end="")
15 |
16 |
17 | @workflow(name="chat_stream_generator")
18 | def chat_stream_workflow():
19 | generate_chat_stream()
20 |
21 |
22 | chat_stream_workflow()
23 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/thread_pool_example.py:
--------------------------------------------------------------------------------
1 | import pinecone
2 | from traceloop.sdk import Traceloop
3 | from traceloop.sdk.decorators import workflow
4 | from concurrent.futures import ThreadPoolExecutor
5 | import contextvars
6 | import functools
7 |
8 | Traceloop.init("thread_pool_example")
9 |
10 |
11 | @workflow("retrieval_flow")
12 | def do_retrieval(index: pinecone.Index):
13 | with ThreadPoolExecutor(max_workers=3) as executor:
14 | for _ in range(3):
15 | # Note: this is needed instead of calling `submit` directly, like this:
16 | # executor.submit(index.query, [1.0, 2.0, 3.0], top_k=10)
17 | ctx = contextvars.copy_context()
18 | executor.submit(
19 | ctx.run,
20 | functools.partial(index.query, [1.0, 2.0, 3.0], top_k=10),
21 | )
22 |
23 |
def get_index():
    """Return the demo Pinecone index, creating it on first use."""
    index_name = "thread-pool-repro"
    if index_name not in pinecone.list_indexes():
        pinecone.create_index(name=index_name, dimension=3, metric="dotproduct")
    return pinecone.Index(index_name)
29 |
30 |
def main():
    """Create/fetch the index and run the threaded retrieval workflow."""
    do_retrieval(get_index())


if __name__ == "__main__":
    main()
38 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/vertexai_streaming.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import vertexai
3 | from traceloop.sdk import Traceloop
4 | from traceloop.sdk.decorators import workflow
5 | from vertexai.generative_models import GenerativeModel
6 |
7 | Traceloop.init(app_name="stream_prediction_service")
8 |
9 | vertexai.init()
10 |
11 |
12 | @workflow("stream_prediction")
13 | async def streaming_prediction() -> str:
14 | """Streaming Text Example with a Large Language Model"""
15 |
16 | model = GenerativeModel(
17 | model_name="gemini-1.5-flash-001",
18 | system_instruction=[
19 | "You are a helpful language translator.",
20 | "Your mission is to translate text in English to French.",
21 | ],
22 | )
23 |
24 | prompt = """
25 | User input: I like bagels.
26 | Answer:
27 | """
28 |
29 | contents = [prompt]
30 |
31 | response = await model.generate_content_async(contents, stream=True)
32 | async for chunk in response:
33 | print(chunk.text)
34 |
35 |
36 | if __name__ == "__main__":
37 | asyncio.run(streaming_prediction())
38 |
--------------------------------------------------------------------------------
/packages/sample-app/sample_app/watsonx_generate.py:
--------------------------------------------------------------------------------
import os
from ibm_watson_machine_learning.foundation_models import Model

from traceloop.sdk import Traceloop

from dotenv import load_dotenv
load_dotenv()

# Allow the OTLP exporter to talk to a local collector without TLS.
os.environ['OTEL_EXPORTER_OTLP_INSECURE'] = 'True'

Traceloop.init(app_name="watsonx_example")


def get_credentials(api_key):
    """Build the WatsonX credentials dict for the us-south endpoint."""
    return {
        "url": "https://us-south.ml.cloud.ibm.com",
        "apikey": api_key,
    }


iam_api_key = os.environ["IAM_API_KEY"]
project_id = os.environ["PROJECT_ID"]

model_id = "meta-llama/llama-2-70b-chat"
# Sampling parameters forwarded verbatim to the generation endpoint.
parameters = {
    "decoding_method": "sample",
    "max_new_tokens": 60,
    "min_new_tokens": 10,
    "random_seed": 111,
    "temperature": 0.9,
    "top_k": 50,
    "top_p": 1,
    "repetition_penalty": 2
}

model = Model(
    model_id=model_id,
    params=parameters,
    credentials=get_credentials(iam_api_key),
    project_id=project_id
)

# The earlier multi-line prompt template was dead code (it was immediately
# overwritten by this assignment); only this prompt is sent. Typo
# "captical" -> "capital" fixed.
prompt_input = "What is the capital of China"
print(prompt_input)

generated_response = model.generate(prompt=prompt_input)
print(generated_response["results"][0]["generated_text"])
56 |
--------------------------------------------------------------------------------
/packages/sample-app/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/sample-app/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Unit tests configuration module."""
2 |
3 | pytest_plugins = []
4 |
--------------------------------------------------------------------------------
/packages/sample-app/tests/test_placeholder.py:
--------------------------------------------------------------------------------
def test_placeholder():
    """Placeholder so the package's test suite is never empty."""
    pass
3 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | __pycache__,
5 | build,
6 | dist,
7 | .tox,
8 | venv,
9 | .venv,
10 | .pytest_cache
11 | max-line-length = 120
12 | per-file-ignores = __init__.py:F401
13 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/.python-version:
--------------------------------------------------------------------------------
1 | 3.9.5
2 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/README.md:
--------------------------------------------------------------------------------
1 | # traceloop-sdk
2 |
3 | Traceloop’s Python SDK allows you to easily start monitoring and debugging your LLM execution. Tracing is done in a non-intrusive way, built on top of OpenTelemetry. You can choose to export the traces to Traceloop, or to your existing observability stack.
4 |
5 | ```python
6 | Traceloop.init(app_name="joke_generation_service")
7 |
8 | @workflow(name="joke_creation")
9 | def create_joke():
10 | completion = openai.ChatCompletion.create(
11 | model="gpt-3.5-turbo",
12 | messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}],
13 | )
14 |
15 | return completion.choices[0].message.content
16 | ```
17 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """unit tests."""
2 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/tests/__pycache__/conftest.cpython-311-pytest-7.4.0.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/traceloop-sdk/tests/__pycache__/conftest.cpython-311-pytest-7.4.0.pyc
--------------------------------------------------------------------------------
/packages/traceloop-sdk/tests/test_nested_tasks.py:
--------------------------------------------------------------------------------
1 | from opentelemetry.semconv_ai import SpanAttributes
2 | from traceloop.sdk.decorators import task, workflow
3 | from pytest import raises
4 |
5 |
def test_nested_tasks(exporter):
    """Nested tasks record their ancestor-task chain in TRACELOOP_ENTITY_PATH."""

    @workflow(name="some_workflow")
    def some_workflow():
        return outer_task()

    @task(name="outer_task")
    def outer_task():
        return inner_task()

    @task(name="inner_task")
    def inner_task():
        return inner_inner_task()

    @task(name="inner_inner_task")
    def inner_inner_task():
        return

    some_workflow()

    spans = exporter.get_finished_spans()
    # Spans finish innermost-first.
    assert [span.name for span in spans] == [
        "inner_inner_task.task",
        "inner_task.task",
        "outer_task.task",
        "some_workflow.workflow",
    ]

    inner_inner_task_span = spans[0]
    inner_task_span = spans[1]
    outer_task_span = spans[2]
    some_workflow_span = spans[3]

    assert (
        inner_inner_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH] ==
        "outer_task.inner_task"
    )
    assert (
        inner_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH] == "outer_task"
    )
    # The top-level task and the workflow carry no entity path. These must be
    # separate `raises` blocks: in the original, the second lookup was
    # unreachable because the first statement already raised, so the
    # workflow-span case was never actually checked.
    with raises(KeyError):
        _ = outer_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
    with raises(KeyError):
        _ = some_workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
48 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/tests/test_privacy_no_prompts.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from openai import OpenAI
5 | from opentelemetry.semconv_ai import SpanAttributes
6 | from traceloop.sdk.decorators import workflow, task
7 |
8 |
@pytest.fixture(autouse=True)
def disable_trace_content():
    """Disable prompt/completion content capture for every test in this module."""
    os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
    yield
    # Restore the default so other test modules still capture content.
    os.environ["TRACELOOP_TRACE_CONTENT"] = "true"


@pytest.fixture
def openai_client():
    # Real client; responses are replayed from the VCR cassette.
    return OpenAI()


@pytest.mark.vcr
def test_simple_workflow(exporter, openai_client):
    """With content tracing off, spans keep token counts but no prompt text."""
    @task(name="joke_creation")
    def create_joke():
        completion = openai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": "Tell me a joke about opentelemetry"}
            ],
        )
        return completion.choices[0].message.content

    @workflow(name="pirate_joke_generator")
    def joke_workflow():
        create_joke()

    joke_workflow()

    spans = exporter.get_finished_spans()
    assert [span.name for span in spans] == [
        "openai.chat",
        "joke_creation.task",
        "pirate_joke_generator.workflow",
    ]
    open_ai_span = spans[0]
    # Usage metadata is still recorded...
    assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 15
    # ...but message-content attributes must be absent.
    assert not open_ai_span.attributes.get(f"{SpanAttributes.LLM_PROMPTS}.0.content")
    assert not open_ai_span.attributes.get(
        f"{SpanAttributes.LLM_PROMPTS}.0.completions"
    )
51 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/annotation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/traceloop-sdk/traceloop/sdk/annotation/__init__.py
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/annotation/user_feedback.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | from traceloop.sdk.client.http import HTTPClient
4 | from .base_annotation import BaseAnnotation
5 |
6 |
class UserFeedback(BaseAnnotation):
    """Annotation client specialized for the built-in "user_feedback" flow."""

    def __init__(self, http: HTTPClient, app_name: str):
        super().__init__(http, app_name, "user_feedback")

    def create(
        self,
        annotation_task: str,
        entity_instance_id: str,
        tags: Dict[str, Any],
    ) -> None:
        """Create an annotation for a specific task.

        Args:
            annotation_task (str): The ID/slug of the annotation task to report to.
                Can be found at app.traceloop.com/annotation_tasks/:annotation_task_id
            entity_instance_id (str): The ID of the specific entity instance being annotated, should be reported
                in the association properties
            tags (Dict[str, Any]): Dictionary containing the tags to be reported.
                Should match the tags defined in the annotation task

        Example:
            ```python
            client = Client(api_key="your-key")
            client.annotation.create(
                annotation_task="task_123",
                entity_instance_id="instance_456",
                tags={
                    "sentiment": "positive",
                    "relevance": 0.95,
                    "tones": ["happy", "nice"]
                },
            )
            ```
        """
        # Idiomatic super() delegation (was an explicit unbound
        # BaseAnnotation.create(self, ...) call); single inheritance, so
        # behavior is identical. Also normalized PEP 8 blank lines.
        return super().create(annotation_task, entity_instance_id, tags)
44 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/client/__init__.py:
--------------------------------------------------------------------------------
1 | from .client import Client
2 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/config/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 |
def _flag_enabled(env_var: str, default: str) -> bool:
    # Empty-string values fall back to the default (matching `or` semantics).
    return (os.getenv(env_var) or default).lower() == "true"


def is_tracing_enabled() -> bool:
    """Tracing is on unless TRACELOOP_TRACING_ENABLED disables it."""
    return _flag_enabled("TRACELOOP_TRACING_ENABLED", "true")


def is_content_tracing_enabled() -> bool:
    """Content capture is on unless TRACELOOP_TRACE_CONTENT disables it."""
    return _flag_enabled("TRACELOOP_TRACE_CONTENT", "true")


def is_metrics_enabled() -> bool:
    """Metrics are on unless TRACELOOP_METRICS_ENABLED disables them."""
    return _flag_enabled("TRACELOOP_METRICS_ENABLED", "true")


def is_logging_enabled() -> bool:
    """Logging is off unless TRACELOOP_LOGGING_ENABLED is explicitly "true"."""
    return _flag_enabled("TRACELOOP_LOGGING_ENABLED", "false")
18 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/instruments.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
class Instruments(Enum):
    """Instrumentation targets that Traceloop.init() can enable selectively.

    Values are the lowercase provider/package identifiers used when choosing
    which instrumentations to load.
    """

    ALEPHALPHA = "alephalpha"
    ANTHROPIC = "anthropic"
    BEDROCK = "bedrock"
    CHROMA = "chroma"
    COHERE = "cohere"
    CREW = "crew"
    GOOGLE_GENERATIVEAI = "google_generativeai"
    GROQ = "groq"
    HAYSTACK = "haystack"
    LANCEDB = "lancedb"
    LANGCHAIN = "langchain"
    LLAMA_INDEX = "llama_index"
    MARQO = "marqo"
    MCP = "mcp"
    MILVUS = "milvus"
    MISTRAL = "mistral"
    OLLAMA = "ollama"
    OPENAI = "openai"
    PINECONE = "pinecone"
    PYMYSQL = "pymysql"
    QDRANT = "qdrant"
    REDIS = "redis"
    REPLICATE = "replicate"
    REQUESTS = "requests"
    SAGEMAKER = "sagemaker"
    TOGETHER = "together"
    TRANSFORMERS = "transformers"
    URLLIB3 = "urllib3"
    VERTEXAI = "vertexai"
    WATSONX = "watsonx"
    WEAVIATE = "weaviate"
36 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/logging/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/traceloop-sdk/traceloop/sdk/logging/__init__.py
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traceloop/openllmetry/6d13035c3f5723974782b16dfe8ab6624180970b/packages/traceloop-sdk/traceloop/sdk/metrics/__init__.py
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/prompts/__init__.py:
--------------------------------------------------------------------------------
1 | from traceloop.sdk.prompts.client import PromptRegistryClient
2 |
3 |
def get_prompt(key, **args):
    """Fetch the prompt registered under `key` and render it with `args`."""
    registry_client = PromptRegistryClient()
    return registry_client.render_prompt(key, **args)
6 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/prompts/registry.py:
--------------------------------------------------------------------------------
1 | import typing
2 |
3 | from traceloop.sdk.prompts.model import Prompt
4 |
5 |
class PromptRegistry:
    """In-memory key -> Prompt lookup populated from the registry JSON payload."""

    def __init__(self):
        self._prompts: typing.Dict[str, "Prompt"] = {}

    def get_prompt_by_key(self, key):
        """Return the Prompt registered under `key`, or None when absent."""
        return self._prompts.get(key)

    def load(self, prompts_json: dict):
        """Insert/replace prompts from a {"prompts": [...]} payload."""
        for entry in prompts_json["prompts"]:
            self._prompts[entry["key"]] = Prompt(**entry)
16 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/tracing/__init__.py:
--------------------------------------------------------------------------------
1 | from traceloop.sdk.tracing.context_manager import get_tracer
2 | from traceloop.sdk.tracing.tracing import set_workflow_name
3 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/tracing/content_allow_list.py:
--------------------------------------------------------------------------------
# Manages list of associated properties for which content tracing
# (prompts, vector embeddings, etc.) is allowed.
class ContentAllowList:
    """Singleton allow-list of association-property patterns that permit content tracing."""

    def __new__(cls) -> "ContentAllowList":
        # Classic singleton: the first instantiation stores the instance on the class.
        if not hasattr(cls, "instance"):
            obj = cls.instance = super(ContentAllowList, cls).__new__(cls)
            obj._allow_list: list[dict] = []

        return cls.instance

    def is_allowed(self, association_properties: dict) -> bool:
        """Return True if any allow-list entry is fully matched by the given properties."""
        for allow_list_item in self._allow_list:
            # Every key/value pair of the entry must match. Generator instead
            # of a materialized list lets all() short-circuit on the first
            # mismatch.
            if all(
                association_properties.get(key) == value
                for key, value in allow_list_item.items()
            ):
                return True

        return False

    def load(self, response_json: dict):
        """Replace the allow list from a server response payload."""
        self._allow_list = response_json["associationPropertyAllowList"]
25 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/tracing/context_manager.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 |
3 | from traceloop.sdk.tracing.tracing import TracerWrapper
4 |
5 |
@contextmanager
def get_tracer(flush_on_exit: bool = False):
    """Yield the SDK's tracer; optionally flush pending spans on exit.

    Args:
        flush_on_exit: when True, flush the tracer after the with-block,
            even if the block raised.
    """
    wrapper = TracerWrapper()
    try:
        yield wrapper.get_tracer()
    finally:
        if flush_on_exit:
            wrapper.flush()
14 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/utils/__init__.py:
--------------------------------------------------------------------------------
def cameltosnake(camel_string: str) -> str:
    """Lowercase `camel_string`, prefixing each uppercase letter with "_".

    Iterative rewrite of the previous per-character recursion: identical
    output, but O(n) and safe for long strings (the recursive version hit
    Python's recursion limit around 1000 characters and built the result
    with quadratic string concatenation).
    """
    return "".join(
        f"_{char.lower()}" if char.isupper() else char for char in camel_string
    )


def camel_to_snake(s):
    """Convert a CamelCase/mixedCase identifier to snake_case."""
    if len(s) <= 1:
        return s.lower()

    # Lowercase the first character so a leading capital doesn't emit "_".
    return cameltosnake(s[0].lower() + s[1:])
15 |
16 |
def is_notebook():
    """Best-effort check for an interactive IPython/Jupyter environment.

    Returns False when IPython is not installed or no interactive shell is
    active; any import/runtime error is treated as "not a notebook".
    """
    try:
        from IPython import get_ipython

        return get_ipython() is not None
    except Exception:
        return False
27 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/utils/json_encoder.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | import json
3 |
4 |
class JSONEncoder(json.JSONEncoder):
    """JSON encoder with fallbacks for objects the stdlib can't serialize."""

    def default(self, o):
        # NOTE(review): json only calls default() for objects it cannot
        # serialize natively, so plain dicts normally never reach this
        # branch — confirm whether a dict subclass was the intended target.
        if isinstance(o, dict):
            # WARNING: mutates the caller's dict by dropping "callbacks".
            if "callbacks" in o:
                del o["callbacks"]
            return o
        if dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)

        # Objects exposing their own JSON representation.
        if hasattr(o, "to_json"):
            return o.to_json()

        if hasattr(o, "json"):
            return o.json()

        # Last resort: the class name. Every object has __class__, so the
        # super().default() TypeError path below is effectively unreachable.
        if hasattr(o, "__class__"):
            return o.__class__.__name__

        return super().default(o)
24 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/utils/package_check.py:
--------------------------------------------------------------------------------
1 | from importlib.metadata import Distribution, distributions
2 |
3 |
4 | def _get_package_name(dist: Distribution) -> str | None:
5 | try:
6 | return dist.name.lower()
7 | except (KeyError, AttributeError):
8 | return None
9 |
10 |
11 | installed_packages = {name for dist in distributions() if (name := _get_package_name(dist)) is not None}
12 |
13 |
14 | def is_package_installed(package_name: str) -> bool:
15 | return package_name.lower() in installed_packages
16 |
--------------------------------------------------------------------------------
/packages/traceloop-sdk/traceloop/sdk/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.40.7"
2 |
--------------------------------------------------------------------------------
/scripts/build-release.sh:
--------------------------------------------------------------------------------
1 | if [ "$(uname)" = "Darwin" ]; then export SEP=" "; else SEP=""; fi
2 | VERSION=$(poetry version | awk '{print $2}')
3 | sed -i$SEP'' "s|{.*path.*|\"==$VERSION\"|" pyproject.toml
4 | poetry build
5 |
--------------------------------------------------------------------------------