├── pytest.ini ├── tests ├── __init__.py ├── configs │ ├── audio │ │ ├── single_provider │ │ │ └── single_provider.json │ │ ├── speech.mp3 │ │ ├── single_with_basic_config │ │ │ └── single_with_basic_config.json │ │ └── single_provider_with_vk_retry_cache │ │ │ └── single_provider_with_vk_retry_cache.json │ ├── images │ │ ├── single_provider │ │ │ └── single_provider.json │ │ ├── single_with_basic_config │ │ │ └── single_with_basic_config.json │ │ ├── single_provider_with_vk_retry_cache │ │ │ └── single_provider_with_vk_retry_cache.json │ │ └── loadbalance_with_two_apikeys │ │ │ └── loadbalance_with_two_apikeys.json │ ├── assistants │ │ ├── single_provider │ │ │ └── single_provider.json │ │ ├── single_with_basic_config │ │ │ └── single_with_basic_config.json │ │ └── single_provider_with_vk_retry_cache │ │ │ └── single_provider_with_vk_retry_cache.json │ ├── moderations │ │ ├── single_provider │ │ │ └── single_provider.json │ │ ├── single_with_basic_config │ │ │ └── single_with_basic_config.json │ │ └── single_provider_with_vk_retry_cache │ │ │ └── single_provider_with_vk_retry_cache.json │ ├── threads │ │ ├── single_provider │ │ │ └── single_provider.json │ │ ├── sample.pdf │ │ ├── single_with_basic_config │ │ │ └── single_with_basic_config.json │ │ └── single_provider_with_vk_retry_cache │ │ │ └── single_provider_with_vk_retry_cache.json │ ├── chat_completions │ │ ├── single_provider │ │ │ └── single_provider.json │ │ ├── single_with_basic_config │ │ │ └── single_with_basic_config.json │ │ ├── single_provider_with_vk_retry_cache │ │ │ └── single_provider_with_vk_retry_cache.json │ │ └── loadbalance_with_two_apikeys │ │ │ └── loadbalance_with_two_apikeys.json │ └── completions │ │ ├── single_provider │ │ └── single_provider.json │ │ ├── single_with_basic_config │ │ └── single_with_basic_config.json │ │ ├── single_provider_with_vk_retry_cache │ │ └── single_provider_with_vk_retry_cache.json │ │ └── loadbalance_with_two_apikeys │ │ └── 
loadbalance_with_two_apikeys.json ├── manual_test_feedback.py ├── utils.py └── manual_test_async_feedback.py ├── portkey_ai ├── py.typed ├── llms │ ├── __init__.py │ ├── llama_index │ │ └── __init__.py │ ├── mypy.ini │ └── langchain │ │ └── __init__.py ├── _vendor │ ├── __init__.py │ ├── openai │ │ ├── py.typed │ │ ├── cli │ │ │ ├── __init__.py │ │ │ ├── _api │ │ │ │ ├── __init__.py │ │ │ │ ├── fine_tuning │ │ │ │ │ └── __init__.py │ │ │ │ ├── chat │ │ │ │ │ └── __init__.py │ │ │ │ └── _main.py │ │ │ ├── _tools │ │ │ │ ├── __init__.py │ │ │ │ └── _main.py │ │ │ ├── _errors.py │ │ │ └── _models.py │ │ ├── __main__.py │ │ ├── version.py │ │ ├── lib │ │ │ ├── __init__.py │ │ │ ├── .keep │ │ │ ├── streaming │ │ │ │ ├── responses │ │ │ │ │ ├── _types.py │ │ │ │ │ └── __init__.py │ │ │ │ └── __init__.py │ │ │ └── _parsing │ │ │ │ └── __init__.py │ │ ├── types │ │ │ ├── beta │ │ │ │ ├── chat │ │ │ │ │ └── __init__.py │ │ │ │ ├── chatkit │ │ │ │ │ ├── chat_session_status.py │ │ │ │ │ ├── chat_session_rate_limits.py │ │ │ │ │ ├── chat_session_automatic_thread_titling.py │ │ │ │ │ ├── chat_session_rate_limits_param.py │ │ │ │ │ ├── chat_session_history.py │ │ │ │ │ ├── thread_delete_response.py │ │ │ │ │ ├── chat_session_file_upload.py │ │ │ │ │ └── chat_session_expires_after_param.py │ │ │ │ ├── threads │ │ │ │ │ ├── runs │ │ │ │ │ │ ├── run_step_include.py │ │ │ │ │ │ ├── code_interpreter_logs.py │ │ │ │ │ │ ├── tool_call.py │ │ │ │ │ │ ├── message_creation_step_details.py │ │ │ │ │ │ ├── run_step_delta_message_delta.py │ │ │ │ │ │ ├── tool_call_delta.py │ │ │ │ │ │ ├── run_step_delta_event.py │ │ │ │ │ │ └── tool_calls_step_details.py │ │ │ │ │ ├── message_deleted.py │ │ │ │ │ ├── text.py │ │ │ │ │ ├── refusal_content_block.py │ │ │ │ │ ├── text_content_block.py │ │ │ │ │ ├── run_status.py │ │ │ │ │ ├── image_file_content_block.py │ │ │ │ │ ├── image_url_content_block.py │ │ │ │ │ ├── text_delta.py │ │ │ │ │ ├── text_content_block_param.py │ │ │ │ │ ├── annotation.py │ │ 
│ │ │ ├── refusal_delta_block.py │ │ │ │ │ ├── image_file_content_block_param.py │ │ │ │ │ ├── image_url_content_block_param.py │ │ │ │ │ ├── text_delta_block.py │ │ │ │ │ ├── annotation_delta.py │ │ │ │ │ ├── image_url_delta_block.py │ │ │ │ │ ├── image_file_delta_block.py │ │ │ │ │ ├── message_content_part_param.py │ │ │ │ │ ├── message_delta.py │ │ │ │ │ ├── message_delta_event.py │ │ │ │ │ ├── file_path_annotation.py │ │ │ │ │ ├── image_url_delta.py │ │ │ │ │ ├── image_url.py │ │ │ │ │ ├── message_content_delta.py │ │ │ │ │ ├── image_url_param.py │ │ │ │ │ └── message_content.py │ │ │ │ ├── assistant_tool_choice_function.py │ │ │ │ ├── realtime │ │ │ │ │ ├── realtime_connect_params.py │ │ │ │ │ ├── input_audio_buffer_cleared_event.py │ │ │ │ │ ├── input_audio_buffer_clear_event.py │ │ │ │ │ ├── input_audio_buffer_commit_event.py │ │ │ │ │ ├── response_done_event.py │ │ │ │ │ ├── session_created_event.py │ │ │ │ │ ├── session_updated_event.py │ │ │ │ │ ├── conversation_item_deleted_event.py │ │ │ │ │ ├── input_audio_buffer_clear_event_param.py │ │ │ │ │ ├── input_audio_buffer_commit_event_param.py │ │ │ │ │ ├── response_created_event.py │ │ │ │ │ ├── conversation_item_delete_event.py │ │ │ │ │ ├── conversation_item_retrieve_event.py │ │ │ │ │ ├── conversation_item_delete_event_param.py │ │ │ │ │ └── conversation_item_retrieve_event_param.py │ │ │ │ ├── thread_deleted.py │ │ │ │ ├── assistant_deleted.py │ │ │ │ ├── code_interpreter_tool.py │ │ │ │ ├── assistant_tool_choice_option.py │ │ │ │ ├── assistant_tool_choice_function_param.py │ │ │ │ ├── code_interpreter_tool_param.py │ │ │ │ ├── function_tool.py │ │ │ │ ├── chatkit_upload_file_response.py │ │ │ │ ├── assistant_tool_choice_option_param.py │ │ │ │ ├── function_tool_param.py │ │ │ │ ├── chatkit_upload_file_params.py │ │ │ │ ├── assistant_tool_param.py │ │ │ │ ├── assistant_tool.py │ │ │ │ ├── assistant_tool_choice.py │ │ │ │ ├── assistant_response_format_option.py │ │ │ │ ├── assistant_tool_choice_param.py 
│ │ │ │ └── assistant_response_format_option_param.py │ │ │ ├── containers │ │ │ │ ├── files │ │ │ │ │ └── __init__.py │ │ │ │ ├── file_create_params.py │ │ │ │ └── __init__.py │ │ │ ├── chat_model.py │ │ │ ├── file_content.py │ │ │ ├── chat │ │ │ │ ├── completions │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── message_list_params.py │ │ │ │ ├── chat_completion_modality.py │ │ │ │ ├── chat_completion_reasoning_effort.py │ │ │ │ ├── chat_completion_role.py │ │ │ │ ├── chat_completion_function_call_option_param.py │ │ │ │ ├── chat_completion_content_part_text.py │ │ │ │ ├── chat_completion_tool_param.py │ │ │ │ ├── chat_completion_content_part_text_param.py │ │ │ │ ├── chat_completion_function_tool.py │ │ │ │ ├── chat_completion_message_tool_call_param.py │ │ │ │ ├── chat_completion_content_part_refusal_param.py │ │ │ │ ├── chat_completion_deleted.py │ │ │ │ ├── chat_completion_tool_union_param.py │ │ │ │ ├── chat_completion_function_tool_param.py │ │ │ │ ├── chat_completion_named_tool_choice_param.py │ │ │ │ ├── chat_completion_named_tool_choice_custom_param.py │ │ │ │ ├── chat_completion_message_tool_call_union_param.py │ │ │ │ └── chat_completion_function_message_param.py │ │ │ ├── audio │ │ │ │ ├── translation.py │ │ │ │ ├── speech_model.py │ │ │ │ ├── transcription_include.py │ │ │ │ ├── translation_create_response.py │ │ │ │ ├── transcription_create_response.py │ │ │ │ ├── transcription_word.py │ │ │ │ ├── transcription_stream_event.py │ │ │ │ └── translation_verbose.py │ │ │ ├── video_model.py │ │ │ ├── video_seconds.py │ │ │ ├── shared │ │ │ │ ├── metadata.py │ │ │ │ ├── function_parameters.py │ │ │ │ ├── reasoning_effort.py │ │ │ │ ├── error_object.py │ │ │ │ ├── response_format_text.py │ │ │ │ ├── response_format_text_python.py │ │ │ │ ├── response_format_json_object.py │ │ │ │ ├── response_format_text_grammar.py │ │ │ │ └── compound_filter.py │ │ │ ├── conversations │ │ │ │ ├── input_file_content.py │ │ │ │ ├── input_text_content.py │ │ │ │ ├── 
input_image_content.py │ │ │ │ ├── output_text_content.py │ │ │ │ ├── refusal_content.py │ │ │ │ ├── input_file_content_param.py │ │ │ │ ├── input_text_content_param.py │ │ │ │ ├── input_image_content_param.py │ │ │ │ ├── output_text_content_param.py │ │ │ │ ├── refusal_content_param.py │ │ │ │ ├── text_content.py │ │ │ │ ├── conversation_deleted_resource.py │ │ │ │ └── summary_text_content.py │ │ │ ├── uploads │ │ │ │ ├── __init__.py │ │ │ │ ├── part_create_params.py │ │ │ │ └── upload_part.py │ │ │ ├── video_size.py │ │ │ ├── image_model.py │ │ │ ├── audio_model.py │ │ │ ├── realtime │ │ │ │ ├── noise_reduction_type.py │ │ │ │ ├── realtime_connect_params.py │ │ │ │ ├── realtime_tools_config.py │ │ │ │ ├── realtime_mcphttp_error.py │ │ │ │ ├── realtime_mcp_tool_execution_error.py │ │ │ │ ├── realtime_mcp_protocol_error.py │ │ │ │ ├── realtime_truncation.py │ │ │ │ ├── call_reject_params.py │ │ │ │ ├── realtime_mcphttp_error_param.py │ │ │ │ ├── realtime_mcp_tool_execution_error_param.py │ │ │ │ ├── realtime_mcp_protocol_error_param.py │ │ │ │ ├── realtime_transcription_session_audio.py │ │ │ │ ├── call_refer_params.py │ │ │ │ ├── realtime_truncation_param.py │ │ │ │ ├── input_audio_buffer_cleared_event.py │ │ │ │ ├── realtime_transcription_session_audio_param.py │ │ │ │ ├── log_prob_properties.py │ │ │ │ ├── realtime_audio_config.py │ │ │ │ ├── realtime_tool_choice_config.py │ │ │ │ ├── mcp_list_tools_failed.py │ │ │ │ ├── realtime_response_usage_output_token_details.py │ │ │ │ ├── realtime_error_event.py │ │ │ │ ├── mcp_list_tools_completed.py │ │ │ │ ├── input_audio_buffer_clear_event.py │ │ │ │ ├── mcp_list_tools_in_progress.py │ │ │ │ ├── input_audio_buffer_commit_event.py │ │ │ │ ├── output_audio_buffer_clear_event.py │ │ │ │ ├── response_done_event.py │ │ │ │ ├── conversation_item_deleted_event.py │ │ │ │ ├── input_audio_buffer_clear_event_param.py │ │ │ │ ├── realtime_audio_config_param.py │ │ │ │ ├── input_audio_buffer_commit_event_param.py │ │ │ │ ├── 
output_audio_buffer_clear_event_param.py │ │ │ │ ├── response_created_event.py │ │ │ │ ├── realtime_truncation_retention_ratio.py │ │ │ │ ├── call_create_params.py │ │ │ │ ├── realtime_tool_choice_config_param.py │ │ │ │ ├── conversation_item_delete_event.py │ │ │ │ ├── response_mcp_call_failed.py │ │ │ │ ├── conversation_item_retrieve_event.py │ │ │ │ ├── conversation_item_delete_event_param.py │ │ │ │ ├── response_mcp_call_completed.py │ │ │ │ ├── response_mcp_call_in_progress.py │ │ │ │ ├── conversation_item_retrieve_event_param.py │ │ │ │ ├── realtime_session_client_secret.py │ │ │ │ └── realtime_truncation_retention_ratio_param.py │ │ │ ├── responses │ │ │ │ ├── tool_choice_options.py │ │ │ │ ├── response_status.py │ │ │ │ ├── response_input_message_content_list.py │ │ │ │ ├── response_function_tool_call_item.py │ │ │ │ ├── response_conversation_param.py │ │ │ │ ├── response_function_call_output_item_list.py │ │ │ │ ├── response_input_text.py │ │ │ │ ├── tool_choice_custom.py │ │ │ │ ├── tool_choice_function.py │ │ │ │ ├── response_output_refusal.py │ │ │ │ ├── response_input_text_content.py │ │ │ │ ├── response_audio_done_event.py │ │ │ │ ├── response_includable.py │ │ │ │ ├── response_input_text_param.py │ │ │ │ ├── tool_choice_custom_param.py │ │ │ │ ├── tool_choice_function_param.py │ │ │ │ ├── response_output_refusal_param.py │ │ │ │ ├── response_input_text_content_param.py │ │ │ │ ├── response_audio_transcript_done_event.py │ │ │ │ ├── tool_choice_mcp.py │ │ │ │ ├── response_failed_event.py │ │ │ │ ├── response_created_event.py │ │ │ │ ├── response_queued_event.py │ │ │ │ ├── response_audio_delta_event.py │ │ │ │ ├── response_completed_event.py │ │ │ │ ├── response_incomplete_event.py │ │ │ │ ├── response_in_progress_event.py │ │ │ │ ├── tool_choice_mcp_param.py │ │ │ │ ├── response_audio_transcript_delta_event.py │ │ │ │ ├── response_input_audio.py │ │ │ │ ├── response_mcp_call_failed_event.py │ │ │ │ ├── response_error_event.py │ │ │ │ ├── 
computer_tool.py │ │ │ │ ├── response_input_content.py │ │ │ │ ├── response_format_text_config_param.py │ │ │ │ ├── response_mcp_call_completed_event.py │ │ │ │ ├── response_function_call_output_item.py │ │ │ │ └── response_input_content_param.py │ │ │ ├── video_create_error.py │ │ │ ├── fine_tuning │ │ │ │ ├── fine_tuning_job_integration.py │ │ │ │ ├── jobs │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── checkpoint_list_params.py │ │ │ │ ├── dpo_method.py │ │ │ │ ├── alpha │ │ │ │ │ └── __init__.py │ │ │ │ ├── supervised_method.py │ │ │ │ ├── dpo_method_param.py │ │ │ │ ├── job_list_events_params.py │ │ │ │ ├── checkpoints │ │ │ │ │ ├── permission_create_params.py │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── permission_delete_response.py │ │ │ │ └── supervised_method_param.py │ │ │ ├── audio_response_format.py │ │ │ ├── file_purpose.py │ │ │ ├── model_deleted.py │ │ │ ├── shared_params │ │ │ │ ├── metadata.py │ │ │ │ ├── function_parameters.py │ │ │ │ ├── reasoning_effort.py │ │ │ │ ├── response_format_text.py │ │ │ │ └── response_format_json_object.py │ │ │ ├── embedding_model.py │ │ │ ├── eval_delete_response.py │ │ │ ├── evals │ │ │ │ ├── eval_api_error.py │ │ │ │ ├── run_delete_response.py │ │ │ │ └── runs │ │ │ │ │ └── __init__.py │ │ │ ├── file_deleted.py │ │ │ ├── moderation_model.py │ │ │ ├── vector_store_deleted.py │ │ │ ├── other_file_chunking_strategy_object.py │ │ │ ├── video_remix_params.py │ │ │ ├── vector_stores │ │ │ │ ├── vector_store_file_deleted.py │ │ │ │ └── file_content_response.py │ │ │ ├── auto_file_chunking_strategy_param.py │ │ │ ├── video_download_content_params.py │ │ │ ├── batch_request_counts.py │ │ │ ├── moderation_text_input_param.py │ │ │ ├── static_file_chunking_strategy_object.py │ │ │ ├── video_delete_response.py │ │ │ ├── moderation_multi_modal_input_param.py │ │ │ ├── image_gen_stream_event.py │ │ │ ├── moderation_create_response.py │ │ │ ├── file_chunking_strategy_param.py │ │ │ ├── image_edit_stream_event.py │ │ │ ├── 
static_file_chunking_strategy_object_param.py │ │ │ ├── model.py │ │ │ ├── file_chunking_strategy.py │ │ │ ├── upload_complete_params.py │ │ │ ├── graders │ │ │ │ ├── python_grader.py │ │ │ │ └── python_grader_param.py │ │ │ ├── video_list_params.py │ │ │ ├── eval_custom_data_source_config.py │ │ │ ├── static_file_chunking_strategy.py │ │ │ ├── batch_error.py │ │ │ └── moderation_image_url_input_param.py │ │ ├── helpers │ │ │ └── __init__.py │ │ ├── _version.py │ │ ├── _extras │ │ │ ├── __init__.py │ │ │ └── _common.py │ │ ├── _utils │ │ │ ├── _streams.py │ │ │ └── _resources_proxy.py │ │ └── _constants.py │ ├── openai-2.2.0.dist-info │ │ ├── REQUESTED │ │ ├── INSTALLER │ │ ├── entry_points.txt │ │ └── WHEEL │ └── bin │ │ └── openai ├── api_resources │ ├── types │ │ ├── __init__.py │ │ ├── feedback_type.py │ │ └── utils.py │ └── instrumentation │ │ ├── crewai │ │ └── __init__.py │ │ ├── openai │ │ └── __init__.py │ │ ├── litellm │ │ └── __init__.py │ │ └── langgraph │ │ └── __init__.py ├── version.py ├── langchain │ └── __init__.py ├── llamaindex │ └── __init__.py ├── integrations │ └── __init__.py ├── utils │ ├── __init__.py │ └── hashing_utils.py └── _portkey_scripts.py ├── .changes ├── unreleased │ └── .gitkeep ├── v0.1.0.md └── header.tpl.md ├── docs ├── .DS_Store └── images │ ├── azure.png │ ├── bard.png │ ├── Sticker.png │ ├── anyscale.png │ ├── cohere.png │ ├── header.png │ ├── localai.png │ ├── openai.png │ └── anthropic.png ├── vendorize.toml ├── .github ├── pull_request_template.md ├── workflows │ └── verify-conventional-commits.yml └── ISSUE_TEMPLATE │ └── config.yml ├── SECURITY.md ├── SUPPORT.md ├── pyproject.toml └── .changie.yaml /pytest.ini: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /portkey_ai/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.changes/unreleased/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /portkey_ai/llms/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /portkey_ai/api_resources/types/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /portkey_ai/llms/llama_index/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /portkey_ai/version.py: -------------------------------------------------------------------------------- 1 | VERSION = "2.1.0" 2 | -------------------------------------------------------------------------------- /.changes/v0.1.0.md: -------------------------------------------------------------------------------- 1 | ## v0.1.0 - 2023-09-11 2 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai-2.2.0.dist-info/REQUESTED: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai-2.2.0.dist-info/INSTALLER: -------------------------------------------------------------------------------- 1 | pip 2 | -------------------------------------------------------------------------------- /portkey_ai/llms/mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | ignore_missing_imports = True 3 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/cli/__init__.py: -------------------------------------------------------------------------------- 1 | from ._cli import main as main 2 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/__main__.py: -------------------------------------------------------------------------------- 1 | from .cli import main 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /docs/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/.DS_Store -------------------------------------------------------------------------------- /vendorize.toml: -------------------------------------------------------------------------------- 1 | target = "portkey_ai/_vendor" 2 | packages = [ 3 | "openai==2.2.0" 4 | ] -------------------------------------------------------------------------------- /docs/images/azure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/azure.png -------------------------------------------------------------------------------- /docs/images/bard.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/bard.png -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/cli/_api/__init__.py: -------------------------------------------------------------------------------- 1 | from ._main import register_commands as register_commands 2 | -------------------------------------------------------------------------------- /docs/images/Sticker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/Sticker.png -------------------------------------------------------------------------------- /docs/images/anyscale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/anyscale.png -------------------------------------------------------------------------------- /docs/images/cohere.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/cohere.png -------------------------------------------------------------------------------- /docs/images/header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/header.png -------------------------------------------------------------------------------- /docs/images/localai.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/localai.png -------------------------------------------------------------------------------- /docs/images/openai.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/openai.png -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai-2.2.0.dist-info/entry_points.txt: -------------------------------------------------------------------------------- 1 | [console_scripts] 2 | openai = openai.cli:main 3 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/cli/_tools/__init__.py: -------------------------------------------------------------------------------- 1 | from ._main import register_commands as register_commands 2 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/version.py: -------------------------------------------------------------------------------- 1 | from ._version import __version__ 2 | 3 | VERSION: str = __version__ 4 | -------------------------------------------------------------------------------- /tests/configs/audio/single_provider/single_provider.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/images/single_provider/single_provider.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /docs/images/anthropic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/docs/images/anthropic.png -------------------------------------------------------------------------------- /tests/configs/assistants/single_provider/single_provider.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/moderations/single_provider/single_provider.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/threads/single_provider/single_provider.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/audio/speech.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/tests/configs/audio/speech.mp3 -------------------------------------------------------------------------------- /tests/configs/audio/single_with_basic_config/single_with_basic_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/images/single_with_basic_config/single_with_basic_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/threads/sample.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Portkey-AI/portkey-python-sdk/HEAD/tests/configs/threads/sample.pdf -------------------------------------------------------------------------------- 
/tests/configs/threads/single_with_basic_config/single_with_basic_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/assistants/single_with_basic_config/single_with_basic_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /tests/configs/moderations/single_with_basic_config/single_with_basic_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key" 3 | } 4 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai-2.2.0.dist-info/WHEEL: -------------------------------------------------------------------------------- 1 | Wheel-Version: 1.0 2 | Generator: hatchling 1.26.3 3 | Root-Is-Purelib: true 4 | Tag: py3-none-any 5 | -------------------------------------------------------------------------------- /portkey_ai/api_resources/instrumentation/crewai/__init__.py: -------------------------------------------------------------------------------- 1 | from .instrumentation import CrewAIInstrumentor 2 | 3 | __all__ = ["CrewAIInstrumentor"] 4 | -------------------------------------------------------------------------------- /portkey_ai/langchain/__init__.py: -------------------------------------------------------------------------------- 1 | from .portkey_langchain_callback_handler import LangchainCallbackHandler 2 | 3 | __all__ = ["LangchainCallbackHandler"] 4 | -------------------------------------------------------------------------------- /portkey_ai/llamaindex/__init__.py: -------------------------------------------------------------------------------- 1 | from 
.portkey_llama_callback_handler import LlamaIndexCallbackHandler 2 | 3 | __all__ = ["LlamaIndexCallbackHandler"] 4 | -------------------------------------------------------------------------------- /portkey_ai/integrations/__init__.py: -------------------------------------------------------------------------------- 1 | """Integrations namespace. 2 | 3 | Optional adapters for third-party frameworks live here. Install extras to use. 4 | """ 5 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/lib/__init__.py: -------------------------------------------------------------------------------- 1 | from ._tools import pydantic_function_tool as pydantic_function_tool 2 | from ._parsing import ResponseFormatT as ResponseFormatT 3 | -------------------------------------------------------------------------------- /portkey_ai/llms/langchain/__init__.py: -------------------------------------------------------------------------------- 1 | from .chat import ChatPortkey 2 | from .completion import PortkeyLLM 3 | 4 | __all__ = [ 5 | "ChatPortkey", 6 | "PortkeyLLM", 7 | ] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/chat/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/helpers/__init__.py: -------------------------------------------------------------------------------- 1 | from .microphone import Microphone 2 | from .local_audio_player import LocalAudioPlayer 3 | 4 | __all__ = ["Microphone", "LocalAudioPlayer"] 5 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/containers/files/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | -------------------------------------------------------------------------------- /tests/configs/chat_completions/single_provider/single_provider.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "override_params": { 4 | "model": "gpt-3.5-turbo" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /tests/configs/completions/single_provider/single_provider.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "override_params": { 4 | "model": "gpt-3.5-turbo-instruct" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/_version.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | __title__ = "openai" 4 | __version__ = "2.2.0" # x-release-please-version 5 | -------------------------------------------------------------------------------- /portkey_ai/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .json_utils import serialize_kwargs, serialize_args 2 | from .hashing_utils import string_to_uuid 3 | 4 | __all__ = ["serialize_kwargs", "serialize_args", "string_to_uuid"] 5 | -------------------------------------------------------------------------------- /portkey_ai/api_resources/instrumentation/openai/__init__.py: -------------------------------------------------------------------------------- 1 | from portkey_ai.api_resources.instrumentation.openai.instrumentation import ( 2 | OpenaiInstrumentor, 3 | ) 4 | 5 | __all__ = ["OpenaiInstrumentor"] 6 | -------------------------------------------------------------------------------- /tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "override_params": { 4 | "model": "gpt-3.5-turbo" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/_extras/__init__.py: -------------------------------------------------------------------------------- 1 | from .numpy_proxy import numpy as numpy, has_numpy as has_numpy 2 | from .pandas_proxy import pandas as pandas 3 | from .sounddevice_proxy import sounddevice as sounddevice 4 | -------------------------------------------------------------------------------- /portkey_ai/api_resources/instrumentation/litellm/__init__.py: -------------------------------------------------------------------------------- 1 | from portkey_ai.api_resources.instrumentation.litellm.instrumentation import ( 2 | LitellmInstrumentor, 3 | ) 4 | 5 | __all__ = ["LitellmInstrumentor"] 6 | 
-------------------------------------------------------------------------------- /tests/configs/completions/single_with_basic_config/single_with_basic_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "override_params": { 4 | "model": "gpt-3.5-turbo-instruct" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /portkey_ai/api_resources/instrumentation/langgraph/__init__.py: -------------------------------------------------------------------------------- 1 | from portkey_ai.api_resources.instrumentation.langgraph.instrumentation import ( 2 | LanggraphInstrumentor, 3 | ) 4 | 5 | __all__ = ["LanggraphInstrumentor"] 6 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from .shared import chat_model 4 | 5 | __all__ = ["ChatModel"] 6 | 7 | ChatModel = chat_model.ChatModel 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/file_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import TypeAlias 4 | 5 | __all__ = ["FileContent"] 6 | 7 | FileContent: TypeAlias = str 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/lib/.keep: -------------------------------------------------------------------------------- 1 | File generated from our OpenAPI spec by Stainless. 2 | 3 | This directory can be used to store custom files to expand the SDK. 
4 | It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/completions/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from .message_list_params import MessageListParams as MessageListParams 6 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | **Title:** 2 | 3 | **Description:** 4 | - Detailed change 1 5 | - Detailed change 2 6 | - ... 7 | 8 | **Motivation:** 9 | 10 | 11 | **Related Issues:** 12 | # -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/translation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ..._models import BaseModel 4 | 5 | __all__ = ["Translation"] 6 | 7 | 8 | class Translation(BaseModel): 9 | text: str 10 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/video_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import uuid
from typing import Optional


def string_to_uuid(input_string: Optional[str]) -> Optional[str]:
    """Deterministically map a string to a UUID string.

    Uses UUID v5 (name-based, SHA-1) under ``uuid.NAMESPACE_DNS``, so the
    same input always produces the same UUID — suitable for stable IDs,
    not for secrecy or unpredictability.

    Args:
        input_string: The value to hash. Non-``str`` values are coerced
            with ``str()`` before hashing.

    Returns:
        The UUIDv5 as a string, or ``None`` when ``input_string`` is ``None``.
    """
    if input_string is None:
        return None
    # NOTE: name-based UUIDs are deterministic by design; SHA-1 here is a
    # namespace-hashing choice (vs. uuid3's MD5), not a security guarantee.
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, str(input_string)))
12 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/speech_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["SpeechModel"] 6 | 7 | SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/transcription_include.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["TranscriptionInclude"] 6 | 7 | TranscriptionInclude: TypeAlias = Literal["logprobs"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/input_file_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ..responses.response_input_file import ResponseInputFile 4 | 5 | __all__ = ["InputFileContent"] 6 | 7 | InputFileContent = ResponseInputFile 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/input_text_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from ..responses.response_input_text import ResponseInputText 4 | 5 | __all__ = ["InputTextContent"] 6 | 7 | InputTextContent = ResponseInputText 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/uploads/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from .upload_part import UploadPart as UploadPart 6 | from .part_create_params import PartCreateParams as PartCreateParams 7 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/video_size.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["VideoSize"] 6 | 7 | VideoSize: TypeAlias = Literal["720x1280", "1280x720", "1024x1792", "1792x1024"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/input_image_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ..responses.response_input_image import ResponseInputImage 4 | 5 | __all__ = ["InputImageContent"] 6 | 7 | InputImageContent = ResponseInputImage 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/output_text_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from ..responses.response_output_text import ResponseOutputText 4 | 5 | __all__ = ["OutputTextContent"] 6 | 7 | OutputTextContent = ResponseOutputText 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/refusal_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ..responses.response_output_refusal import ResponseOutputRefusal 4 | 5 | __all__ = ["RefusalContent"] 6 | 7 | RefusalContent = ResponseOutputRefusal 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_modality.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ChatCompletionModality"] 6 | 7 | ChatCompletionModality: TypeAlias = Literal["text", "audio"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/image_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ImageModel"] 6 | 7 | ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1", "gpt-image-1-mini"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["AudioModel"] 6 | 7 | AudioModel: TypeAlias = Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/noise_reduction_type.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["NoiseReductionType"] 6 | 7 | NoiseReductionType: TypeAlias = Literal["near_field", "far_field"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/tool_choice_options.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ToolChoiceOptions"] 6 | 7 | ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared/function_parameters.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Dict 4 | from typing_extensions import TypeAlias 5 | 6 | __all__ = ["FunctionParameters"] 7 | 8 | FunctionParameters: TypeAlias = Dict[str, object] 9 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/video_create_error.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Console entry point for the vendored OpenAI CLI.

The previous shebang pointed at an absolute path inside one developer's
local virtualenv (``/Users/chandeep/.../venv/bin/python3``), which breaks
the script on every other machine; ``env python3`` is portable.
"""
import re
import sys

from openai.cli import main

if __name__ == '__main__':
    # Strip setuptools launcher suffixes ("-script.pyw" / ".exe") so the
    # CLI's argparse prog name displays as plain "openai" on all platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
2 | 3 | from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject 4 | 5 | FineTuningJobIntegration = FineTuningJobWandbIntegrationObject 6 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio_response_format.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["AudioResponseFormat"] 6 | 7 | AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/file_purpose.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["FilePurpose"] 6 | 7 | FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/model_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .._models import BaseModel 4 | 5 | __all__ = ["ModelDeleted"] 6 | 7 | 8 | class ModelDeleted(BaseModel): 9 | id: str 10 | 11 | deleted: bool 12 | 13 | object: str 14 | -------------------------------------------------------------------------------- /.github/workflows/verify-conventional-commits.yml: -------------------------------------------------------------------------------- 1 | name: verify-conventional-commits 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | conventional-commits-checker: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: verify conventional commits 10 | uses: taskmedia/action-conventional-commits@v1.1.8 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/input_file_content_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from ..responses.response_input_file_param import ResponseInputFileParam 6 | 7 | InputFileContentParam = ResponseInputFileParam 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/input_text_content_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import List, Optional

from pydantic import BaseModel


class FeedbackResponse(BaseModel, extra="allow"):
    """Typed response returned by the Portkey feedback API.

    ``extra="allow"`` keeps forward compatibility: unknown fields the
    server may add later are retained instead of raising a validation
    error. All known fields are optional because the API may omit any
    of them.
    """

    # e.g. "success" — outcome of the feedback submission
    status: Optional[str] = None
    # human-readable detail accompanying the status
    message: Optional[str] = None
    # server-assigned IDs for each recorded feedback item
    feedback_ids: Optional[List[str]] = None
2 | 3 | from __future__ import annotations 4 | 5 | from ..responses.response_input_image_param import ResponseInputImageParam 6 | 7 | InputImageContentParam = ResponseInputImageParam 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/output_text_content_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from ..responses.response_output_text_param import ResponseOutputTextParam 6 | 7 | OutputTextContentParam = ResponseOutputTextParam 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/refusal_content_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from ..responses.response_output_refusal_param import ResponseOutputRefusalParam 6 | 7 | RefusalContentParam = ResponseOutputRefusalParam 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared_params/metadata.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Dict 6 | from typing_extensions import TypeAlias 7 | 8 | __all__ = ["Metadata"] 9 | 10 | Metadata: TypeAlias = Dict[str, str] 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Portkey Community Support 4 | url: https://discord.com/invite/DD7vgKK299 5 | about: Please ask and answer questions here. 6 | - name: Portkey Bounty 7 | url: https://discord.com/invite/DD7vgKK299 8 | about: Please report security vulnerabilities here. -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_include.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["RunStepInclude"] 6 | 7 | RunStepInclude: TypeAlias = Literal["step_details.tool_calls[*].file_search.results[*].content"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_role.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ChatCompletionRole"] 6 | 7 | ChatCompletionRole: TypeAlias = Literal["developer", "system", "user", "assistant", "tool", "function"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/embedding_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["EmbeddingModel"] 6 | 7 | EmbeddingModel: TypeAlias = Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/eval_delete_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from .._models import BaseModel 4 | 5 | __all__ = ["EvalDeleteResponse"] 6 | 7 | 8 | class EvalDeleteResponse(BaseModel): 9 | deleted: bool 10 | 11 | eval_id: str 12 | 13 | object: str 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_status.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ResponseStatus"] 6 | 7 | ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"] 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared/reasoning_effort.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal, TypeAlias 5 | 6 | __all__ = ["ReasoningEffort"] 7 | 8 | ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] 9 | -------------------------------------------------------------------------------- /tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "cache": { 4 | "mode": "semantic", 5 | "max_age": 60 6 | }, 7 | "retry": { 8 | "attempts": 5, 9 | "on_status_codes": [ 10 | 429 11 | ] 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/fine_tuning/jobs/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from .checkpoint_list_params import CheckpointListParams as CheckpointListParams 6 | from .fine_tuning_job_checkpoint import FineTuningJobCheckpoint as FineTuningJobCheckpoint 7 | -------------------------------------------------------------------------------- /tests/configs/assistants/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "cache": { 4 | "mode": "semantic", 5 | "max_age": 60 6 | }, 7 | "retry": { 8 | "attempts": 5, 9 | "on_status_codes": [ 10 | 429 11 | ] 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /tests/configs/images/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "cache": { 4 | "mode": "semantic", 5 | "max_age": 60 6 | }, 7 | "retry": { 8 | "attempts": 5, 9 | "on_status_codes": [ 10 | 429 11 | ] 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /tests/configs/threads/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "cache": { 4 | "mode": "semantic", 5 | "max_age": 60 6 | }, 7 | "retry": { 8 | "attempts": 5, 9 | "on_status_codes": [ 10 | 429 11 | ] 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/lib/streaming/responses/_types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing_extensions import TypeAlias 4 | 5 | from ....types.responses import 
ParsedResponse 6 | 7 | ParsedResponseSnapshot: TypeAlias = ParsedResponse[object] 8 | """Snapshot type representing an in-progress accumulation of 9 | a `ParsedResponse` object. 10 | """ 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/text_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["TextContent"] 8 | 9 | 10 | class TextContent(BaseModel): 11 | text: str 12 | 13 | type: Literal["text"] 14 | -------------------------------------------------------------------------------- /tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "cache": { 4 | "mode": "semantic", 5 | "max_age": 60 6 | }, 7 | "retry": { 8 | "attempts": 5, 9 | "on_status_codes": [ 10 | 429 11 | ] 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_function.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from ..._models import BaseModel 4 | 5 | __all__ = ["AssistantToolChoiceFunction"] 6 | 7 | 8 | class AssistantToolChoiceFunction(BaseModel): 9 | name: str 10 | """The name of the function to call.""" 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/evals/eval_api_error.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ..._models import BaseModel 4 | 5 | __all__ = ["EvalAPIError"] 6 | 7 | 8 | class EvalAPIError(BaseModel): 9 | code: str 10 | """The error code.""" 11 | 12 | message: str 13 | """The error message.""" 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared_params/function_parameters.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Dict 6 | from typing_extensions import TypeAlias 7 | 8 | __all__ = ["FunctionParameters"] 9 | 10 | FunctionParameters: TypeAlias = Dict[str, object] 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/_utils/_streams.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from typing_extensions import Iterator, AsyncIterator 3 | 4 | 5 | def consume_sync_iterator(iterator: Iterator[Any]) -> None: 6 | for _ in iterator: 7 | ... 8 | 9 | 10 | async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None: 11 | async for _ in iterator: 12 | ... 
13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/file_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["FileDeleted"] 8 | 9 | 10 | class FileDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["file"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/cli/_api/fine_tuning/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import TYPE_CHECKING 4 | from argparse import ArgumentParser 5 | 6 | from . import jobs 7 | 8 | if TYPE_CHECKING: 9 | from argparse import _SubParsersAction 10 | 11 | 12 | def register(subparser: _SubParsersAction[ArgumentParser]) -> None: 13 | jobs.register(subparser) 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/cli/_api/chat/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import TYPE_CHECKING 4 | from argparse import ArgumentParser 5 | 6 | from . import completions 7 | 8 | if TYPE_CHECKING: 9 | from argparse import _SubParsersAction 10 | 11 | 12 | def register(subparser: _SubParsersAction[ArgumentParser]) -> None: 13 | completions.register(subparser) 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from ...._models import BaseModel 4 | 5 | __all__ = ["ChatSessionRateLimits"] 6 | 7 | 8 | class ChatSessionRateLimits(BaseModel): 9 | max_requests_per_1_minute: int 10 | """Maximum allowed requests per one-minute window.""" 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/realtime_connect_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["RealtimeConnectParams"] 8 | 9 | 10 | class RealtimeConnectParams(TypedDict, total=False): 11 | model: Required[str] 12 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/moderation_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ModerationModel"] 6 | 7 | ModerationModel: TypeAlias = Literal[ 8 | "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable" 9 | ] 10 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/thread_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ThreadDeleted"] 8 | 9 | 10 | class ThreadDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["thread.deleted"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_connect_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import TypedDict 6 | 7 | __all__ = ["RealtimeConnectParams"] 8 | 9 | 10 | class RealtimeConnectParams(TypedDict, total=False): 11 | call_id: str 12 | 13 | model: str 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ...._models import BaseModel 4 | 5 | __all__ = ["ChatSessionAutomaticThreadTitling"] 6 | 7 | 8 | class ChatSessionAutomaticThreadTitling(BaseModel): 9 | enabled: bool 10 | """Whether automatic thread titling is enabled.""" 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Optional 6 | from typing_extensions import Literal, TypeAlias 7 | 8 | __all__ = ["ReasoningEffort"] 9 | 10 | ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["AssistantDeleted"] 8 | 9 | 10 | class AssistantDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["assistant.deleted"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_tools_config.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List 4 | from typing_extensions import TypeAlias 5 | 6 | from .realtime_tools_config_union import RealtimeToolsConfigUnion 7 | 8 | __all__ = ["RealtimeToolsConfig"] 9 | 10 | RealtimeToolsConfig: TypeAlias = List[RealtimeToolsConfigUnion] 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/vector_store_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["VectorStoreDeleted"] 8 | 9 | 10 | class VectorStoreDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["vector_store.deleted"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/message_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["MessageDeleted"] 8 | 9 | 10 | class MessageDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["thread.message.deleted"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/other_file_chunking_strategy_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["OtherFileChunkingStrategyObject"] 8 | 9 | 10 | class OtherFileChunkingStrategyObject(BaseModel): 11 | type: Literal["other"] 12 | """Always `other`.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_mcphttp_error.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["RealtimeMcphttpError"] 8 | 9 | 10 | class RealtimeMcphttpError(BaseModel): 11 | code: int 12 | 13 | message: str 14 | 15 | type: Literal["http_error"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/text.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List 4 | 5 | from ...._models import BaseModel 6 | from .annotation import Annotation 7 | 8 | __all__ = ["Text"] 9 | 10 | 11 | class Text(BaseModel): 12 | annotations: List[Annotation] 13 | 14 | value: str 15 | """The data that makes up the text.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared/error_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ErrorObject"] 8 | 9 | 10 | class ErrorObject(BaseModel): 11 | code: Optional[str] = None 12 | 13 | message: str 14 | 15 | param: Optional[str] = None 16 | 17 | type: str 18 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | ## How to file issues and get help 2 | 3 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 4 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 5 | feature request as a new Issue. 6 | 7 | For help and questions about using this project, please contact `support@portkey.ai`. 
Join the community discussions [here](https://discord.com/invite/DD7vgKK299). -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/refusal_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["RefusalContentBlock"] 8 | 9 | 10 | class RefusalContentBlock(BaseModel): 11 | refusal: str 12 | 13 | type: Literal["refusal"] 14 | """Always `refusal`.""" 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared/response_format_text.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseFormatText"] 8 | 9 | 10 | class ResponseFormatText(BaseModel): 11 | type: Literal["text"] 12 | """The type of response format being defined. Always `text`.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/code_interpreter_tool.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["CodeInterpreterTool"] 8 | 9 | 10 | class CodeInterpreterTool(BaseModel): 11 | type: Literal["code_interpreter"] 12 | """The type of tool being defined: `code_interpreter`""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_execution_error.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["RealtimeMcpToolExecutionError"] 8 | 9 | 10 | class RealtimeMcpToolExecutionError(BaseModel): 11 | message: str 12 | 13 | type: Literal["tool_execution_error"] 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/lib/streaming/__init__.py: -------------------------------------------------------------------------------- 1 | from ._assistants import ( 2 | AssistantEventHandler as AssistantEventHandler, 3 | AssistantEventHandlerT as AssistantEventHandlerT, 4 | AssistantStreamManager as AssistantStreamManager, 5 | AsyncAssistantEventHandler as AsyncAssistantEventHandler, 6 | AsyncAssistantEventHandlerT as AsyncAssistantEventHandlerT, 7 | AsyncAssistantStreamManager as AsyncAssistantStreamManager, 8 | ) 9 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/text_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from .text import Text 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["TextContentBlock"] 9 | 10 | 11 | class TextContentBlock(BaseModel): 12 | text: Text 13 | 14 | type: Literal["text"] 15 | """Always `text`.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/evals/run_delete_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["RunDeleteResponse"] 8 | 9 | 10 | class RunDeleteResponse(BaseModel): 11 | deleted: Optional[bool] = None 12 | 13 | object: Optional[str] = None 14 | 15 | run_id: Optional[str] = None 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_mcp_protocol_error.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["RealtimeMcpProtocolError"] 8 | 9 | 10 | class RealtimeMcpProtocolError(BaseModel): 11 | code: int 12 | 13 | message: str 14 | 15 | type: Literal["protocol_error"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_input_message_content_list.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import List 4 | from typing_extensions import TypeAlias 5 | 6 | from .response_input_content import ResponseInputContent 7 | 8 | __all__ = ["ResponseInputMessageContentList"] 9 | 10 | ResponseInputMessageContentList: TypeAlias = List[ResponseInputContent] 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/video_remix_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["VideoRemixParams"] 8 | 9 | 10 | class VideoRemixParams(TypedDict, total=False): 11 | prompt: Required[str] 12 | """Updated text prompt that directs the remix generation.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from .response_function_tool_call import ResponseFunctionToolCall 4 | 5 | __all__ = ["ResponseFunctionToolCallItem"] 6 | 7 | 8 | class ResponseFunctionToolCallItem(ResponseFunctionToolCall): 9 | id: str # type: ignore 10 | """The unique ID of the function tool call.""" 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_conversation_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["ResponseConversationParam"] 8 | 9 | 10 | class ResponseConversationParam(TypedDict, total=False): 11 | id: Required[str] 12 | """The unique ID of the conversation.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared/response_format_text_python.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseFormatTextPython"] 8 | 9 | 10 | class ResponseFormatTextPython(BaseModel): 11 | type: Literal["python"] 12 | """The type of response format being defined. Always `python`.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/vector_stores/vector_store_file_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["VectorStoreFileDeleted"] 8 | 9 | 10 | class VectorStoreFileDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["vector_store.file.deleted"] 16 | -------------------------------------------------------------------------------- /tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "override_params": { 4 | "model": "gpt-3.5-turbo" 5 | }, 6 | "cache": { 7 | "mode": "semantic", 8 | "max_age": 60 9 | }, 10 | "retry": { 11 | "attempts": 5, 12 | "on_status_codes": [ 13 | 429 14 | ] 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json: -------------------------------------------------------------------------------- 1 | { 2 | "virtual_key": "openai-virtual-key", 3 | "override_params": { 4 | "model": "gpt-3.5-turbo-instruct" 5 | }, 6 | "cache": { 7 | "mode": "semantic", 8 | "max_age": 60 9 | }, 10 | "retry": { 11 | "attempts": 5, 12 | "on_status_codes": [ 13 | 429 14 | ] 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/evals/runs/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from .output_item_list_params import OutputItemListParams as OutputItemListParams 6 | from .output_item_list_response import OutputItemListResponse as OutputItemListResponse 7 | from .output_item_retrieve_response import OutputItemRetrieveResponse as OutputItemRetrieveResponse 8 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/auto_file_chunking_strategy_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["AutoFileChunkingStrategyParam"] 8 | 9 | 10 | class AutoFileChunkingStrategyParam(TypedDict, total=False): 11 | type: Required[Literal["auto"]] 12 | """Always `auto`.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_option.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Literal, TypeAlias 5 | 6 | from .assistant_tool_choice import AssistantToolChoice 7 | 8 | __all__ = ["AssistantToolChoiceOption"] 9 | 10 | AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice] 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/conversations/conversation_deleted_resource.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ConversationDeletedResource"] 8 | 9 | 10 | class ConversationDeletedResource(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["conversation.deleted"] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared/response_format_json_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseFormatJSONObject"] 8 | 9 | 10 | class ResponseFormatJSONObject(BaseModel): 11 | type: Literal["json_object"] 12 | """The type of response format being defined. Always `json_object`.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/translation_create_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import TypeAlias 5 | 6 | from .translation import Translation 7 | from .translation_verbose import TranslationVerbose 8 | 9 | __all__ = ["TranslationCreateResponse"] 10 | 11 | TranslationCreateResponse: TypeAlias = Union[Translation, TranslationVerbose] 12 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_function_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["AssistantToolChoiceFunctionParam"] 8 | 9 | 10 | class AssistantToolChoiceFunctionParam(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the function to call.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/run_status.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["RunStatus"] 6 | 7 | RunStatus: TypeAlias = Literal[ 8 | "queued", 9 | "in_progress", 10 | "requires_action", 11 | "cancelling", 12 | "cancelled", 13 | "failed", 14 | "completed", 15 | "incomplete", 16 | "expired", 17 | ] 18 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_truncation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Literal, TypeAlias 5 | 6 | from .realtime_truncation_retention_ratio import RealtimeTruncationRetentionRatio 7 | 8 | __all__ = ["RealtimeTruncation"] 9 | 10 | RealtimeTruncation: TypeAlias = Union[Literal["auto", "disabled"], RealtimeTruncationRetentionRatio] 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/uploads/part_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | from ..._types import FileTypes 8 | 9 | __all__ = ["PartCreateParams"] 10 | 11 | 12 | class PartCreateParams(TypedDict, total=False): 13 | data: Required[FileTypes] 14 | """The chunk of bytes for this Part.""" 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/fine_tuning/dpo_method.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from ..._models import BaseModel 6 | from .dpo_hyperparameters import DpoHyperparameters 7 | 8 | __all__ = ["DpoMethod"] 9 | 10 | 11 | class DpoMethod(BaseModel): 12 | hyperparameters: Optional[DpoHyperparameters] = None 13 | """The hyperparameters used for the DPO fine-tuning job.""" 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_function_call_output_item_list.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List 4 | from typing_extensions import TypeAlias 5 | 6 | from .response_function_call_output_item import ResponseFunctionCallOutputItem 7 | 8 | __all__ = ["ResponseFunctionCallOutputItemList"] 9 | 10 | ResponseFunctionCallOutputItemList: TypeAlias = List[ResponseFunctionCallOutputItem] 11 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared_params/response_format_text.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ResponseFormatText"] 8 | 9 | 10 | class ResponseFormatText(TypedDict, total=False): 11 | type: Required[Literal["text"]] 12 | """The type of response format being defined. Always `text`.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/transcription_create_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import TypeAlias 5 | 6 | from .transcription import Transcription 7 | from .transcription_verbose import TranscriptionVerbose 8 | 9 | __all__ = ["TranscriptionCreateResponse"] 10 | 11 | TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose] 12 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/transcription_word.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ..._models import BaseModel 4 | 5 | __all__ = ["TranscriptionWord"] 6 | 7 | 8 | class TranscriptionWord(BaseModel): 9 | end: float 10 | """End time of the word in seconds.""" 11 | 12 | start: float 13 | """Start time of the word in seconds.""" 14 | 15 | word: str 16 | """The text content of the word.""" 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_function_call_option_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionFunctionCallOptionParam"] 8 | 9 | 10 | class ChatCompletionFunctionCallOptionParam(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the function to call.""" 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .image_file import ImageFile 7 | 8 | __all__ = ["ImageFileContentBlock"] 9 | 10 | 11 | class ImageFileContentBlock(BaseModel): 12 | image_file: ImageFile 13 | 14 | type: Literal["image_file"] 15 | """Always `image_file`.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .image_url import ImageURL 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageURLContentBlock"] 9 | 10 | 11 | class ImageURLContentBlock(BaseModel): 12 | image_url: ImageURL 13 | 14 | type: Literal["image_url"] 15 | """The type of the content part.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ChatCompletionContentPartText"] 8 | 9 | 10 | class ChatCompletionContentPartText(BaseModel): 11 | text: str 12 | """The text content.""" 13 | 14 | type: Literal["text"] 15 | """The type of the content part.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/call_reject_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import TypedDict 6 | 7 | __all__ = ["CallRejectParams"] 8 | 9 | 10 | class CallRejectParams(TypedDict, total=False): 11 | status_code: int 12 | """SIP response code to send back to the caller. 13 | 14 | Defaults to `603` (Decline) when omitted. 15 | """ 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/vector_stores/file_content_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["FileContentResponse"] 8 | 9 | 10 | class FileContentResponse(BaseModel): 11 | text: Optional[str] = None 12 | """The text content""" 13 | 14 | type: Optional[str] = None 15 | """The content type (currently only `"text"`)""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_portkey_scripts.py: -------------------------------------------------------------------------------- 1 | """main file""" 2 | import argparse 3 | from .version import VERSION 4 | 5 | 6 | def main(): 7 | """Main function""" 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument( 10 | "--version", 11 | "-v", 12 | action="version", 13 | version=f"portkey_ai {VERSION}", 14 | help="Print version and exit.", 15 | ) 16 | 17 | _ = parser.parse_args() 18 | 19 | 20 | if __name__ == "__main__": 21 | main() 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/_extras/_common.py: -------------------------------------------------------------------------------- 1 | from .._exceptions import OpenAIError 2 | 3 | INSTRUCTIONS = """ 4 | 5 | OpenAI error: 6 | 7 | missing `{library}` 8 | 9 | This feature requires additional dependencies: 10 | 11 | $ pip install openai[{extra}] 12 | 13 | """ 14 | 15 | 16 | def format_instructions(*, library: str, extra: str) -> str: 17 | return INSTRUCTIONS.format(library=library, extra=extra) 18 | 19 | 20 | class MissingDependencyError(OpenAIError): 21 | pass 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/code_interpreter_tool_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
class ResponseInputText(BaseModel):
    """A plain-text input content part supplied to the model."""

    text: str
    """The text input to the model."""

    type: Literal["input_text"]
    """The type of the input item. Always `input_text`."""
class ToolChoiceCustom(BaseModel):
    """Forces the model to call a specific custom tool by name."""

    name: str
    """The name of the custom tool to call."""

    type: Literal["custom"]
    """For custom tool calling, the type is always `custom`."""
class FunctionTool(BaseModel):
    """An assistant tool backed by a user-defined function."""

    function: FunctionDefinition
    # The function's name, description, and JSON-schema parameters.

    type: Literal["function"]
    """The type of tool being defined: `function`"""
class ResponseOutputRefusal(BaseModel):
    """An output content part indicating the model refused the request."""

    refusal: str
    """The refusal explanation from the model."""

    type: Literal["refusal"]
    """The type of the refusal. Always `refusal`."""
class SupervisedMethod(BaseModel):
    """Configuration for the supervised fine-tuning method."""

    hyperparameters: Optional[SupervisedHyperparameters] = None
    """The hyperparameters used for the fine-tuning job."""
class ResponseFormatJSONObject(TypedDict, total=False):
    """Request parameter selecting JSON-object response format."""

    type: Required[Literal["json_object"]]
    """The type of response format being defined. Always `json_object`."""
# Manual smoke test for the Portkey feedback API: creates a feedback entry
# for a trace, then updates it. Performs real network calls; requires the
# PORTKEY_API_KEY environment variable to be set.
import os
from portkey_ai import Portkey


portkey = Portkey(
    api_key=os.environ.get("PORTKEY_API_KEY"),
)


# NOTE(review): left blank — fill in a real trace id before running this
# manual test.
traceId = ""


print("Step: Create Feedback")
result = portkey.feedback.create(
    trace_id=traceId,
    value="1",
)
print(result)


print("Step: Update Feedback")
# NOTE(review): the trace id is passed as feedback_id here — presumably the
# update should target a feedback id returned by create(); confirm against
# the feedback API contract.
result = portkey.feedback.update(
    feedback_id=traceId,
    value="7",
)
print(result)
class JobListEventsParams(TypedDict, total=False):
    """Pagination parameters for listing fine-tuning job events."""

    after: str
    """Identifier for the last event from the previous pagination request."""

    limit: int
    """Number of events to retrieve."""
class ModerationTextInputParam(TypedDict, total=False):
    """A text item to run through the moderation classifier."""

    text: Required[str]
    """A string of text to classify."""

    type: Required[Literal["text"]]
    """Always `text`."""
class TextContentBlockParam(TypedDict, total=False):
    """A text content block to include in a thread message."""

    text: Required[str]
    """Text content to be sent to the model"""

    type: Required[Literal["text"]]
    """Always `text`."""
def parse_headers(headers: Optional[httpx.Headers]) -> dict:
    """Extract Portkey-specific headers, stripping the Portkey prefix.

    Args:
        headers: HTTP headers from a response, or ``None``.

    Returns:
        A dict containing only the headers whose names start with
        ``PORTKEY_HEADER_PREFIX``, keyed by the name with that leading
        prefix removed. Returns ``{}`` when ``headers`` is ``None``.
    """
    if headers is None:
        return {}

    _headers = {}
    for k, v in headers.items():
        if k.startswith(PORTKEY_HEADER_PREFIX):
            # Strip only the leading prefix; str.replace() would also remove
            # any later occurrence of the prefix inside the header name.
            _headers[k[len(PORTKEY_HEADER_PREFIX):]] = v

    return _headers
class FileCreateParams(TypedDict, total=False):
    """Parameters for creating a file within a container."""

    # NOTE(review): the two field docstrings below read as if swapped relative
    # to the field names; they are copied verbatim from the upstream OpenAPI
    # spec — confirm against the upstream API reference before changing.
    file: FileTypes
    """The File object (not file name) to be uploaded."""

    file_id: str
    """Name of the file to create."""
class CallReferParams(TypedDict, total=False):
    """Parameters for transferring (referring) a realtime SIP call."""

    target_uri: Required[str]
    """URI that should appear in the SIP Refer-To header.

    Supports values like `tel:+14155550123` or `sip:agent@example.com`.
    """
class CheckpointListParams(TypedDict, total=False):
    """Pagination parameters for listing fine-tuning job checkpoints."""

    after: str
    """Identifier for the last checkpoint ID from the previous pagination request."""

    limit: int
    """Number of checkpoints to retrieve."""
class ResponseFormatTextGrammar(BaseModel):
    """Response format constraining output to a custom grammar."""

    grammar: str
    """The custom grammar for the model to follow."""

    type: Literal["grammar"]
    """The type of response format being defined. Always `grammar`."""
class StaticFileChunkingStrategyObject(BaseModel):
    """Wrapper tagging a static chunking strategy with its type."""

    static: StaticFileChunkingStrategy
    # The chunk size / overlap settings for the static strategy.

    type: Literal["static"]
    """Always `static`."""
# Client-wide constants for the vendored OpenAI SDK: internal header names,
# default HTTP timeouts/limits, and retry backoff bounds.
import httpx

# Internal headers used to round-trip raw-response / cast-to hints.
RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response"
OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to"

# default timeout is 10 minutes
DEFAULT_TIMEOUT = httpx.Timeout(timeout=600, connect=5.0)
DEFAULT_MAX_RETRIES = 1
DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=100)

# Exponential backoff bounds for retries, in seconds.
INITIAL_RETRY_DELAY = 0.5
MAX_RETRY_DELAY = 8.0
class RefusalDeltaBlock(BaseModel):
    """A streamed delta for a refusal content part in a message."""

    index: int
    """The index of the refusal part in the message."""

    type: Literal["refusal"]
    """Always `refusal`."""

    refusal: Optional[str] = None
    # Incremental refusal text; may be absent in a given delta.
class InputAudioBufferClearedEvent(BaseModel):
    """Server event emitted after the input audio buffer is cleared."""

    event_id: str
    """The unique ID of the server event."""

    type: Literal["input_audio_buffer.cleared"]
    """The event type, must be `input_audio_buffer.cleared`."""
class SupervisedMethodParam(TypedDict, total=False):
    """Request parameter for the supervised fine-tuning method."""

    hyperparameters: SupervisedHyperparametersParam
    """The hyperparameters used for the fine-tuning job."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ..._models import BaseModel
from ..shared.function_definition import FunctionDefinition

__all__ = ["ChatCompletionFunctionTool"]


class ChatCompletionFunctionTool(BaseModel):
    """A function tool definition attached to a chat-completion request."""

    function: FunctionDefinition
    # The callable's name/description/parameters schema; see FunctionDefinition.

    type: Literal["function"]
    """The type of the tool. Currently, only `function` is supported."""
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ToolChoiceCustomParam"] 8 | 9 | 10 | class ToolChoiceCustomParam(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the custom tool to call.""" 13 | 14 | type: Required[Literal["custom"]] 15 | """For custom tool calling, the type is always `custom`.""" 16 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.mypy] 6 | exclude = ['portkey_ai/_vendor', 'tests'] 7 | ignore_missing_imports = true 8 | follow_imports = "silent" 9 | disable_error_code = ['import-untyped', 'import-not-found'] 10 | 11 | [[tool.mypy.overrides]] 12 | module = 'portkey_ai._vendor.*' 13 | ignore_errors = true 14 | 15 | [tool.black] 16 | force-exclude = '''(portkey_ai/_vendor)/''' 17 | 18 | [tool.ruff] 19 | exclude = ["portkey_ai/_vendor", "tests"] 20 | ignore = ["E501"] 21 | line-length = 88 -------------------------------------------------------------------------------- /tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json: -------------------------------------------------------------------------------- 1 | { 2 | "strategy": { 3 | "mode": "loadbalance" 4 | }, 5 | "targets": [ 6 | { 7 | "virtual_key": "openai-virtual-key", 8 | "override_params": { 9 | "model": "gpt-3.5-turbo-instruct" 10 | } 11 | }, 12 | { 13 | "virtual_key": "anthropic-virtual-key", 14 | "override_params": { 15 | "model": "claude-2.1" 16 | } 17 | } 18 | ] 19 | } 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/cli/_tools/_main.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

__all__ = ["ToolChoiceFunctionParam"]


class ToolChoiceFunctionParam(TypedDict, total=False):
    """Forces the model to call one specific function tool by name."""

    name: Required[str]
    """The name of the function to call."""

    type: Required[Literal["function"]]
    """For function calling, the type is always `function`."""
import json
from typing import Any, Dict


def read_json_file(path: str) -> Dict[str, Any]:
    """Load and return the JSON document stored at *path*.

    Uses a context manager so the file handle is closed deterministically
    (the previous version left it to the garbage collector), and reads as
    UTF-8 explicitly so results do not depend on the platform encoding.
    """
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


def check_chat_streaming_chunk(chunk) -> bool:
    """Validate one chat-completion streaming chunk.

    The terminal chunk (its ``finish_reason`` is a string) must carry an
    empty-string delta; any intermediate chunk must carry a string delta.
    """
    stop_reason = chunk.choices[0].finish_reason
    if isinstance(stop_reason, str):
        return chunk.choices[0].delta.content == ""
    return isinstance(chunk.choices[0].delta.content, str)


def check_text_streaming_chunk(chunk) -> bool:
    """Validate one text-completion streaming chunk: ``text`` must be a str."""
    return isinstance(chunk.choices[0].text, str)
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

from .image_url_param import ImageURLParam

__all__ = ["ImageURLContentBlockParam"]


class ImageURLContentBlockParam(TypedDict, total=False):
    """A message content part referencing an external image by URL."""

    image_url: Required[ImageURLParam]
    # URL plus optional detail level; see ImageURLParam.

    type: Required[Literal["image_url"]]
    """The type of the content part."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import TypeAlias

from .chat_completion_message_function_tool_call_param import (
    Function as Function,
    ChatCompletionMessageFunctionToolCallParam,
)

__all__ = ["ChatCompletionMessageToolCallParam", "Function"]

# Backwards-compatible alias: the historical tool-call param name now points
# at the function-specific variant.
ChatCompletionMessageToolCallParam: TypeAlias = ChatCompletionMessageFunctionToolCallParam
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional

from ..._models import BaseModel
from .realtime_audio_config_input import RealtimeAudioConfigInput
from .realtime_audio_config_output import RealtimeAudioConfigOutput

__all__ = ["RealtimeAudioConfig"]


class RealtimeAudioConfig(BaseModel):
    """Audio configuration for a Realtime session, split by direction."""

    input: Optional[RealtimeAudioConfigInput] = None
    # Settings for audio sent to the model; None means server defaults.

    output: Optional[RealtimeAudioConfigOutput] = None
    # Settings for audio produced by the model; None means server defaults.
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

__all__ = ["ResponseOutputRefusalParam"]


class ResponseOutputRefusalParam(TypedDict, total=False):
    """An output content part carrying the model's refusal text."""

    refusal: Required[str]
    """The refusal explanation from the model."""

    type: Required[Literal["refusal"]]
    """The type of the refusal. Always `refusal`."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

__all__ = ["ResponseInputTextContentParam"]


class ResponseInputTextContentParam(TypedDict, total=False):
    """A text content part supplied as input to a Responses API call."""

    text: Required[str]
    """The text input to the model."""

    type: Required[Literal["input_text"]]
    """The type of the input item. Always `input_text`."""
from __future__ import annotations

import sys

import pydantic

from ._utils import Colors, organization_info
from .._exceptions import APIError, OpenAIError


class CLIError(OpenAIError):
    """Base exception for errors raised by the CLI layer."""

    ...


class SilentCLIError(CLIError):
    """A CLI error that should abort the command without printing anything."""

    ...


def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
    """Write *err* to stderr using the CLI's colored ``Error:`` format.

    SilentCLIError instances are skipped entirely — the early return below
    is the mechanism that makes them "silent".
    """
    if isinstance(err, SilentCLIError):
        return

    sys.stderr.write("{}{}Error:{} {}\n".format(organization_info(), Colors.FAIL, Colors.ENDC, err))
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Required, TypedDict

from ..._types import FileTypes

__all__ = ["ChatKitUploadFileParams"]


class ChatKitUploadFileParams(TypedDict, total=False):
    """Parameters for uploading a file into a ChatKit session."""

    file: Required[FileTypes]
    """Binary file contents to store with the ChatKit session.

    Supports PDFs and PNG, JPG, JPEG, GIF, or WEBP images.
    """
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["McpListToolsFailed"]


class McpListToolsFailed(BaseModel):
    """Server event signalling that listing an MCP server's tools failed."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP list tools item."""

    type: Literal["mcp_list_tools.failed"]
    """The event type, must be `mcp_list_tools.failed`."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional

from ...._models import BaseModel

__all__ = ["ChatSessionHistory"]


class ChatSessionHistory(BaseModel):
    """History-retention settings reported for a ChatKit chat session."""

    enabled: bool
    """Indicates if chat history is persisted for the session."""

    recent_threads: Optional[int] = None
    """Number of prior threads surfaced in history views.

    Defaults to null when all history is retained.
    """
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["ChatCompletionDeleted"]


class ChatCompletionDeleted(BaseModel):
    """Response body confirming deletion of a stored chat completion."""

    id: str
    """The ID of the chat completion that was deleted."""

    deleted: bool
    """Whether the chat completion was deleted."""

    object: Literal["chat.completion.deleted"]
    """The type of object being deleted."""
def register_commands(parser: ArgumentParser) -> None:
    """Attach every OpenAI API subcommand group to *parser*.

    Each API module exposes a ``register`` hook that adds its own
    subcommand(s) to the shared subparser collection. The tuple below
    preserves the original registration order, so ``--help`` output is
    unchanged.
    """
    subparsers = parser.add_subparsers(help="All API subcommands")

    for api_module in (chat, image, audio, files, models, completions, fine_tuning):
        api_module.register(subparsers)
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["McpListToolsCompleted"]


class McpListToolsCompleted(BaseModel):
    """Server event signalling that an MCP server's tools were listed."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP list tools item."""

    type: Literal["mcp_list_tools.completed"]
    """The event type, must be `mcp_list_tools.completed`."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
from typing_extensions import Annotated, TypeAlias

from .._utils import PropertyInfo
from .image_gen_completed_event import ImageGenCompletedEvent
from .image_gen_partial_image_event import ImageGenPartialImageEvent

__all__ = ["ImageGenStreamEvent"]

# Discriminated union over the `type` field: an image-generation stream emits
# partial-image events followed by a completed event.
ImageGenStreamEvent: TypeAlias = Annotated[
    Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type")
]
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union
from typing_extensions import TypeAlias

from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam
from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam

__all__ = ["ChatCompletionToolUnionParam"]

# A chat-completion request tool is either a function tool or a custom tool.
ChatCompletionToolUnionParam: TypeAlias = Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam]
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["InputAudioBufferClearEvent"]


class InputAudioBufferClearEvent(BaseModel):
    """Client event requesting that the input audio buffer be cleared."""

    type: Literal["input_audio_buffer.clear"]
    """The event type, must be `input_audio_buffer.clear`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ...._models import BaseModel

__all__ = ["ThreadDeleteResponse"]


class ThreadDeleteResponse(BaseModel):
    """Response body confirming deletion of a ChatKit thread."""

    id: str
    """Identifier of the deleted thread."""

    deleted: bool
    """Indicates that the thread has been deleted."""

    object: Literal["chatkit.thread.deleted"]
    """Type discriminator that is always `chatkit.thread.deleted`."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union
from typing_extensions import TypeAlias

from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam
from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam

__all__ = ["FileChunkingStrategyParam"]

# A chunking strategy is either server-chosen ("auto") or fully specified
# ("static").
FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam]
2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from .._utils import PropertyInfo 7 | from .image_edit_completed_event import ImageEditCompletedEvent 8 | from .image_edit_partial_image_event import ImageEditPartialImageEvent 9 | 10 | __all__ = ["ImageEditStreamEvent"] 11 | 12 | ImageEditStreamEvent: TypeAlias = Annotated[ 13 | Union[ImageEditPartialImageEvent, ImageEditCompletedEvent], PropertyInfo(discriminator="type") 14 | ] 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["InputAudioBufferCommitEvent"] 9 | 10 | 11 | class InputAudioBufferCommitEvent(BaseModel): 12 | type: Literal["input_audio_buffer.commit"] 13 | """The event type, must be `input_audio_buffer.commit`.""" 14 | 15 | event_id: Optional[str] = None 16 | """Optional client-generated ID used to identify this event.""" 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/lib/_parsing/__init__.py: -------------------------------------------------------------------------------- 1 | from ._completions import ( 2 | ResponseFormatT as ResponseFormatT, 3 | has_parseable_input, 4 | has_parseable_input as has_parseable_input, 5 | maybe_parse_content as maybe_parse_content, 6 | validate_input_tools as validate_input_tools, 7 | parse_chat_completion as parse_chat_completion, 8 | get_input_tool_by_name as get_input_tool_by_name, 9 | solve_response_format_t as solve_response_format_t, 10 | parse_function_tool_arguments as parse_function_tool_arguments, 11 | type_to_response_format_param as 
type_to_response_format_param, 12 | ) 13 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["InputAudioBufferClearEvent"] 9 | 10 | 11 | class InputAudioBufferClearEvent(BaseModel): 12 | type: Literal["input_audio_buffer.clear"] 13 | """The event type, must be `input_audio_buffer.clear`.""" 14 | 15 | event_id: Optional[str] = None 16 | """Optional client-generated ID used to identify this event.""" 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/image_url_delta_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | from .image_url_delta import ImageURLDelta 8 | 9 | __all__ = ["ImageURLDeltaBlock"] 10 | 11 | 12 | class ImageURLDeltaBlock(BaseModel): 13 | index: int 14 | """The index of the content part in the message.""" 15 | 16 | type: Literal["image_url"] 17 | """Always `image_url`.""" 18 | 19 | image_url: Optional[ImageURLDelta] = None 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_logs.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["CodeInterpreterLogs"] 9 | 10 | 11 | class CodeInterpreterLogs(BaseModel): 12 | index: int 13 | """The index of the output in the outputs array.""" 14 | 15 | type: Literal["logs"] 16 | """Always `logs`.""" 17 | 18 | logs: Optional[str] = None 19 | """The text output from the Code Interpreter tool call.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["OutputAudioBufferClearEvent"] 9 | 10 | 11 | class OutputAudioBufferClearEvent(BaseModel): 12 | type: Literal["output_audio_buffer.clear"] 13 | """The event type, must be `output_audio_buffer.clear`.""" 14 | 15 | event_id: Optional[str] = None 16 | """The unique ID of the client event used for error handling.""" 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/response_done_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | from .realtime_response import RealtimeResponse 7 | 8 | __all__ = ["ResponseDoneEvent"] 9 | 10 | 11 | class ResponseDoneEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | response: RealtimeResponse 16 | """The response resource.""" 17 | 18 | type: Literal["response.done"] 19 | """The event type, must be `response.done`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_file_upload.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ChatSessionFileUpload"] 8 | 9 | 10 | class ChatSessionFileUpload(BaseModel): 11 | enabled: bool 12 | """Indicates if uploads are enabled for the session.""" 13 | 14 | max_file_size: Optional[int] = None 15 | """Maximum upload size in megabytes.""" 16 | 17 | max_files: Optional[int] = None 18 | """Maximum number of uploads allowed during the session.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["InputAudioBufferCommitEvent"] 9 | 10 | 11 | class InputAudioBufferCommitEvent(BaseModel): 12 | type: Literal["input_audio_buffer.commit"] 13 | """The event type, must be `input_audio_buffer.commit`.""" 14 | 15 | event_id: Optional[str] = None 16 | """Optional client-generated ID used to identify this event.""" 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/response_done_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .realtime_response import RealtimeResponse 7 | 8 | __all__ = ["ResponseDoneEvent"] 9 | 10 | 11 | class ResponseDoneEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | response: RealtimeResponse 16 | """The response resource.""" 17 | 18 | type: Literal["response.done"] 19 | """The event type, must be `response.done`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/session_created_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from .session import Session 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["SessionCreatedEvent"] 9 | 10 | 11 | class SessionCreatedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | session: Session 16 | """Realtime session object configuration.""" 17 | 18 | type: Literal["session.created"] 19 | """The event type, must be `session.created`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/session_updated_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .session import Session 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["SessionUpdatedEvent"] 9 | 10 | 11 | class SessionUpdatedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | session: Session 16 | """Realtime session object configuration.""" 17 | 18 | type: Literal["session.updated"] 19 | """The event type, must be `session.updated`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/image_file_delta_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | from .image_file_delta import ImageFileDelta 8 | 9 | __all__ = ["ImageFileDeltaBlock"] 10 | 11 | 12 | class ImageFileDeltaBlock(BaseModel): 13 | index: int 14 | """The index of the content part in the message.""" 15 | 16 | type: Literal["image_file"] 17 | """Always `image_file`.""" 18 | 19 | image_file: Optional[ImageFileDelta] = None 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ConversationItemDeletedEvent"] 8 | 9 | 10 | class ConversationItemDeletedEvent(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | item_id: str 15 | """The ID of the item that was deleted.""" 16 | 17 | type: Literal["conversation.item.deleted"] 18 | """The event type, must be `conversation.item.deleted`.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["InputAudioBufferClearEventParam"] 8 | 9 | 10 | class InputAudioBufferClearEventParam(TypedDict, total=False): 11 | type: Required[Literal["input_audio_buffer.clear"]] 12 | """The event type, must be `input_audio_buffer.clear`.""" 13 | 14 | event_id: str 15 | """Optional client-generated ID used to identify this event.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import TypedDict 6 | 7 | from .realtime_audio_config_input_param import RealtimeAudioConfigInputParam 8 | from .realtime_audio_config_output_param import RealtimeAudioConfigOutputParam 9 | 10 | __all__ = ["RealtimeAudioConfigParam"] 11 | 12 | 13 | class RealtimeAudioConfigParam(TypedDict, total=False): 14 | input: RealtimeAudioConfigInputParam 15 | 16 | output: RealtimeAudioConfigOutputParam 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_failed_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from .response import Response 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ResponseFailedEvent"] 9 | 10 | 11 | class ResponseFailedEvent(BaseModel): 12 | response: Response 13 | """The response that failed.""" 14 | 15 | sequence_number: int 16 | """The sequence number of this event.""" 17 | 18 | type: Literal["response.failed"] 19 | """The type of the event. Always `response.failed`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_deleted_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ConversationItemDeletedEvent"] 8 | 9 | 10 | class ConversationItemDeletedEvent(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | item_id: str 15 | """The ID of the item that was deleted.""" 16 | 17 | type: Literal["conversation.item.deleted"] 18 | """The event type, must be `conversation.item.deleted`.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["InputAudioBufferClearEventParam"] 8 | 9 | 10 | class InputAudioBufferClearEventParam(TypedDict, total=False): 11 | type: Required[Literal["input_audio_buffer.clear"]] 12 | """The event type, must be `input_audio_buffer.clear`.""" 13 | 14 | event_id: str 15 | """Optional client-generated ID used to identify this event.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["InputAudioBufferCommitEventParam"] 8 | 9 | 10 | class InputAudioBufferCommitEventParam(TypedDict, total=False): 11 | type: Required[Literal["input_audio_buffer.commit"]] 12 | """The event type, must be `input_audio_buffer.commit`.""" 13 | 14 | event_id: str 15 | """Optional client-generated ID used to identify this event.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["OutputAudioBufferClearEventParam"] 8 | 9 | 10 | class OutputAudioBufferClearEventParam(TypedDict, total=False): 11 | type: Required[Literal["output_audio_buffer.clear"]] 12 | """The event type, must be `output_audio_buffer.clear`.""" 13 | 14 | event_id: str 15 | """The unique ID of the client event used for error handling.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["InputAudioBufferCommitEventParam"] 8 | 9 | 10 | class InputAudioBufferCommitEventParam(TypedDict, total=False): 11 | type: Required[Literal["input_audio_buffer.commit"]] 12 | """The event type, must be `input_audio_buffer.commit`.""" 13 | 14 | event_id: str 15 | """Optional client-generated ID used to identify this event.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/response_created_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | from .realtime_response import RealtimeResponse 7 | 8 | __all__ = ["ResponseCreatedEvent"] 9 | 10 | 11 | class ResponseCreatedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | response: RealtimeResponse 16 | """The response resource.""" 17 | 18 | type: Literal["response.created"] 19 | """The event type, must be `response.created`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_created_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .response import Response 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ResponseCreatedEvent"] 9 | 10 | 11 | class ResponseCreatedEvent(BaseModel): 12 | response: Response 13 | """The response that was created.""" 14 | 15 | sequence_number: int 16 | """The sequence number for this event.""" 17 | 18 | type: Literal["response.created"] 19 | """The type of the event. Always `response.created`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/static_file_chunking_strategy_object_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam 8 | 9 | __all__ = ["StaticFileChunkingStrategyObjectParam"] 10 | 11 | 12 | class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): 13 | static: Required[StaticFileChunkingStrategyParam] 14 | 15 | type: Required[Literal["static"]] 16 | """Always `static`.""" 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/lib/streaming/responses/__init__.py: -------------------------------------------------------------------------------- 1 | from ._events import ( 2 | ResponseTextDoneEvent as ResponseTextDoneEvent, 3 | ResponseTextDeltaEvent as ResponseTextDeltaEvent, 4 | ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, 5 | ) 6 | from ._responses import ( 7 | ResponseStream as ResponseStream, 8 | AsyncResponseStream as AsyncResponseStream, 9 | ResponseStreamEvent as ResponseStreamEvent, 10 | ResponseStreamState as ResponseStreamState, 11 | ResponseStreamManager as ResponseStreamManager, 12 | AsyncResponseStreamManager as AsyncResponseStreamManager, 13 | ) 14 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/response_created_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .realtime_response import RealtimeResponse 7 | 8 | __all__ = ["ResponseCreatedEvent"] 9 | 10 | 11 | class ResponseCreatedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | response: RealtimeResponse 16 | """The response resource.""" 17 | 18 | type: Literal["response.created"] 19 | """The event type, must be `response.created`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_queued_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .response import Response 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ResponseQueuedEvent"] 9 | 10 | 11 | class ResponseQueuedEvent(BaseModel): 12 | response: Response 13 | """The full response object that is queued.""" 14 | 15 | sequence_number: int 16 | """The sequence number for this event.""" 17 | 18 | type: Literal["response.queued"] 19 | """The type of the event. 
Always 'response.queued'.""" 20 | -------------------------------------------------------------------------------- /.changie.yaml: -------------------------------------------------------------------------------- 1 | changesDir: .changes 2 | unreleasedDir: unreleased 3 | headerPath: header.tpl.md 4 | changelogPath: CHANGELOG.md 5 | versionExt: md 6 | versionFormat: '## {{.Version}} - {{.Time.Format "2006-01-02"}}' 7 | kindFormat: '### {{.Kind}}' 8 | changeFormat: '* {{.Body}}' 9 | kinds: 10 | - label: Added 11 | auto: minor 12 | - label: Changed 13 | auto: major 14 | - label: Deprecated 15 | auto: minor 16 | - label: Removed 17 | auto: major 18 | - label: Fixed 19 | auto: patch 20 | - label: Security 21 | auto: patch 22 | newlines: 23 | afterChangelogHeader: 1 24 | beforeChangelogVersion: 1 25 | endOfVersion: 1 26 | envPrefix: CHANGIE_ 27 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/transcription_stream_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ..._utils import PropertyInfo 7 | from .transcription_text_done_event import TranscriptionTextDoneEvent 8 | from .transcription_text_delta_event import TranscriptionTextDeltaEvent 9 | 10 | __all__ = ["TranscriptionStreamEvent"] 11 | 12 | TranscriptionStreamEvent: TypeAlias = Annotated[ 13 | Union[TranscriptionTextDeltaEvent, TranscriptionTextDoneEvent], PropertyInfo(discriminator="type") 14 | ] 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_expires_after_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatSessionExpiresAfterParam"] 8 | 9 | 10 | class ChatSessionExpiresAfterParam(TypedDict, total=False): 11 | anchor: Required[Literal["created_at"]] 12 | """Base timestamp used to calculate expiration. Currently fixed to `created_at`.""" 13 | 14 | seconds: Required[int] 15 | """Number of seconds after the anchor when the session expires.""" 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/tool_call.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ....._utils import PropertyInfo 7 | from .function_tool_call import FunctionToolCall 8 | from .file_search_tool_call import FileSearchToolCall 9 | from .code_interpreter_tool_call import CodeInterpreterToolCall 10 | 11 | __all__ = ["ToolCall"] 12 | 13 | ToolCall: TypeAlias = Annotated[ 14 | Union[CodeInterpreterToolCall, FileSearchToolCall, FunctionToolCall], PropertyInfo(discriminator="type") 15 | ] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | from ..shared_params.function_definition import FunctionDefinition 8 | 9 | __all__ = ["ChatCompletionFunctionToolParam"] 10 | 11 | 12 | class ChatCompletionFunctionToolParam(TypedDict, total=False): 13 | function: Required[FunctionDefinition] 14 | 15 | type: Required[Literal["function"]] 16 | """The type of the tool. Currently, only `function` is supported.""" 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["Model"] 8 | 9 | 10 | class Model(BaseModel): 11 | id: str 12 | """The model identifier, which can be referenced in the API endpoints.""" 13 | 14 | created: int 15 | """The Unix timestamp (in seconds) when the model was created.""" 16 | 17 | object: Literal["model"] 18 | """The object type, which is always "model".""" 19 | 20 | owned_by: str 21 | """The organization that owns the model.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_audio_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseAudioDeltaEvent"] 8 | 9 | 10 | class ResponseAudioDeltaEvent(BaseModel): 11 | delta: str 12 | """A chunk of Base64 encoded response audio bytes.""" 13 | 14 | sequence_number: int 15 | """A sequence number for this chunk of the stream response.""" 16 | 17 | type: Literal["response.audio.delta"] 18 | """The type of the event. Always `response.audio.delta`.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_completed_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .response import Response 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ResponseCompletedEvent"] 9 | 10 | 11 | class ResponseCompletedEvent(BaseModel): 12 | response: Response 13 | """Properties of the completed response.""" 14 | 15 | sequence_number: int 16 | """The sequence number for this event.""" 17 | 18 | type: Literal["response.completed"] 19 | """The type of the event. Always `response.completed`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_incomplete_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from .response import Response 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ResponseIncompleteEvent"] 9 | 10 | 11 | class ResponseIncompleteEvent(BaseModel): 12 | response: Response 13 | """The response that was incomplete.""" 14 | 15 | sequence_number: int 16 | """The sequence number of this event.""" 17 | 18 | type: Literal["response.incomplete"] 19 | """The type of the event. Always `response.incomplete`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_tool_choice.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ..._models import BaseModel 7 | from .assistant_tool_choice_function import AssistantToolChoiceFunction 8 | 9 | __all__ = ["AssistantToolChoice"] 10 | 11 | 12 | class AssistantToolChoice(BaseModel): 13 | type: Literal["function", "code_interpreter", "file_search"] 14 | """The type of the tool. If type is `function`, the function name must be set""" 15 | 16 | function: Optional[AssistantToolChoiceFunction] = None 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/message_creation_step_details.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ....._models import BaseModel 6 | 7 | __all__ = ["MessageCreationStepDetails", "MessageCreation"] 8 | 9 | 10 | class MessageCreation(BaseModel): 11 | message_id: str 12 | """The ID of the message that was created by this run step.""" 13 | 14 | 15 | class MessageCreationStepDetails(BaseModel): 16 | message_creation: MessageCreation 17 | 18 | type: Literal["message_creation"] 19 | """Always `message_creation`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/file_chunking_strategy.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from .._utils import PropertyInfo 7 | from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject 8 | from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject 9 | 10 | __all__ = ["FileChunkingStrategy"] 11 | 12 | FileChunkingStrategy: TypeAlias = Annotated[ 13 | Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type") 14 | ] 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["RealtimeTruncationRetentionRatio"] 8 | 9 | 10 | class RealtimeTruncationRetentionRatio(BaseModel): 11 | retention_ratio: float 12 | """ 13 | Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the 14 | conversation exceeds the input token limit. 15 | """ 16 | 17 | type: Literal["retention_ratio"] 18 | """Use retention ratio truncation.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_in_progress_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .response import Response 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ResponseInProgressEvent"] 9 | 10 | 11 | class ResponseInProgressEvent(BaseModel): 12 | response: Response 13 | """The response that is in progress.""" 14 | 15 | sequence_number: int 16 | """The sequence number of this event.""" 17 | 18 | type: Literal["response.in_progress"] 19 | """The type of the event. Always `response.in_progress`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/upload_complete_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | from .._types import SequenceNotStr 8 | 9 | __all__ = ["UploadCompleteParams"] 10 | 11 | 12 | class UploadCompleteParams(TypedDict, total=False): 13 | part_ids: Required[SequenceNotStr[str]] 14 | """The ordered list of Part IDs.""" 15 | 16 | md5: str 17 | """ 18 | The optional md5 checksum for the file contents to verify if the bytes uploaded 19 | matches what you expect. 20 | """ 21 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/call_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | from .realtime_session_create_request_param import RealtimeSessionCreateRequestParam 8 | 9 | __all__ = ["CallCreateParams"] 10 | 11 | 12 | class CallCreateParams(TypedDict, total=False): 13 | sdp: Required[str] 14 | """WebRTC Session Description Protocol (SDP) offer generated by the caller.""" 15 | 16 | session: RealtimeSessionCreateRequestParam 17 | """Realtime session object configuration.""" 18 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/message_content_part_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from .text_content_block_param import TextContentBlockParam 9 | from .image_url_content_block_param import ImageURLContentBlockParam 10 | from .image_file_content_block_param import ImageFileContentBlockParam 11 | 12 | __all__ = ["MessageContentPartParam"] 13 | 14 | MessageContentPartParam: TypeAlias = Union[ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam] 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_tool_choice_config_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from ..responses.tool_choice_options import ToolChoiceOptions 9 | from ..responses.tool_choice_mcp_param import ToolChoiceMcpParam 10 | from ..responses.tool_choice_function_param import ToolChoiceFunctionParam 11 | 12 | __all__ = ["RealtimeToolChoiceConfigParam"] 13 | 14 | RealtimeToolChoiceConfigParam: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunctionParam, ToolChoiceMcpParam] 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from .permission_create_params import PermissionCreateParams as PermissionCreateParams 6 | from .permission_create_response import PermissionCreateResponse as PermissionCreateResponse 7 | from .permission_delete_response import PermissionDeleteResponse as PermissionDeleteResponse 8 | from .permission_retrieve_params import PermissionRetrieveParams as PermissionRetrieveParams 9 | from .permission_retrieve_response import PermissionRetrieveResponse as PermissionRetrieveResponse 10 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/graders/python_grader.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["PythonGrader"] 9 | 10 | 11 | class PythonGrader(BaseModel): 12 | name: str 13 | """The name of the grader.""" 14 | 15 | source: str 16 | """The source code of the python script.""" 17 | 18 | type: Literal["python"] 19 | """The object type, which is always `python`.""" 20 | 21 | image_tag: Optional[str] = None 22 | """The image tag to use for the python script.""" 23 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Optional 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | __all__ = ["ToolChoiceMcpParam"] 9 | 10 | 11 | class ToolChoiceMcpParam(TypedDict, total=False): 12 | server_label: Required[str] 13 | """The label of the MCP server to use.""" 14 | 15 | type: Required[Literal["mcp"]] 16 | """For MCP tools, the type is always `mcp`.""" 17 | 18 | name: Optional[str] 19 | """The name of the tool to call on the server.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_response_format_option.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Literal, TypeAlias 5 | 6 | from ..shared.response_format_text import ResponseFormatText 7 | from ..shared.response_format_json_object import ResponseFormatJSONObject 8 | from ..shared.response_format_json_schema import ResponseFormatJSONSchema 9 | 10 | __all__ = ["AssistantResponseFormatOption"] 11 | 12 | AssistantResponseFormatOption: TypeAlias = Union[ 13 | Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema 14 | ] 15 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_audio_transcript_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseAudioTranscriptDeltaEvent"] 8 | 9 | 10 | class ResponseAudioTranscriptDeltaEvent(BaseModel): 11 | delta: str 12 | """The partial transcript of the audio response.""" 13 | 14 | sequence_number: int 15 | """The sequence number of this event.""" 16 | 17 | type: Literal["response.audio.transcript.delta"] 18 | """The type of the event. Always `response.audio.transcript.delta`.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/video_list_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, TypedDict 6 | 7 | __all__ = ["VideoListParams"] 8 | 9 | 10 | class VideoListParams(TypedDict, total=False): 11 | after: str 12 | """Identifier for the last item from the previous pagination request""" 13 | 14 | limit: int 15 | """Number of items to retrieve""" 16 | 17 | order: Literal["asc", "desc"] 18 | """Sort order of results by timestamp. 19 | 20 | Use `asc` for ascending order or `desc` for descending order. 21 | """ 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/message_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import List, Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | from .message_content_delta import MessageContentDelta 8 | 9 | __all__ = ["MessageDelta"] 10 | 11 | 12 | class MessageDelta(BaseModel): 13 | content: Optional[List[MessageContentDelta]] = None 14 | """The content of the message in array of text and/or images.""" 15 | 16 | role: Optional[Literal["user", "assistant"]] = None 17 | """The entity that produced the message. One of `user` or `assistant`.""" 18 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ConversationItemDeleteEvent"] 9 | 10 | 11 | class ConversationItemDeleteEvent(BaseModel): 12 | item_id: str 13 | """The ID of the item to delete.""" 14 | 15 | type: Literal["conversation.item.delete"] 16 | """The event type, must be `conversation.item.delete`.""" 17 | 18 | event_id: Optional[str] = None 19 | """Optional client-generated ID used to identify this event.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam 8 | 9 | __all__ = ["AssistantToolChoiceParam"] 10 | 11 | 12 | class AssistantToolChoiceParam(TypedDict, total=False): 13 | type: Required[Literal["function", "code_interpreter", "file_search"]] 14 | """The type of the tool. If type is `function`, the function name must be set""" 15 | 16 | function: AssistantToolChoiceFunctionParam 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ConversationItemDeleteEvent"] 9 | 10 | 11 | class ConversationItemDeleteEvent(BaseModel): 12 | item_id: str 13 | """The ID of the item to delete.""" 14 | 15 | type: Literal["conversation.item.delete"] 16 | """The event type, must be `conversation.item.delete`.""" 17 | 18 | event_id: Optional[str] = None 19 | """Optional client-generated ID used to identify this event.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseMcpCallFailed"] 8 | 9 | 10 | class ResponseMcpCallFailed(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | item_id: str 15 | """The ID of the MCP tool call item.""" 16 | 17 | output_index: int 18 | """The index of the output item in the response.""" 19 | 20 | type: Literal["response.mcp_call.failed"] 21 | """The event type, must be `response.mcp_call.failed`.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/graders/python_grader_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["PythonGraderParam"] 8 | 9 | 10 | class PythonGraderParam(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the grader.""" 13 | 14 | source: Required[str] 15 | """The source code of the python script.""" 16 | 17 | type: Required[Literal["python"]] 18 | """The object type, which is always `python`.""" 19 | 20 | image_tag: str 21 | """The image tag to use for the python script.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ConversationItemRetrieveEvent"] 9 | 10 | 11 | class ConversationItemRetrieveEvent(BaseModel): 12 | item_id: str 13 | """The ID of the item to retrieve.""" 14 | 15 | type: Literal["conversation.item.retrieve"] 16 | """The event type, must be `conversation.item.retrieve`.""" 17 | 18 | event_id: Optional[str] = None 19 | """Optional client-generated ID used to identify this event.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_retrieve_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ConversationItemRetrieveEvent"] 9 | 10 | 11 | class ConversationItemRetrieveEvent(BaseModel): 12 | item_id: str 13 | """The ID of the item to retrieve.""" 14 | 15 | type: Literal["conversation.item.retrieve"] 16 | """The event type, must be `conversation.item.retrieve`.""" 17 | 18 | event_id: Optional[str] = None 19 | """Optional client-generated ID used to identify this event.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionNamedToolChoiceParam", "Function"] 8 | 9 | 10 | class Function(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the function to call.""" 13 | 14 | 15 | class ChatCompletionNamedToolChoiceParam(TypedDict, total=False): 16 | function: Required[Function] 17 | 18 | type: Required[Literal["function"]] 19 | """For function calling, the type is always `function`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_delete_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["PermissionDeleteResponse"] 8 | 9 | 10 | class PermissionDeleteResponse(BaseModel): 11 | id: str 12 | """The ID of the fine-tuned model checkpoint permission that was deleted.""" 13 | 14 | deleted: bool 15 | """Whether the fine-tuned model checkpoint permission was successfully deleted.""" 16 | 17 | object: Literal["checkpoint.permission"] 18 | """The object type, which is always "checkpoint.permission".""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/message_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .message_delta import MessageDelta 7 | 8 | __all__ = ["MessageDeltaEvent"] 9 | 10 | 11 | class MessageDeltaEvent(BaseModel): 12 | id: str 13 | """The identifier of the message, which can be referenced in API endpoints.""" 14 | 15 | delta: MessageDelta 16 | """The delta containing the fields that have changed on the Message.""" 17 | 18 | object: Literal["thread.message.delta"] 19 | """The object type, which is always `thread.message.delta`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ConversationItemDeleteEventParam"] 8 | 9 | 10 | class ConversationItemDeleteEventParam(TypedDict, total=False): 11 | item_id: Required[str] 12 | """The ID of the item to delete.""" 13 | 14 | type: Required[Literal["conversation.item.delete"]] 15 | """The event type, must be `conversation.item.delete`.""" 16 | 17 | event_id: str 18 | """Optional client-generated ID used to identify this event.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseMcpCallCompleted"] 8 | 9 | 10 | class ResponseMcpCallCompleted(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | item_id: str 15 | """The ID of the MCP tool call item.""" 16 | 17 | output_index: int 18 | """The index of the output item in the response.""" 19 | 20 | type: Literal["response.mcp_call.completed"] 21 | """The event type, must be `response.mcp_call.completed`.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ConversationItemDeleteEventParam"] 8 | 9 | 10 | class ConversationItemDeleteEventParam(TypedDict, total=False): 11 | item_id: Required[str] 12 | """The ID of the item to delete.""" 13 | 14 | type: Required[Literal["conversation.item.delete"]] 15 | """The event type, must be `conversation.item.delete`.""" 16 | 17 | event_id: str 18 | """Optional client-generated ID used to identify this event.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_message_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["RunStepDeltaMessageDelta", "MessageCreation"] 9 | 10 | 11 | class MessageCreation(BaseModel): 12 | message_id: Optional[str] = None 13 | """The ID of the message that was created by this run step.""" 14 | 15 | 16 | class RunStepDeltaMessageDelta(BaseModel): 17 | type: Literal["message_creation"] 18 | """Always `message_creation`.""" 19 | 20 | message_creation: Optional[MessageCreation] = None 21 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionNamedToolChoiceCustomParam", "Custom"] 8 | 9 | 10 | class Custom(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the custom tool to call.""" 13 | 14 | 15 | class ChatCompletionNamedToolChoiceCustomParam(TypedDict, total=False): 16 | custom: Required[Custom] 17 | 18 | type: Required[Literal["custom"]] 19 | """For custom tool calling, the type is always `custom`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseMcpCallInProgress"] 8 | 9 | 10 | class ResponseMcpCallInProgress(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | item_id: str 15 | """The ID of the MCP tool call item.""" 16 | 17 | output_index: int 18 | """The index of the output item in the response.""" 19 | 20 | type: Literal["response.mcp_call.in_progress"] 21 | """The event type, must be `response.mcp_call.in_progress`.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/file_path_annotation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["FilePathAnnotation", "FilePath"] 8 | 9 | 10 | class FilePath(BaseModel): 11 | file_id: str 12 | """The ID of the file that was generated.""" 13 | 14 | 15 | class FilePathAnnotation(BaseModel): 16 | end_index: int 17 | 18 | file_path: FilePath 19 | 20 | start_index: int 21 | 22 | text: str 23 | """The text in the message content that needs to be replaced.""" 24 | 25 | type: Literal["file_path"] 26 | """Always `file_path`.""" 27 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/tool_call_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ....._utils import PropertyInfo 7 | from .function_tool_call_delta import FunctionToolCallDelta 8 | from .file_search_tool_call_delta import FileSearchToolCallDelta 9 | from .code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta 10 | 11 | __all__ = ["ToolCallDelta"] 12 | 13 | ToolCallDelta: TypeAlias = Annotated[ 14 | Union[CodeInterpreterToolCallDelta, FileSearchToolCallDelta, FunctionToolCallDelta], 15 | PropertyInfo(discriminator="type"), 16 | ] 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ConversationItemRetrieveEventParam"] 8 | 9 | 10 | class ConversationItemRetrieveEventParam(TypedDict, total=False): 11 | item_id: Required[str] 12 | """The ID of the item to retrieve.""" 13 | 14 | type: Required[Literal["conversation.item.retrieve"]] 15 | """The event type, must be `conversation.item.retrieve`.""" 16 | 17 | event_id: str 18 | """Optional client-generated ID used to identify this event.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_input_audio.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseInputAudio", "InputAudio"] 8 | 9 | 10 | class InputAudio(BaseModel): 11 | data: str 12 | """Base64-encoded audio data.""" 13 | 14 | format: Literal["mp3", "wav"] 15 | """The format of the audio data. Currently supported formats are `mp3` and `wav`.""" 16 | 17 | 18 | class ResponseInputAudio(BaseModel): 19 | input_audio: InputAudio 20 | 21 | type: Literal["input_audio"] 22 | """The type of the input item. Always `input_audio`.""" 23 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/shared/compound_filter.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Union 4 | from typing_extensions import Literal, TypeAlias 5 | 6 | from ..._models import BaseModel 7 | from .comparison_filter import ComparisonFilter 8 | 9 | __all__ = ["CompoundFilter", "Filter"] 10 | 11 | Filter: TypeAlias = Union[ComparisonFilter, object] 12 | 13 | 14 | class CompoundFilter(BaseModel): 15 | filters: List[Filter] 16 | """Array of filters to combine. 17 | 18 | Items can be `ComparisonFilter` or `CompoundFilter`. 19 | """ 20 | 21 | type: Literal["and", "or"] 22 | """Type of operation: `and` or `or`.""" 23 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/uploads/upload_part.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["UploadPart"] 8 | 9 | 10 | class UploadPart(BaseModel): 11 | id: str 12 | """The upload Part unique identifier, which can be referenced in API endpoints.""" 13 | 14 | created_at: int 15 | """The Unix timestamp (in seconds) for when the Part was created.""" 16 | 17 | object: Literal["upload.part"] 18 | """The object type, which is always `upload.part`.""" 19 | 20 | upload_id: str 21 | """The ID of the Upload object that this Part was added to.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_retrieve_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ConversationItemRetrieveEventParam"] 8 | 9 | 10 | class ConversationItemRetrieveEventParam(TypedDict, total=False): 11 | item_id: Required[str] 12 | """The ID of the item to retrieve.""" 13 | 14 | type: Required[Literal["conversation.item.retrieve"]] 15 | """The event type, must be `conversation.item.retrieve`.""" 16 | 17 | event_id: str 18 | """Optional client-generated ID used to identify this event.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/image_url_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageURLDelta"] 9 | 10 | 11 | class ImageURLDelta(BaseModel): 12 | detail: Optional[Literal["auto", "low", "high"]] = None 13 | """Specifies the detail level of the image. 14 | 15 | `low` uses fewer tokens, you can opt in to high resolution using `high`. 16 | """ 17 | 18 | url: Optional[str] = None 19 | """ 20 | The URL of the image, must be a supported image types: jpeg, jpg, png, gif, 21 | webp. 22 | """ 23 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ....._models import BaseModel 6 | from .run_step_delta import RunStepDelta 7 | 8 | __all__ = ["RunStepDeltaEvent"] 9 | 10 | 11 | class RunStepDeltaEvent(BaseModel): 12 | id: str 13 | """The identifier of the run step, which can be referenced in API endpoints.""" 14 | 15 | delta: RunStepDelta 16 | """The delta containing the fields that have changed on the run step.""" 17 | 18 | object: Literal["thread.run.step.delta"] 19 | """The object type, which is always `thread.run.step.delta`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/runs/tool_calls_step_details.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import List 4 | from typing_extensions import Literal 5 | 6 | from .tool_call import ToolCall 7 | from ....._models import BaseModel 8 | 9 | __all__ = ["ToolCallsStepDetails"] 10 | 11 | 12 | class ToolCallsStepDetails(BaseModel): 13 | tool_calls: List[ToolCall] 14 | """An array of tool calls the run step was involved in. 15 | 16 | These can be associated with one of three types of tools: `code_interpreter`, 17 | `file_search`, or `function`. 18 | """ 19 | 20 | type: Literal["tool_calls"] 21 | """Always `tool_calls`.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from ..._models import BaseModel 4 | 5 | __all__ = ["RealtimeSessionClientSecret"] 6 | 7 | 8 | class RealtimeSessionClientSecret(BaseModel): 9 | expires_at: int 10 | """Timestamp for when the token expires. 11 | 12 | Currently, all tokens expire after one minute. 13 | """ 14 | 15 | value: str 16 | """ 17 | Ephemeral key usable in client environments to authenticate connections to the 18 | Realtime API. Use this in client-side environments rather than a standard API 19 | token, which should only be used server-side. 20 | """ 21 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["RealtimeTruncationRetentionRatioParam"] 8 | 9 | 10 | class RealtimeTruncationRetentionRatioParam(TypedDict, total=False): 11 | retention_ratio: Required[float] 12 | """ 13 | Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the 14 | conversation exceeds the input token limit. 15 | """ 16 | 17 | type: Required[Literal["retention_ratio"]] 18 | """Use retention ratio truncation.""" 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/_utils/_resources_proxy.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any 4 | from typing_extensions import override 5 | 6 | from ._proxy import LazyProxy 7 | 8 | 9 | class ResourcesProxy(LazyProxy[Any]): 10 | """A proxy for the `openai.resources` module. 11 | 12 | This is used so that we can lazily import `openai.resources` only when 13 | needed *and* so that users can just import `openai` and reference `openai.resources` 14 | """ 15 | 16 | @override 17 | def __load__(self) -> Any: 18 | import importlib 19 | 20 | mod = importlib.import_module("openai.resources") 21 | return mod 22 | 23 | 24 | resources = ResourcesProxy().__as_proxied__() 25 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_union_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from .chat_completion_message_custom_tool_call_param import ChatCompletionMessageCustomToolCallParam 9 | from .chat_completion_message_function_tool_call_param import ChatCompletionMessageFunctionToolCallParam 10 | 11 | __all__ = ["ChatCompletionMessageToolCallUnionParam"] 12 | 13 | ChatCompletionMessageToolCallUnionParam: TypeAlias = Union[ 14 | ChatCompletionMessageFunctionToolCallParam, ChatCompletionMessageCustomToolCallParam 15 | ] 16 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/completions/message_list_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, TypedDict 6 | 7 | __all__ = ["MessageListParams"] 8 | 9 | 10 | class MessageListParams(TypedDict, total=False): 11 | after: str 12 | """Identifier for the last message from the previous pagination request.""" 13 | 14 | limit: int 15 | """Number of messages to retrieve.""" 16 | 17 | order: Literal["asc", "desc"] 18 | """Sort order for messages by timestamp. 19 | 20 | Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. 21 | """ 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/eval_custom_data_source_config.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Dict 4 | from typing_extensions import Literal 5 | 6 | from pydantic import Field as FieldInfo 7 | 8 | from .._models import BaseModel 9 | 10 | __all__ = ["EvalCustomDataSourceConfig"] 11 | 12 | 13 | class EvalCustomDataSourceConfig(BaseModel): 14 | schema_: Dict[str, object] = FieldInfo(alias="schema") 15 | """ 16 | The json schema for the run data source items. Learn how to build JSON schemas 17 | [here](https://json-schema.org/). 18 | """ 19 | 20 | type: Literal["custom"] 21 | """The type of data source. Always `custom`.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseMcpCallFailedEvent"] 8 | 9 | 10 | class ResponseMcpCallFailedEvent(BaseModel): 11 | item_id: str 12 | """The ID of the MCP tool call item that failed.""" 13 | 14 | output_index: int 15 | """The index of the output item that failed.""" 16 | 17 | sequence_number: int 18 | """The sequence number of this event.""" 19 | 20 | type: Literal["response.mcp_call.failed"] 21 | """The type of the event. Always 'response.mcp_call.failed'.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/static_file_chunking_strategy.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .._models import BaseModel 4 | 5 | __all__ = ["StaticFileChunkingStrategy"] 6 | 7 | 8 | class StaticFileChunkingStrategy(BaseModel): 9 | chunk_overlap_tokens: int 10 | """The number of tokens that overlap between chunks. The default value is `400`. 11 | 12 | Note that the overlap must not exceed half of `max_chunk_size_tokens`. 13 | """ 14 | 15 | max_chunk_size_tokens: int 16 | """The maximum number of tokens in each chunk. 17 | 18 | The default value is `800`. The minimum value is `100` and the maximum value is 19 | `4096`. 20 | """ 21 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/image_url.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageURL"] 9 | 10 | 11 | class ImageURL(BaseModel): 12 | url: str 13 | """ 14 | The external URL of the image, must be a supported image types: jpeg, jpg, png, 15 | gif, webp. 16 | """ 17 | 18 | detail: Optional[Literal["auto", "low", "high"]] = None 19 | """Specifies the detail level of the image. 20 | 21 | `low` uses fewer tokens, you can opt in to high resolution using `high`. Default 22 | value is `auto` 23 | """ 24 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/chat/chat_completion_function_message_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Optional 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | __all__ = ["ChatCompletionFunctionMessageParam"] 9 | 10 | 11 | class ChatCompletionFunctionMessageParam(TypedDict, total=False): 12 | content: Required[Optional[str]] 13 | """The contents of the function message.""" 14 | 15 | name: Required[str] 16 | """The name of the function to call.""" 17 | 18 | role: Required[Literal["function"]] 19 | """The role of the messages author, in this case `function`.""" 20 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_error_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ..._models import BaseModel 7 | 8 | __all__ = ["ResponseErrorEvent"] 9 | 10 | 11 | class ResponseErrorEvent(BaseModel): 12 | code: Optional[str] = None 13 | """The error code.""" 14 | 15 | message: str 16 | """The error message.""" 17 | 18 | param: Optional[str] = None 19 | """The error parameter.""" 20 | 21 | sequence_number: int 22 | """The sequence number of this event.""" 23 | 24 | type: Literal["error"] 25 | """The type of the event. Always `error`.""" 26 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/message_content_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ...._utils import PropertyInfo 7 | from .text_delta_block import TextDeltaBlock 8 | from .refusal_delta_block import RefusalDeltaBlock 9 | from .image_url_delta_block import ImageURLDeltaBlock 10 | from .image_file_delta_block import ImageFileDeltaBlock 11 | 12 | __all__ = ["MessageContentDelta"] 13 | 14 | MessageContentDelta: TypeAlias = Annotated[ 15 | Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock], 16 | PropertyInfo(discriminator="type"), 17 | ] 18 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/computer_tool.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ComputerTool"] 8 | 9 | 10 | class ComputerTool(BaseModel): 11 | display_height: int 12 | """The height of the computer display.""" 13 | 14 | display_width: int 15 | """The width of the computer display.""" 16 | 17 | environment: Literal["windows", "mac", "linux", "ubuntu", "browser"] 18 | """The type of computer environment to control.""" 19 | 20 | type: Literal["computer_use_preview"] 21 | """The type of the computer use tool. Always `computer_use_preview`.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_input_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ..._utils import PropertyInfo 7 | from .response_input_file import ResponseInputFile 8 | from .response_input_text import ResponseInputText 9 | from .response_input_audio import ResponseInputAudio 10 | from .response_input_image import ResponseInputImage 11 | 12 | __all__ = ["ResponseInputContent"] 13 | 14 | ResponseInputContent: TypeAlias = Annotated[ 15 | Union[ResponseInputText, ResponseInputImage, ResponseInputFile, ResponseInputAudio], 16 | PropertyInfo(discriminator="type"), 17 | ] 18 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/batch_error.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["BatchError"] 8 | 9 | 10 | class BatchError(BaseModel): 11 | code: Optional[str] = None 12 | """An error code identifying the error type.""" 13 | 14 | line: Optional[int] = None 15 | """The line number of the input file where the error occurred, if applicable.""" 16 | 17 | message: Optional[str] = None 18 | """A human-readable message providing more details about the error.""" 19 | 20 | param: Optional[str] = None 21 | """The name of the parameter that caused the error, if applicable.""" 22 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/image_url_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ImageURLParam"] 8 | 9 | 10 | class ImageURLParam(TypedDict, total=False): 11 | url: Required[str] 12 | """ 13 | The external URL of the image, must be a supported image types: jpeg, jpg, png, 14 | gif, webp. 15 | """ 16 | 17 | detail: Literal["auto", "low", "high"] 18 | """Specifies the detail level of the image. 19 | 20 | `low` uses fewer tokens, you can opt in to high resolution using `high`. Default 21 | value is `auto` 22 | """ 23 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/audio/translation_verbose.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Optional 4 | 5 | from ..._models import BaseModel 6 | from .transcription_segment import TranscriptionSegment 7 | 8 | __all__ = ["TranslationVerbose"] 9 | 10 | 11 | class TranslationVerbose(BaseModel): 12 | duration: float 13 | """The duration of the input audio.""" 14 | 15 | language: str 16 | """The language of the output translation (always `english`).""" 17 | 18 | text: str 19 | """The translated text.""" 20 | 21 | segments: Optional[List[TranscriptionSegment]] = None 22 | """Segments of the translated text and their corresponding details.""" 23 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/assistant_response_format_option_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import Literal, TypeAlias 7 | 8 | from ..shared_params.response_format_text import ResponseFormatText 9 | from ..shared_params.response_format_json_object import ResponseFormatJSONObject 10 | from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema 11 | 12 | __all__ = ["AssistantResponseFormatOptionParam"] 13 | 14 | AssistantResponseFormatOptionParam: TypeAlias = Union[ 15 | Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema 16 | ] 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/beta/threads/message_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ...._utils import PropertyInfo 7 | from .text_content_block import TextContentBlock 8 | from .refusal_content_block import RefusalContentBlock 9 | from .image_url_content_block import ImageURLContentBlock 10 | from .image_file_content_block import ImageFileContentBlock 11 | 12 | __all__ = ["MessageContent"] 13 | 14 | 15 | MessageContent: TypeAlias = Annotated[ 16 | Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock], 17 | PropertyInfo(discriminator="type"), 18 | ] 19 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_format_text_config_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from ..shared_params.response_format_text import ResponseFormatText 9 | from ..shared_params.response_format_json_object import ResponseFormatJSONObject 10 | from .response_format_text_json_schema_config_param import ResponseFormatTextJSONSchemaConfigParam 11 | 12 | __all__ = ["ResponseFormatTextConfigParam"] 13 | 14 | ResponseFormatTextConfigParam: TypeAlias = Union[ 15 | ResponseFormatText, ResponseFormatTextJSONSchemaConfigParam, ResponseFormatJSONObject 16 | ] 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseMcpCallCompletedEvent"] 8 | 9 | 10 | class ResponseMcpCallCompletedEvent(BaseModel): 11 | item_id: str 12 | """The ID of the MCP tool call item that completed.""" 13 | 14 | output_index: int 15 | """The index of the output item that completed.""" 16 | 17 | sequence_number: int 18 | """The sequence number of this event.""" 19 | 20 | type: Literal["response.mcp_call.completed"] 21 | """The type of the event. 
Always 'response.mcp_call.completed'.""" 22 | -------------------------------------------------------------------------------- /tests/manual_test_async_feedback.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from portkey_ai import AsyncPortkey 3 | import os 4 | 5 | 6 | portkey = AsyncPortkey( 7 | api_key=os.environ.get("PORTKEY_API_KEY"), 8 | ) 9 | 10 | 11 | traceId = "0c41ce35-b321-4484-bead-1c21eae02996" 12 | 13 | 14 | async def main(): 15 | print("Step: Create Feedback") 16 | result = await portkey.feedback.create( 17 | trace_id=traceId, 18 | value="1", 19 | ) 20 | print(result) 21 | 22 | update_feedback_id = result.feedback_ids[0] 23 | 24 | print("Step: Update Feedback") 25 | result = await portkey.feedback.update( 26 | feedback_id=update_feedback_id, 27 | value="7", 28 | ) 29 | print(result) 30 | 31 | 32 | asyncio.run(main()) 33 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ModerationImageURLInputParam", "ImageURL"] 8 | 9 | 10 | class ImageURL(TypedDict, total=False): 11 | url: Required[str] 12 | """Either a URL of the image or the base64 encoded image data.""" 13 | 14 | 15 | class ModerationImageURLInputParam(TypedDict, total=False): 16 | image_url: Required[ImageURL] 17 | """Contains either an image URL or a data URL for a base64 encoded image.""" 18 | 19 | type: Required[Literal["image_url"]] 20 | """Always `image_url`.""" 21 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_function_call_output_item.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ..._utils import PropertyInfo 7 | from .response_input_file_content import ResponseInputFileContent 8 | from .response_input_text_content import ResponseInputTextContent 9 | from .response_input_image_content import ResponseInputImageContent 10 | 11 | __all__ = ["ResponseFunctionCallOutputItem"] 12 | 13 | ResponseFunctionCallOutputItem: TypeAlias = Annotated[ 14 | Union[ResponseInputTextContent, ResponseInputImageContent, ResponseInputFileContent], 15 | PropertyInfo(discriminator="type"), 16 | ] 17 | -------------------------------------------------------------------------------- /portkey_ai/_vendor/openai/types/responses/response_input_content_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from .response_input_file_param import ResponseInputFileParam 9 | from .response_input_text_param import ResponseInputTextParam 10 | from .response_input_audio_param import ResponseInputAudioParam 11 | from .response_input_image_param import ResponseInputImageParam 12 | 13 | __all__ = ["ResponseInputContentParam"] 14 | 15 | ResponseInputContentParam: TypeAlias = Union[ 16 | ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam, ResponseInputAudioParam 17 | ] 18 | --------------------------------------------------------------------------------