├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml ├── pull_request_template.md └── workflows │ ├── ci.yml │ ├── create-releases.yml │ ├── publish-pypi.yml │ └── release-doctor.yml ├── .gitignore ├── .inline-snapshot └── external │ ├── .gitignore │ ├── 173417d553406f034f643e5db3f8d591fb691ebac56f5ae39a22cc7d455c5353.bin │ ├── 2018feb66ae13fcf5333d61b95849decc68d3f63bd38172889367e1afb1e04f7.bin │ ├── 4cc50a6135d254573a502310e6af1246f55edb6ad95fa24059f160996b68866d.bin │ ├── 569c877e69429d4cbc1577d2cd6dd33878095c68badc6b6654a69769b391a1c1.bin │ ├── 7e5ea4d12e7cc064399b6631415e65923f182256b6e6b752950a3aaa2ad2320a.bin │ ├── 83b060bae42eb41c4f1edbb7c1542b954b37d9dfd1910b964ddebc9677e6ae85.bin │ ├── a247c49c5fcd492bfb7a02a3306ad615ed8d8f649888ebfddfbc3ee151f44d46.bin │ ├── a491adda08c3d4fde95f5b2ee3f60f7f745f1a56d82e62f58031cc2add502380.bin │ ├── c6aa7e397b7123c3501f25df3a05d4daf7e8ad6d61ffa406ab5361fe36a8d5b1.bin │ ├── d615580118391ee13492193e3a8bb74642d23ac1ca13fe37cb6e889b66f759f6.bin │ ├── e2aad469b71d1d4894ff833ea147020a9d875eb7ce644a0ff355581690a4cbfd.bin │ └── f82268f2fefd5cfbc7eeb59c297688be2f6ca0849a6e4f17851b517310841d9b.bin ├── .python-version ├── .release-please-manifest.json ├── .stats.yml ├── Brewfile ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── api.md ├── bin ├── check-release-environment └── publish-pypi ├── examples ├── .keep ├── assistant.py ├── assistant_stream.py ├── assistant_stream_helpers.py ├── async_demo.py ├── audio.py ├── azure.py ├── azure_ad.py ├── demo.py ├── generate_file.sh ├── module_client.py ├── parsing.py ├── parsing_stream.py ├── parsing_tools.py ├── parsing_tools_stream.py ├── picture.py ├── realtime │ ├── audio_util.py │ ├── azure_realtime.py │ └── push_to_talk_app.py ├── streaming.py └── uploads.py ├── helpers.md ├── mypy.ini ├── noxfile.py ├── pyproject.toml ├── 
release-please-config.json ├── requirements-dev.lock ├── requirements.lock ├── scripts ├── bootstrap ├── format ├── lint ├── mock ├── test └── utils │ └── ruffen-docs.py ├── src └── openai │ ├── __init__.py │ ├── __main__.py │ ├── _base_client.py │ ├── _client.py │ ├── _compat.py │ ├── _constants.py │ ├── _exceptions.py │ ├── _extras │ ├── __init__.py │ ├── _common.py │ ├── numpy_proxy.py │ └── pandas_proxy.py │ ├── _files.py │ ├── _legacy_response.py │ ├── _models.py │ ├── _module_client.py │ ├── _qs.py │ ├── _resource.py │ ├── _response.py │ ├── _streaming.py │ ├── _types.py │ ├── _utils │ ├── __init__.py │ ├── _logs.py │ ├── _proxy.py │ ├── _reflection.py │ ├── _streams.py │ ├── _sync.py │ ├── _transform.py │ ├── _typing.py │ └── _utils.py │ ├── _version.py │ ├── cli │ ├── __init__.py │ ├── _api │ │ ├── __init__.py │ │ ├── _main.py │ │ ├── audio.py │ │ ├── chat │ │ │ ├── __init__.py │ │ │ └── completions.py │ │ ├── completions.py │ │ ├── files.py │ │ ├── image.py │ │ └── models.py │ ├── _cli.py │ ├── _errors.py │ ├── _models.py │ ├── _progress.py │ ├── _tools │ │ ├── __init__.py │ │ ├── _main.py │ │ ├── fine_tunes.py │ │ └── migrate.py │ └── _utils.py │ ├── lib │ ├── .keep │ ├── __init__.py │ ├── _old_api.py │ ├── _parsing │ │ ├── __init__.py │ │ └── _completions.py │ ├── _pydantic.py │ ├── _tools.py │ ├── _validators.py │ ├── azure.py │ └── streaming │ │ ├── __init__.py │ │ ├── _assistants.py │ │ ├── _deltas.py │ │ └── chat │ │ ├── __init__.py │ │ ├── _completions.py │ │ ├── _events.py │ │ └── _types.py │ ├── pagination.py │ ├── py.typed │ ├── resources │ ├── __init__.py │ ├── audio │ │ ├── __init__.py │ │ ├── audio.py │ │ ├── speech.py │ │ ├── transcriptions.py │ │ └── translations.py │ ├── batches.py │ ├── beta │ │ ├── __init__.py │ │ ├── assistants.py │ │ ├── beta.py │ │ ├── chat │ │ │ ├── __init__.py │ │ │ ├── chat.py │ │ │ └── completions.py │ │ ├── realtime │ │ │ ├── __init__.py │ │ │ ├── realtime.py │ │ │ └── sessions.py │ │ ├── threads │ │ │ ├── 
__init__.py │ │ │ ├── messages.py │ │ │ ├── runs │ │ │ │ ├── __init__.py │ │ │ │ ├── runs.py │ │ │ │ └── steps.py │ │ │ └── threads.py │ │ └── vector_stores │ │ │ ├── __init__.py │ │ │ ├── file_batches.py │ │ │ ├── files.py │ │ │ └── vector_stores.py │ ├── chat │ │ ├── __init__.py │ │ ├── chat.py │ │ └── completions.py │ ├── completions.py │ ├── embeddings.py │ ├── files.py │ ├── fine_tuning │ │ ├── __init__.py │ │ ├── fine_tuning.py │ │ └── jobs │ │ │ ├── __init__.py │ │ │ ├── checkpoints.py │ │ │ └── jobs.py │ ├── images.py │ ├── models.py │ ├── moderations.py │ └── uploads │ │ ├── __init__.py │ │ ├── parts.py │ │ └── uploads.py │ ├── types │ ├── __init__.py │ ├── audio │ │ ├── __init__.py │ │ ├── speech_create_params.py │ │ ├── speech_model.py │ │ ├── transcription.py │ │ ├── transcription_create_params.py │ │ ├── transcription_create_response.py │ │ ├── transcription_segment.py │ │ ├── transcription_verbose.py │ │ ├── transcription_word.py │ │ ├── translation.py │ │ ├── translation_create_params.py │ │ ├── translation_create_response.py │ │ └── translation_verbose.py │ ├── audio_model.py │ ├── audio_response_format.py │ ├── batch.py │ ├── batch_create_params.py │ ├── batch_error.py │ ├── batch_list_params.py │ ├── batch_request_counts.py │ ├── beta │ │ ├── __init__.py │ │ ├── assistant.py │ │ ├── assistant_create_params.py │ │ ├── assistant_deleted.py │ │ ├── assistant_list_params.py │ │ ├── assistant_response_format_option.py │ │ ├── assistant_response_format_option_param.py │ │ ├── assistant_stream_event.py │ │ ├── assistant_tool.py │ │ ├── assistant_tool_choice.py │ │ ├── assistant_tool_choice_function.py │ │ ├── assistant_tool_choice_function_param.py │ │ ├── assistant_tool_choice_option.py │ │ ├── assistant_tool_choice_option_param.py │ │ ├── assistant_tool_choice_param.py │ │ ├── assistant_tool_param.py │ │ ├── assistant_update_params.py │ │ ├── auto_file_chunking_strategy_param.py │ │ ├── chat │ │ │ └── __init__.py │ │ ├── code_interpreter_tool.py │ │ 
├── code_interpreter_tool_param.py │ │ ├── file_chunking_strategy.py │ │ ├── file_chunking_strategy_param.py │ │ ├── file_search_tool.py │ │ ├── file_search_tool_param.py │ │ ├── function_tool.py │ │ ├── function_tool_param.py │ │ ├── other_file_chunking_strategy_object.py │ │ ├── realtime │ │ │ ├── __init__.py │ │ │ ├── conversation_created_event.py │ │ │ ├── conversation_item.py │ │ │ ├── conversation_item_content.py │ │ │ ├── conversation_item_content_param.py │ │ │ ├── conversation_item_create_event.py │ │ │ ├── conversation_item_create_event_param.py │ │ │ ├── conversation_item_created_event.py │ │ │ ├── conversation_item_delete_event.py │ │ │ ├── conversation_item_delete_event_param.py │ │ │ ├── conversation_item_deleted_event.py │ │ │ ├── conversation_item_input_audio_transcription_completed_event.py │ │ │ ├── conversation_item_input_audio_transcription_failed_event.py │ │ │ ├── conversation_item_param.py │ │ │ ├── conversation_item_truncate_event.py │ │ │ ├── conversation_item_truncate_event_param.py │ │ │ ├── conversation_item_truncated_event.py │ │ │ ├── error_event.py │ │ │ ├── input_audio_buffer_append_event.py │ │ │ ├── input_audio_buffer_append_event_param.py │ │ │ ├── input_audio_buffer_clear_event.py │ │ │ ├── input_audio_buffer_clear_event_param.py │ │ │ ├── input_audio_buffer_cleared_event.py │ │ │ ├── input_audio_buffer_commit_event.py │ │ │ ├── input_audio_buffer_commit_event_param.py │ │ │ ├── input_audio_buffer_committed_event.py │ │ │ ├── input_audio_buffer_speech_started_event.py │ │ │ ├── input_audio_buffer_speech_stopped_event.py │ │ │ ├── rate_limits_updated_event.py │ │ │ ├── realtime_client_event.py │ │ │ ├── realtime_client_event_param.py │ │ │ ├── realtime_connect_params.py │ │ │ ├── realtime_response.py │ │ │ ├── realtime_response_status.py │ │ │ ├── realtime_response_usage.py │ │ │ ├── realtime_server_event.py │ │ │ ├── response_audio_delta_event.py │ │ │ ├── response_audio_done_event.py │ │ │ ├── 
response_audio_transcript_delta_event.py │ │ │ ├── response_audio_transcript_done_event.py │ │ │ ├── response_cancel_event.py │ │ │ ├── response_cancel_event_param.py │ │ │ ├── response_content_part_added_event.py │ │ │ ├── response_content_part_done_event.py │ │ │ ├── response_create_event.py │ │ │ ├── response_create_event_param.py │ │ │ ├── response_created_event.py │ │ │ ├── response_done_event.py │ │ │ ├── response_function_call_arguments_delta_event.py │ │ │ ├── response_function_call_arguments_done_event.py │ │ │ ├── response_output_item_added_event.py │ │ │ ├── response_output_item_done_event.py │ │ │ ├── response_text_delta_event.py │ │ │ ├── response_text_done_event.py │ │ │ ├── session.py │ │ │ ├── session_create_params.py │ │ │ ├── session_create_response.py │ │ │ ├── session_created_event.py │ │ │ ├── session_update_event.py │ │ │ ├── session_update_event_param.py │ │ │ └── session_updated_event.py │ │ ├── static_file_chunking_strategy.py │ │ ├── static_file_chunking_strategy_object.py │ │ ├── static_file_chunking_strategy_object_param.py │ │ ├── static_file_chunking_strategy_param.py │ │ ├── thread.py │ │ ├── thread_create_and_run_params.py │ │ ├── thread_create_params.py │ │ ├── thread_deleted.py │ │ ├── thread_update_params.py │ │ ├── threads │ │ │ ├── __init__.py │ │ │ ├── annotation.py │ │ │ ├── annotation_delta.py │ │ │ ├── file_citation_annotation.py │ │ │ ├── file_citation_delta_annotation.py │ │ │ ├── file_path_annotation.py │ │ │ ├── file_path_delta_annotation.py │ │ │ ├── image_file.py │ │ │ ├── image_file_content_block.py │ │ │ ├── image_file_content_block_param.py │ │ │ ├── image_file_delta.py │ │ │ ├── image_file_delta_block.py │ │ │ ├── image_file_param.py │ │ │ ├── image_url.py │ │ │ ├── image_url_content_block.py │ │ │ ├── image_url_content_block_param.py │ │ │ ├── image_url_delta.py │ │ │ ├── image_url_delta_block.py │ │ │ ├── image_url_param.py │ │ │ ├── message.py │ │ │ ├── message_content.py │ │ │ ├── message_content_delta.py │ │ │ 
├── message_content_part_param.py │ │ │ ├── message_create_params.py │ │ │ ├── message_deleted.py │ │ │ ├── message_delta.py │ │ │ ├── message_delta_event.py │ │ │ ├── message_list_params.py │ │ │ ├── message_update_params.py │ │ │ ├── refusal_content_block.py │ │ │ ├── refusal_delta_block.py │ │ │ ├── required_action_function_tool_call.py │ │ │ ├── run.py │ │ │ ├── run_create_params.py │ │ │ ├── run_list_params.py │ │ │ ├── run_status.py │ │ │ ├── run_submit_tool_outputs_params.py │ │ │ ├── run_update_params.py │ │ │ ├── runs │ │ │ │ ├── __init__.py │ │ │ │ ├── code_interpreter_logs.py │ │ │ │ ├── code_interpreter_output_image.py │ │ │ │ ├── code_interpreter_tool_call.py │ │ │ │ ├── code_interpreter_tool_call_delta.py │ │ │ │ ├── file_search_tool_call.py │ │ │ │ ├── file_search_tool_call_delta.py │ │ │ │ ├── function_tool_call.py │ │ │ │ ├── function_tool_call_delta.py │ │ │ │ ├── message_creation_step_details.py │ │ │ │ ├── run_step.py │ │ │ │ ├── run_step_delta.py │ │ │ │ ├── run_step_delta_event.py │ │ │ │ ├── run_step_delta_message_delta.py │ │ │ │ ├── run_step_include.py │ │ │ │ ├── step_list_params.py │ │ │ │ ├── step_retrieve_params.py │ │ │ │ ├── tool_call.py │ │ │ │ ├── tool_call_delta.py │ │ │ │ ├── tool_call_delta_object.py │ │ │ │ └── tool_calls_step_details.py │ │ │ ├── text.py │ │ │ ├── text_content_block.py │ │ │ ├── text_content_block_param.py │ │ │ ├── text_delta.py │ │ │ └── text_delta_block.py │ │ ├── vector_store.py │ │ ├── vector_store_create_params.py │ │ ├── vector_store_deleted.py │ │ ├── vector_store_list_params.py │ │ ├── vector_store_update_params.py │ │ └── vector_stores │ │ │ ├── __init__.py │ │ │ ├── file_batch_create_params.py │ │ │ ├── file_batch_list_files_params.py │ │ │ ├── file_create_params.py │ │ │ ├── file_list_params.py │ │ │ ├── vector_store_file.py │ │ │ ├── vector_store_file_batch.py │ │ │ └── vector_store_file_deleted.py │ ├── chat │ │ ├── __init__.py │ │ ├── chat_completion.py │ │ ├── 
chat_completion_assistant_message_param.py │ │ ├── chat_completion_audio.py │ │ ├── chat_completion_audio_param.py │ │ ├── chat_completion_chunk.py │ │ ├── chat_completion_content_part_image_param.py │ │ ├── chat_completion_content_part_input_audio_param.py │ │ ├── chat_completion_content_part_param.py │ │ ├── chat_completion_content_part_refusal_param.py │ │ ├── chat_completion_content_part_text_param.py │ │ ├── chat_completion_developer_message_param.py │ │ ├── chat_completion_function_call_option_param.py │ │ ├── chat_completion_function_message_param.py │ │ ├── chat_completion_message.py │ │ ├── chat_completion_message_param.py │ │ ├── chat_completion_message_tool_call.py │ │ ├── chat_completion_message_tool_call_param.py │ │ ├── chat_completion_modality.py │ │ ├── chat_completion_named_tool_choice_param.py │ │ ├── chat_completion_prediction_content_param.py │ │ ├── chat_completion_reasoning_effort.py │ │ ├── chat_completion_role.py │ │ ├── chat_completion_stream_options_param.py │ │ ├── chat_completion_system_message_param.py │ │ ├── chat_completion_token_logprob.py │ │ ├── chat_completion_tool_choice_option_param.py │ │ ├── chat_completion_tool_message_param.py │ │ ├── chat_completion_tool_param.py │ │ ├── chat_completion_user_message_param.py │ │ ├── completion_create_params.py │ │ ├── parsed_chat_completion.py │ │ └── parsed_function_tool_call.py │ ├── chat_model.py │ ├── completion.py │ ├── completion_choice.py │ ├── completion_create_params.py │ ├── completion_usage.py │ ├── create_embedding_response.py │ ├── embedding.py │ ├── embedding_create_params.py │ ├── embedding_model.py │ ├── file_content.py │ ├── file_create_params.py │ ├── file_deleted.py │ ├── file_list_params.py │ ├── file_object.py │ ├── file_purpose.py │ ├── fine_tuning │ │ ├── __init__.py │ │ ├── fine_tuning_job.py │ │ ├── fine_tuning_job_event.py │ │ ├── fine_tuning_job_integration.py │ │ ├── fine_tuning_job_wandb_integration.py │ │ ├── fine_tuning_job_wandb_integration_object.py │ │ ├── 
job_create_params.py │ │ ├── job_list_events_params.py │ │ ├── job_list_params.py │ │ └── jobs │ │ │ ├── __init__.py │ │ │ ├── checkpoint_list_params.py │ │ │ └── fine_tuning_job_checkpoint.py │ ├── image.py │ ├── image_create_variation_params.py │ ├── image_edit_params.py │ ├── image_generate_params.py │ ├── image_model.py │ ├── images_response.py │ ├── model.py │ ├── model_deleted.py │ ├── moderation.py │ ├── moderation_create_params.py │ ├── moderation_create_response.py │ ├── moderation_image_url_input_param.py │ ├── moderation_model.py │ ├── moderation_multi_modal_input_param.py │ ├── moderation_text_input_param.py │ ├── shared │ │ ├── __init__.py │ │ ├── error_object.py │ │ ├── function_definition.py │ │ ├── function_parameters.py │ │ ├── metadata.py │ │ ├── response_format_json_object.py │ │ ├── response_format_json_schema.py │ │ └── response_format_text.py │ ├── shared_params │ │ ├── __init__.py │ │ ├── function_definition.py │ │ ├── function_parameters.py │ │ ├── metadata.py │ │ ├── response_format_json_object.py │ │ ├── response_format_json_schema.py │ │ └── response_format_text.py │ ├── upload.py │ ├── upload_complete_params.py │ ├── upload_create_params.py │ ├── uploads │ │ ├── __init__.py │ │ ├── part_create_params.py │ │ └── upload_part.py │ └── websocket_connection_options.py │ └── version.py └── tests ├── __init__.py ├── api_resources ├── __init__.py ├── audio │ ├── __init__.py │ ├── test_speech.py │ ├── test_transcriptions.py │ └── test_translations.py ├── beta │ ├── __init__.py │ ├── realtime │ │ ├── __init__.py │ │ └── test_sessions.py │ ├── test_assistants.py │ ├── test_realtime.py │ ├── test_threads.py │ ├── test_vector_stores.py │ ├── threads │ │ ├── __init__.py │ │ ├── runs │ │ │ ├── __init__.py │ │ │ └── test_steps.py │ │ ├── test_messages.py │ │ └── test_runs.py │ └── vector_stores │ │ ├── __init__.py │ │ ├── test_file_batches.py │ │ └── test_files.py ├── chat │ ├── __init__.py │ └── test_completions.py ├── fine_tuning │ ├── __init__.py │ 
├── jobs │ │ ├── __init__.py │ │ └── test_checkpoints.py │ └── test_jobs.py ├── test_batches.py ├── test_completions.py ├── test_embeddings.py ├── test_files.py ├── test_images.py ├── test_models.py ├── test_moderations.py ├── test_uploads.py └── uploads │ ├── __init__.py │ └── test_parts.py ├── conftest.py ├── lib ├── __init__.py ├── chat │ ├── __init__.py │ ├── _utils.py │ ├── test_completions.py │ └── test_completions_streaming.py ├── schema_types │ └── query.py ├── test_assistants.py ├── test_audio.py ├── test_azure.py ├── test_old_api.py └── test_pydantic.py ├── sample_file.txt ├── test_client.py ├── test_deepcopy.py ├── test_extract_files.py ├── test_files.py ├── test_legacy_response.py ├── test_models.py ├── test_module_client.py ├── test_qs.py ├── test_required_args.py ├── test_response.py ├── test_streaming.py ├── test_transform.py ├── test_utils ├── test_logging.py ├── test_proxy.py └── test_typing.py └── utils.py /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VARIANT="3.9" 2 | FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} 3 | 4 | USER vscode 5 | 6 | RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.35.0" RYE_INSTALL_OPTION="--yes" bash 7 | ENV PATH=/home/vscode/.rye/shims:$PATH 8 | 9 | RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc 10 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/debian 3 | { 4 | "name": "Debian", 5 | "build": { 6 | "dockerfile": "Dockerfile", 7 | "context": ".." 
8 | }, 9 | 10 | "postStartCommand": "rye sync --all-features", 11 | 12 | "customizations": { 13 | "vscode": { 14 | "extensions": [ 15 | "ms-python.python" 16 | ], 17 | "settings": { 18 | "terminal.integrated.shell.linux": "/bin/bash", 19 | "python.pythonPath": ".venv/bin/python", 20 | "python.defaultInterpreterPath": ".venv/bin/python", 21 | "python.typeChecking": "basic", 22 | "terminal.integrated.env.linux": { 23 | "PATH": "/home/vscode/.rye/shims:${env:PATH}" 24 | } 25 | } 26 | } 27 | } 28 | 29 | // Features to add to the dev container. More info: https://containers.dev/features. 30 | // "features": {}, 31 | 32 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 33 | // "forwardPorts": [], 34 | 35 | // Configure tool-specific properties. 36 | // "customizations": {}, 37 | 38 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 39 | // "remoteUser": "root" 40 | } 41 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # This file is used to automatically assign reviewers to PRs 2 | # For more information see: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners 3 | 4 | * @openai/sdks-team 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: OpenAI support 4 | url: https://help.openai.com/ 5 | about: | 6 | Please only file issues here that you believe represent actual bugs or feature requests for the OpenAI Python library. 7 | If you're having general trouble with the OpenAI API, please visit our help center to get support. 
8 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for this library 3 | labels: ['feature-request'] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this feature request! 9 | - type: checkboxes 10 | id: non_api 11 | attributes: 12 | label: Confirm this is a feature request for the Python library and not the underlying OpenAI API. 13 | description: Feature requests for the underlying OpenAI API should be reported on our [Developer Community](https://community.openai.com/c/api/7) 14 | options: 15 | - label: This is a feature request for the Python library 16 | required: true 17 | - type: textarea 18 | id: feature 19 | attributes: 20 | label: Describe the feature or improvement you're requesting 21 | description: A clear and concise description of what you want to happen. 22 | validations: 23 | required: true 24 | - type: textarea 25 | id: context 26 | attributes: 27 | label: Additional context 28 | description: Add any other context about the feature request here. 
29 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | - [ ] I understand that this repository is auto-generated and my pull request may not be merged 7 | 8 | ## Changes being requested 9 | 10 | ## Additional context & links 11 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - main 9 | 10 | jobs: 11 | lint: 12 | name: lint 13 | runs-on: ubuntu-latest 14 | if: github.repository == 'openai/openai-python' 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Install Rye 20 | run: | 21 | curl -sSf https://rye.astral.sh/get | bash 22 | echo "$HOME/.rye/shims" >> $GITHUB_PATH 23 | env: 24 | RYE_VERSION: '0.35.0' 25 | RYE_INSTALL_OPTION: '--yes' 26 | 27 | - name: Install dependencies 28 | run: rye sync --all-features 29 | 30 | - name: Run lints 31 | run: ./scripts/lint 32 | 33 | test: 34 | name: test 35 | runs-on: ubuntu-latest 36 | if: github.repository == 'openai/openai-python' 37 | 38 | steps: 39 | - uses: actions/checkout@v4 40 | 41 | - name: Install Rye 42 | run: | 43 | curl -sSf https://rye.astral.sh/get | bash 44 | echo "$HOME/.rye/shims" >> $GITHUB_PATH 45 | env: 46 | RYE_VERSION: '0.35.0' 47 | RYE_INSTALL_OPTION: '--yes' 48 | 49 | - name: Bootstrap 50 | run: ./scripts/bootstrap 51 | 52 | - name: Run tests 53 | run: ./scripts/test 54 | -------------------------------------------------------------------------------- /.github/workflows/create-releases.yml: -------------------------------------------------------------------------------- 1 | name: Create releases 2 | on: 3 | schedule: 4 | - cron: '0 5 * * *' # every day at 5am UTC 5 | push: 6 | branches: 7 
| - main 8 | 9 | jobs: 10 | release: 11 | name: release 12 | if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-python' 13 | runs-on: ubuntu-latest 14 | environment: publish 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - uses: stainless-api/trigger-release-please@v1 20 | id: release 21 | with: 22 | repo: ${{ github.event.repository.full_name }} 23 | stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} 24 | 25 | - name: Install Rye 26 | if: ${{ steps.release.outputs.releases_created }} 27 | run: | 28 | curl -sSf https://rye.astral.sh/get | bash 29 | echo "$HOME/.rye/shims" >> $GITHUB_PATH 30 | env: 31 | RYE_VERSION: '0.35.0' 32 | RYE_INSTALL_OPTION: '--yes' 33 | 34 | - name: Publish to PyPI 35 | if: ${{ steps.release.outputs.releases_created }} 36 | run: | 37 | bash ./bin/publish-pypi 38 | env: 39 | PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} 40 | -------------------------------------------------------------------------------- /.github/workflows/publish-pypi.yml: -------------------------------------------------------------------------------- 1 | # workflow for re-running publishing to PyPI in case it fails for some reason 2 | # you can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml 3 | name: Publish PyPI 4 | on: 5 | workflow_dispatch: 6 | 7 | jobs: 8 | publish: 9 | name: publish 10 | runs-on: ubuntu-latest 11 | environment: publish 12 | 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Install Rye 17 | run: | 18 | curl -sSf https://rye.astral.sh/get | bash 19 | echo "$HOME/.rye/shims" >> $GITHUB_PATH 20 | env: 21 | RYE_VERSION: '0.35.0' 22 | RYE_INSTALL_OPTION: '--yes' 23 | 24 | - name: Publish to PyPI 25 | run: | 26 | bash ./bin/publish-pypi 27 | env: 28 | PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} 29 | -------------------------------------------------------------------------------- 
/.github/workflows/release-doctor.yml: -------------------------------------------------------------------------------- 1 | name: Release Doctor 2 | on: 3 | push: 4 | branches: 5 | - main 6 | workflow_dispatch: 7 | 8 | jobs: 9 | release_doctor: 10 | name: release doctor 11 | runs-on: ubuntu-latest 12 | environment: publish 13 | if: github.repository == 'openai/openai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - name: Check release environment 19 | run: | 20 | bash ./bin/check-release-environment 21 | env: 22 | STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} 23 | PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .prism.log 2 | .vscode 3 | _dev 4 | 5 | __pycache__ 6 | .mypy_cache 7 | 8 | dist 9 | 10 | .venv 11 | .idea 12 | 13 | .env 14 | .envrc 15 | codegen.log 16 | Brewfile.lock.json 17 | -------------------------------------------------------------------------------- /.inline-snapshot/external/.gitignore: -------------------------------------------------------------------------------- 1 | # ignore all snapshots which are not refered in the source 2 | *-new.* 3 | -------------------------------------------------------------------------------- /.inline-snapshot/external/4cc50a6135d254573a502310e6af1246f55edb6ad95fa24059f160996b68866d.bin: -------------------------------------------------------------------------------- 1 | data: 
{"id":"chatcmpl-ABfw3Oqj8RD0z6aJiiX37oTjV2HFh","object":"chat.completion.chunk","created":1727346171,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_7568d46099","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]} 2 | 3 | data: {"id":"chatcmpl-ABfw3Oqj8RD0z6aJiiX37oTjV2HFh","object":"chat.completion.chunk","created":1727346171,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_7568d46099","choices":[{"index":0,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]} 4 | 5 | data: {"id":"chatcmpl-ABfw3Oqj8RD0z6aJiiX37oTjV2HFh","object":"chat.completion.chunk","created":1727346171,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_7568d46099","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"length"}]} 6 | 7 | data: {"id":"chatcmpl-ABfw3Oqj8RD0z6aJiiX37oTjV2HFh","object":"chat.completion.chunk","created":1727346171,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_7568d46099","choices":[],"usage":{"prompt_tokens":79,"completion_tokens":1,"total_tokens":80,"completion_tokens_details":{"reasoning_tokens":0}}} 8 | 9 | data: [DONE] 10 | 11 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.9.18 2 | -------------------------------------------------------------------------------- /.release-please-manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | ".": "1.61.0" 3 | } -------------------------------------------------------------------------------- /.stats.yml: -------------------------------------------------------------------------------- 1 | configured_endpoints: 69 2 | openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml 3 | 
-------------------------------------------------------------------------------- /Brewfile: -------------------------------------------------------------------------------- 1 | brew "rye" 2 | 3 | -------------------------------------------------------------------------------- /bin/check-release-environment: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | errors=() 4 | 5 | if [ -z "${STAINLESS_API_KEY}" ]; then 6 | errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") 7 | fi 8 | 9 | if [ -z "${PYPI_TOKEN}" ]; then 10 | errors+=("The OPENAI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") 11 | fi 12 | 13 | lenErrors=${#errors[@]} 14 | 15 | if [[ lenErrors -gt 0 ]]; then 16 | echo -e "Found the following errors in the release environment:\n" 17 | 18 | for error in "${errors[@]}"; do 19 | echo -e "- $error\n" 20 | done 21 | 22 | exit 1 23 | fi 24 | 25 | echo "The environment is ready to push releases!" 26 | -------------------------------------------------------------------------------- /bin/publish-pypi: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eux 4 | mkdir -p dist 5 | rye build --clean 6 | # Patching importlib-metadata version until upstream library version is updated 7 | # https://github.com/pypa/twine/issues/977#issuecomment-2189800841 8 | "$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1' 9 | rye publish --yes --token=$PYPI_TOKEN 10 | -------------------------------------------------------------------------------- /examples/.keep: -------------------------------------------------------------------------------- 1 | File generated from our OpenAPI spec by Stainless. 
2 | 3 | This directory can be used to store example files demonstrating usage of this SDK. 4 | It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. -------------------------------------------------------------------------------- /examples/assistant.py: -------------------------------------------------------------------------------- 1 | import openai 2 | 3 | # gets API Key from environment variable OPENAI_API_KEY 4 | client = openai.OpenAI() 5 | 6 | assistant = client.beta.assistants.create( 7 | name="Math Tutor", 8 | instructions="You are a personal math tutor. Write and run code to answer math questions.", 9 | tools=[{"type": "code_interpreter"}], 10 | model="gpt-4-1106-preview", 11 | ) 12 | 13 | thread = client.beta.threads.create() 14 | 15 | message = client.beta.threads.messages.create( 16 | thread_id=thread.id, 17 | role="user", 18 | content="I need to solve the equation `3x + 11 = 14`. Can you help me?", 19 | ) 20 | 21 | run = client.beta.threads.runs.create_and_poll( 22 | thread_id=thread.id, 23 | assistant_id=assistant.id, 24 | instructions="Please address the user as Jane Doe. 
The user has a premium account.", 25 | ) 26 | 27 | print("Run completed with status: " + run.status) 28 | 29 | if run.status == "completed": 30 | messages = client.beta.threads.messages.list(thread_id=thread.id) 31 | 32 | print("messages: ") 33 | for message in messages: 34 | assert message.content[0].type == "text" 35 | print({"role": message.role, "message": message.content[0].text.value}) 36 | 37 | client.beta.assistants.delete(assistant.id) 38 | -------------------------------------------------------------------------------- /examples/assistant_stream.py: -------------------------------------------------------------------------------- 1 | import openai 2 | 3 | # gets API Key from environment variable OPENAI_API_KEY 4 | client = openai.OpenAI() 5 | 6 | assistant = client.beta.assistants.create( 7 | name="Math Tutor", 8 | instructions="You are a personal math tutor. Write and run code to answer math questions.", 9 | tools=[{"type": "code_interpreter"}], 10 | model="gpt-4-1106-preview", 11 | ) 12 | 13 | thread = client.beta.threads.create() 14 | 15 | message = client.beta.threads.messages.create( 16 | thread_id=thread.id, 17 | role="user", 18 | content="I need to solve the equation `3x + 11 = 14`. Can you help me?", 19 | ) 20 | 21 | print("starting run stream") 22 | 23 | stream = client.beta.threads.runs.create( 24 | thread_id=thread.id, 25 | assistant_id=assistant.id, 26 | instructions="Please address the user as Jane Doe. 
The user has a premium account.", 27 | stream=True, 28 | ) 29 | 30 | for event in stream: 31 | print(event.model_dump_json(indent=2, exclude_unset=True)) 32 | 33 | client.beta.assistants.delete(assistant.id) 34 | -------------------------------------------------------------------------------- /examples/async_demo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env -S poetry run python 2 | 3 | import asyncio 4 | 5 | from openai import AsyncOpenAI 6 | 7 | # gets API Key from environment variable OPENAI_API_KEY 8 | client = AsyncOpenAI() 9 | 10 | 11 | async def main() -> None: 12 | stream = await client.completions.create( 13 | model="gpt-3.5-turbo-instruct", 14 | prompt="Say this is a test", 15 | stream=True, 16 | ) 17 | async for completion in stream: 18 | print(completion.choices[0].text, end="") 19 | print() 20 | 21 | 22 | asyncio.run(main()) 23 | -------------------------------------------------------------------------------- /examples/generate_file.sh: -------------------------------------------------------------------------------- 1 | # generate a text file with random data for testing file uploads 2 | wanted_size=$((1024*2048*512)) 3 | file_size=$(( ((wanted_size/12)+1)*12 )) 4 | read_size=$((file_size*3/4)) 5 | 6 | echo "wanted=$wanted_size file=$file_size read=$read_size" 7 | 8 | dd if=/dev/urandom bs=$read_size count=1 | base64 > /tmp/small_test_file.txt 9 | 10 | truncate -s "$wanted_size" /tmp/big_test_file.txt 11 | -------------------------------------------------------------------------------- /examples/module_client.py: -------------------------------------------------------------------------------- 1 | import openai 2 | 3 | # will default to `os.environ['OPENAI_API_KEY']` if not explicitly set 4 | openai.api_key = "..." 5 | 6 | # all client options can be configured just like the `OpenAI` instantiation counterpart 7 | openai.base_url = "https://..." 
8 | openai.default_headers = {"x-foo": "true"} 9 | 10 | # all API calls work in the exact same fashion as well 11 | stream = openai.chat.completions.create( 12 | model="gpt-4", 13 | messages=[ 14 | { 15 | "role": "user", 16 | "content": "How do I output all files in a directory using Python?", 17 | }, 18 | ], 19 | stream=True, 20 | ) 21 | 22 | for chunk in stream: 23 | print(chunk.choices[0].delta.content or "", end="", flush=True) 24 | 25 | print() 26 | -------------------------------------------------------------------------------- /examples/parsing.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import rich 4 | from pydantic import BaseModel 5 | 6 | from openai import OpenAI 7 | 8 | 9 | class Step(BaseModel): 10 | explanation: str 11 | output: str 12 | 13 | 14 | class MathResponse(BaseModel): 15 | steps: List[Step] 16 | final_answer: str 17 | 18 | 19 | client = OpenAI() 20 | 21 | completion = client.beta.chat.completions.parse( 22 | model="gpt-4o-2024-08-06", 23 | messages=[ 24 | {"role": "system", "content": "You are a helpful math tutor."}, 25 | {"role": "user", "content": "solve 8x + 31 = 2"}, 26 | ], 27 | response_format=MathResponse, 28 | ) 29 | 30 | message = completion.choices[0].message 31 | if message.parsed: 32 | rich.print(message.parsed.steps) 33 | 34 | print("answer: ", message.parsed.final_answer) 35 | else: 36 | print(message.refusal) 37 | -------------------------------------------------------------------------------- /examples/parsing_stream.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import rich 4 | from pydantic import BaseModel 5 | 6 | from openai import OpenAI 7 | 8 | 9 | class Step(BaseModel): 10 | explanation: str 11 | output: str 12 | 13 | 14 | class MathResponse(BaseModel): 15 | steps: List[Step] 16 | final_answer: str 17 | 18 | 19 | client = OpenAI() 20 | 21 | with 
client.beta.chat.completions.stream( 22 | model="gpt-4o-2024-08-06", 23 | messages=[ 24 | {"role": "system", "content": "You are a helpful math tutor."}, 25 | {"role": "user", "content": "solve 8x + 31 = 2"}, 26 | ], 27 | response_format=MathResponse, 28 | ) as stream: 29 | for event in stream: 30 | if event.type == "content.delta": 31 | print(event.delta, end="", flush=True) 32 | elif event.type == "content.done": 33 | print("\n") 34 | if event.parsed is not None: 35 | print(f"answer: {event.parsed.final_answer}") 36 | elif event.type == "refusal.delta": 37 | print(event.delta, end="", flush=True) 38 | elif event.type == "refusal.done": 39 | print() 40 | 41 | print("---------------") 42 | rich.print(stream.get_final_completion()) 43 | -------------------------------------------------------------------------------- /examples/parsing_tools_stream.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import rich 4 | from pydantic import BaseModel 5 | 6 | import openai 7 | from openai import OpenAI 8 | 9 | 10 | class GetWeather(BaseModel): 11 | city: str 12 | country: str 13 | 14 | 15 | client = OpenAI() 16 | 17 | 18 | with client.beta.chat.completions.stream( 19 | model="gpt-4o-2024-08-06", 20 | messages=[ 21 | { 22 | "role": "user", 23 | "content": "What's the weather like in SF and New York?", 24 | }, 25 | ], 26 | tools=[ 27 | # because we're using `.parse_stream()`, the returned tool calls 28 | # will be automatically deserialized into this `GetWeather` type 29 | openai.pydantic_function_tool(GetWeather, name="get_weather"), 30 | ], 31 | parallel_tool_calls=True, 32 | ) as stream: 33 | for event in stream: 34 | if event.type == "tool_calls.function.arguments.delta" or event.type == "tool_calls.function.arguments.done": 35 | rich.get_console().print(event, width=80) 36 | 37 | print("----\n") 38 | rich.print(stream.get_final_completion()) 39 | 
-------------------------------------------------------------------------------- /examples/picture.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from openai import OpenAI 4 | 5 | # gets OPENAI_API_KEY from your environment variables 6 | openai = OpenAI() 7 | 8 | prompt = "An astronaut lounging in a tropical resort in space, pixel art" 9 | model = "dall-e-3" 10 | 11 | 12 | def main() -> None: 13 | # Generate an image based on the prompt 14 | response = openai.images.generate(prompt=prompt, model=model) 15 | 16 | # Prints response containing a URL link to image 17 | print(response) 18 | 19 | 20 | if __name__ == "__main__": 21 | main() 22 | -------------------------------------------------------------------------------- /examples/uploads.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | import rich 5 | 6 | from openai import OpenAI 7 | 8 | # generate this file using `./generate_file.sh` 9 | file = Path("/tmp/big_test_file.txt") 10 | 11 | client = OpenAI() 12 | 13 | 14 | def from_disk() -> None: 15 | print("uploading file from disk") 16 | 17 | upload = client.uploads.upload_file_chunked( 18 | file=file, 19 | mime_type="txt", 20 | purpose="batch", 21 | ) 22 | rich.print(upload) 23 | 24 | 25 | def from_in_memory() -> None: 26 | print("uploading file from memory") 27 | 28 | # read the data into memory ourselves to simulate 29 | # it coming from somewhere else 30 | data = file.read_bytes() 31 | filename = "my_file.txt" 32 | 33 | upload = client.uploads.upload_file_chunked( 34 | file=data, 35 | filename=filename, 36 | bytes=len(data), 37 | mime_type="txt", 38 | purpose="batch", 39 | ) 40 | rich.print(upload) 41 | 42 | 43 | if "memory" in sys.argv: 44 | from_in_memory() 45 | else: 46 | from_disk() 47 | -------------------------------------------------------------------------------- /noxfile.py: 
-------------------------------------------------------------------------------- 1 | import nox 2 | 3 | 4 | @nox.session(reuse_venv=True, name="test-pydantic-v1") 5 | def test_pydantic_v1(session: nox.Session) -> None: 6 | session.install("-r", "requirements-dev.lock") 7 | session.install("pydantic<2") 8 | 9 | session.run("pytest", "--showlocals", "--ignore=tests/functional", *session.posargs) 10 | -------------------------------------------------------------------------------- /scripts/bootstrap: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "$0")/.." 6 | 7 | if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then 8 | brew bundle check >/dev/null 2>&1 || { 9 | echo "==> Installing Homebrew dependencies…" 10 | brew bundle 11 | } 12 | fi 13 | 14 | echo "==> Installing Python dependencies…" 15 | 16 | # experimental uv support makes installations significantly faster 17 | rye config --set-bool behavior.use-uv=true 18 | 19 | rye sync 20 | -------------------------------------------------------------------------------- /scripts/format: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "$0")/.." 6 | 7 | echo "==> Running formatters" 8 | rye run format 9 | -------------------------------------------------------------------------------- /scripts/lint: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "$0")/.." 
6 | 7 | echo "==> Running lints" 8 | rye run lint 9 | 10 | echo "==> Making sure it imports" 11 | rye run python -c 'import openai' 12 | -------------------------------------------------------------------------------- /scripts/mock: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "$0")/.." 6 | 7 | if [[ -n "$1" && "$1" != '--'* ]]; then 8 | URL="$1" 9 | shift 10 | else 11 | URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)" 12 | fi 13 | 14 | # Check if the URL is empty 15 | if [ -z "$URL" ]; then 16 | echo "Error: No OpenAPI spec path/url provided or found in .stats.yml" 17 | exit 1 18 | fi 19 | 20 | echo "==> Starting mock server with URL ${URL}" 21 | 22 | # Run prism mock on the given spec 23 | if [ "$1" == "--daemon" ]; then 24 | npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & 25 | 26 | # Wait for server to come online 27 | echo -n "Waiting for server" 28 | while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do 29 | echo -n "." 30 | sleep 0.1 31 | done 32 | 33 | if grep -q "✖ fatal" ".prism.log"; then 34 | cat .prism.log 35 | exit 1 36 | fi 37 | 38 | echo 39 | else 40 | npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" 41 | fi 42 | -------------------------------------------------------------------------------- /src/openai/__main__.py: -------------------------------------------------------------------------------- 1 | from .cli import main 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /src/openai/_constants.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | import httpx 4 | 5 | RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response" 6 | OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to" 7 | 8 | # default timeout is 10 minutes 9 | DEFAULT_TIMEOUT = httpx.Timeout(timeout=600.0, connect=5.0) 10 | DEFAULT_MAX_RETRIES = 2 11 | DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=100) 12 | 13 | INITIAL_RETRY_DELAY = 0.5 14 | MAX_RETRY_DELAY = 8.0 15 | -------------------------------------------------------------------------------- /src/openai/_extras/__init__.py: -------------------------------------------------------------------------------- 1 | from .numpy_proxy import numpy as numpy, has_numpy as has_numpy 2 | from .pandas_proxy import pandas as pandas 3 | -------------------------------------------------------------------------------- /src/openai/_extras/_common.py: -------------------------------------------------------------------------------- 1 | from .._exceptions import OpenAIError 2 | 3 | INSTRUCTIONS = """ 4 | 5 | OpenAI error: 6 | 7 | missing `{library}` 8 | 9 | This feature requires additional dependencies: 10 | 11 | $ pip install openai[{extra}] 12 | 13 | """ 14 | 15 | 16 | def format_instructions(*, library: str, extra: str) -> str: 17 | return INSTRUCTIONS.format(library=library, extra=extra) 18 | 19 | 20 | class MissingDependencyError(OpenAIError): 21 | pass 22 | -------------------------------------------------------------------------------- /src/openai/_extras/numpy_proxy.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import TYPE_CHECKING, Any 4 | from typing_extensions import override 5 | 6 | from .._utils import LazyProxy 7 | from ._common import MissingDependencyError, format_instructions 8 | 9 | if TYPE_CHECKING: 10 | import numpy as numpy 11 | 12 | 13 | NUMPY_INSTRUCTIONS = format_instructions(library="numpy", extra="datalib") 14 | 15 | 16 | class 
NumpyProxy(LazyProxy[Any]): 17 | @override 18 | def __load__(self) -> Any: 19 | try: 20 | import numpy 21 | except ImportError as err: 22 | raise MissingDependencyError(NUMPY_INSTRUCTIONS) from err 23 | 24 | return numpy 25 | 26 | 27 | if not TYPE_CHECKING: 28 | numpy = NumpyProxy() 29 | 30 | 31 | def has_numpy() -> bool: 32 | try: 33 | import numpy # noqa: F401 # pyright: ignore[reportUnusedImport] 34 | except ImportError: 35 | return False 36 | 37 | return True 38 | -------------------------------------------------------------------------------- /src/openai/_extras/pandas_proxy.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import TYPE_CHECKING, Any 4 | from typing_extensions import override 5 | 6 | from .._utils import LazyProxy 7 | from ._common import MissingDependencyError, format_instructions 8 | 9 | if TYPE_CHECKING: 10 | import pandas as pandas 11 | 12 | 13 | PANDAS_INSTRUCTIONS = format_instructions(library="pandas", extra="datalib") 14 | 15 | 16 | class PandasProxy(LazyProxy[Any]): 17 | @override 18 | def __load__(self) -> Any: 19 | try: 20 | import pandas 21 | except ImportError as err: 22 | raise MissingDependencyError(PANDAS_INSTRUCTIONS) from err 23 | 24 | return pandas 25 | 26 | 27 | if not TYPE_CHECKING: 28 | pandas = PandasProxy() 29 | -------------------------------------------------------------------------------- /src/openai/_resource.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | import time 6 | from typing import TYPE_CHECKING 7 | 8 | import anyio 9 | 10 | if TYPE_CHECKING: 11 | from ._client import OpenAI, AsyncOpenAI 12 | 13 | 14 | class SyncAPIResource: 15 | _client: OpenAI 16 | 17 | def __init__(self, client: OpenAI) -> None: 18 | self._client = client 19 | self._get = client.get 20 | self._post = client.post 21 | self._patch = client.patch 22 | self._put = client.put 23 | self._delete = client.delete 24 | self._get_api_list = client.get_api_list 25 | 26 | def _sleep(self, seconds: float) -> None: 27 | time.sleep(seconds) 28 | 29 | 30 | class AsyncAPIResource: 31 | _client: AsyncOpenAI 32 | 33 | def __init__(self, client: AsyncOpenAI) -> None: 34 | self._client = client 35 | self._get = client.get 36 | self._post = client.post 37 | self._patch = client.patch 38 | self._put = client.put 39 | self._delete = client.delete 40 | self._get_api_list = client.get_api_list 41 | 42 | async def _sleep(self, seconds: float) -> None: 43 | await anyio.sleep(seconds) 44 | -------------------------------------------------------------------------------- /src/openai/_utils/_streams.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from typing_extensions import Iterator, AsyncIterator 3 | 4 | 5 | def consume_sync_iterator(iterator: Iterator[Any]) -> None: 6 | for _ in iterator: 7 | ... 8 | 9 | 10 | async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None: 11 | async for _ in iterator: 12 | ... 13 | -------------------------------------------------------------------------------- /src/openai/_version.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | __title__ = "openai" 4 | __version__ = "1.61.0" # x-release-please-version 5 | -------------------------------------------------------------------------------- /src/openai/cli/__init__.py: -------------------------------------------------------------------------------- 1 | from ._cli import main as main 2 | -------------------------------------------------------------------------------- /src/openai/cli/_api/__init__.py: -------------------------------------------------------------------------------- 1 | from ._main import register_commands as register_commands 2 | -------------------------------------------------------------------------------- /src/openai/cli/_api/_main.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from argparse import ArgumentParser 4 | 5 | from . import chat, audio, files, image, models, completions 6 | 7 | 8 | def register_commands(parser: ArgumentParser) -> None: 9 | subparsers = parser.add_subparsers(help="All API subcommands") 10 | 11 | chat.register(subparsers) 12 | image.register(subparsers) 13 | audio.register(subparsers) 14 | files.register(subparsers) 15 | models.register(subparsers) 16 | completions.register(subparsers) 17 | -------------------------------------------------------------------------------- /src/openai/cli/_api/chat/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import TYPE_CHECKING 4 | from argparse import ArgumentParser 5 | 6 | from . 
import completions 7 | 8 | if TYPE_CHECKING: 9 | from argparse import _SubParsersAction 10 | 11 | 12 | def register(subparser: _SubParsersAction[ArgumentParser]) -> None: 13 | completions.register(subparser) 14 | -------------------------------------------------------------------------------- /src/openai/cli/_errors.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import sys 4 | 5 | import pydantic 6 | 7 | from ._utils import Colors, organization_info 8 | from .._exceptions import APIError, OpenAIError 9 | 10 | 11 | class CLIError(OpenAIError): ... 12 | 13 | 14 | class SilentCLIError(CLIError): ... 15 | 16 | 17 | def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None: 18 | if isinstance(err, SilentCLIError): 19 | return 20 | 21 | sys.stderr.write("{}{}Error:{} {}\n".format(organization_info(), Colors.FAIL, Colors.ENDC, err)) 22 | -------------------------------------------------------------------------------- /src/openai/cli/_models.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from typing_extensions import ClassVar 3 | 4 | import pydantic 5 | 6 | from .. 
import _models 7 | from .._compat import PYDANTIC_V2, ConfigDict 8 | 9 | 10 | class BaseModel(_models.BaseModel): 11 | if PYDANTIC_V2: 12 | model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True) 13 | else: 14 | 15 | class Config(pydantic.BaseConfig): # type: ignore 16 | extra: Any = pydantic.Extra.ignore # type: ignore 17 | arbitrary_types_allowed: bool = True 18 | -------------------------------------------------------------------------------- /src/openai/cli/_tools/__init__.py: -------------------------------------------------------------------------------- 1 | from ._main import register_commands as register_commands 2 | -------------------------------------------------------------------------------- /src/openai/cli/_tools/_main.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import TYPE_CHECKING 4 | from argparse import ArgumentParser 5 | 6 | from . import migrate, fine_tunes 7 | 8 | if TYPE_CHECKING: 9 | from argparse import _SubParsersAction 10 | 11 | 12 | def register_commands(parser: ArgumentParser, subparser: _SubParsersAction[ArgumentParser]) -> None: 13 | migrate.register(subparser) 14 | 15 | namespaced = parser.add_subparsers(title="Tools", help="Convenience client side tools") 16 | 17 | fine_tunes.register(namespaced) 18 | -------------------------------------------------------------------------------- /src/openai/cli/_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import sys 4 | 5 | import openai 6 | 7 | from .. 
import OpenAI, _load_client 8 | from .._compat import model_json 9 | from .._models import BaseModel 10 | 11 | 12 | class Colors: 13 | HEADER = "\033[95m" 14 | OKBLUE = "\033[94m" 15 | OKGREEN = "\033[92m" 16 | WARNING = "\033[93m" 17 | FAIL = "\033[91m" 18 | ENDC = "\033[0m" 19 | BOLD = "\033[1m" 20 | UNDERLINE = "\033[4m" 21 | 22 | 23 | def get_client() -> OpenAI: 24 | return _load_client() 25 | 26 | 27 | def organization_info() -> str: 28 | organization = openai.organization 29 | if organization is not None: 30 | return "[organization={}] ".format(organization) 31 | 32 | return "" 33 | 34 | 35 | def print_model(model: BaseModel) -> None: 36 | sys.stdout.write(model_json(model, indent=2) + "\n") 37 | 38 | 39 | def can_use_http2() -> bool: 40 | try: 41 | import h2 # type: ignore # noqa 42 | except ImportError: 43 | return False 44 | 45 | return True 46 | -------------------------------------------------------------------------------- /src/openai/lib/.keep: -------------------------------------------------------------------------------- 1 | File generated from our OpenAPI spec by Stainless. 2 | 3 | This directory can be used to store custom files to expand the SDK. 4 | It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. 
-------------------------------------------------------------------------------- /src/openai/lib/__init__.py: -------------------------------------------------------------------------------- 1 | from ._tools import pydantic_function_tool as pydantic_function_tool 2 | from ._parsing import ResponseFormatT as ResponseFormatT 3 | -------------------------------------------------------------------------------- /src/openai/lib/_parsing/__init__.py: -------------------------------------------------------------------------------- 1 | from ._completions import ( 2 | ResponseFormatT as ResponseFormatT, 3 | has_parseable_input, 4 | has_parseable_input as has_parseable_input, 5 | maybe_parse_content as maybe_parse_content, 6 | validate_input_tools as validate_input_tools, 7 | parse_chat_completion as parse_chat_completion, 8 | get_input_tool_by_name as get_input_tool_by_name, 9 | solve_response_format_t as solve_response_format_t, 10 | parse_function_tool_arguments as parse_function_tool_arguments, 11 | type_to_response_format_param as type_to_response_format_param, 12 | ) 13 | -------------------------------------------------------------------------------- /src/openai/lib/streaming/__init__.py: -------------------------------------------------------------------------------- 1 | from ._assistants import ( 2 | AssistantEventHandler as AssistantEventHandler, 3 | AssistantEventHandlerT as AssistantEventHandlerT, 4 | AssistantStreamManager as AssistantStreamManager, 5 | AsyncAssistantEventHandler as AsyncAssistantEventHandler, 6 | AsyncAssistantEventHandlerT as AsyncAssistantEventHandlerT, 7 | AsyncAssistantStreamManager as AsyncAssistantStreamManager, 8 | ) 9 | -------------------------------------------------------------------------------- /src/openai/lib/streaming/chat/_types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing_extensions import TypeAlias 4 | 5 | from ....types.chat 
import ParsedChoice, ParsedChatCompletion, ParsedChatCompletionMessage 6 | 7 | ParsedChatCompletionSnapshot: TypeAlias = ParsedChatCompletion[object] 8 | """Snapshot type representing an in-progress accumulation of 9 | a `ParsedChatCompletion` object. 10 | """ 11 | 12 | ParsedChatCompletionMessageSnapshot: TypeAlias = ParsedChatCompletionMessage[object] 13 | """Snapshot type representing an in-progress accumulation of 14 | a `ParsedChatCompletionMessage` object. 15 | 16 | If the content has been fully accumulated, the `.parsed` content will be 17 | the `response_format` instance, otherwise it'll be the raw JSON parsed version. 18 | """ 19 | 20 | ParsedChoiceSnapshot: TypeAlias = ParsedChoice[object] 21 | -------------------------------------------------------------------------------- /src/openai/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elenakozlova28/openai-python/7a6517d81e4ae9e9e9527cd401bb76937983dfef/src/openai/py.typed -------------------------------------------------------------------------------- /src/openai/resources/beta/chat/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from .chat import Chat, AsyncChat 4 | from .completions import Completions, AsyncCompletions 5 | 6 | __all__ = [ 7 | "Completions", 8 | "AsyncCompletions", 9 | "Chat", 10 | "AsyncChat", 11 | ] 12 | -------------------------------------------------------------------------------- /src/openai/resources/beta/chat/chat.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from ...._compat import cached_property 6 | from .completions import Completions, AsyncCompletions 7 | from ...._resource import SyncAPIResource, AsyncAPIResource 8 | 9 | __all__ = ["Chat", "AsyncChat"] 10 | 11 | 12 | class Chat(SyncAPIResource): 13 | @cached_property 14 | def completions(self) -> Completions: 15 | return Completions(self._client) 16 | 17 | 18 | class AsyncChat(AsyncAPIResource): 19 | @cached_property 20 | def completions(self) -> AsyncCompletions: 21 | return AsyncCompletions(self._client) 22 | -------------------------------------------------------------------------------- /src/openai/resources/beta/realtime/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from .realtime import ( 4 | Realtime, 5 | AsyncRealtime, 6 | RealtimeWithRawResponse, 7 | AsyncRealtimeWithRawResponse, 8 | RealtimeWithStreamingResponse, 9 | AsyncRealtimeWithStreamingResponse, 10 | ) 11 | from .sessions import ( 12 | Sessions, 13 | AsyncSessions, 14 | SessionsWithRawResponse, 15 | AsyncSessionsWithRawResponse, 16 | SessionsWithStreamingResponse, 17 | AsyncSessionsWithStreamingResponse, 18 | ) 19 | 20 | __all__ = [ 21 | "Sessions", 22 | "AsyncSessions", 23 | "SessionsWithRawResponse", 24 | "AsyncSessionsWithRawResponse", 25 | "SessionsWithStreamingResponse", 26 | "AsyncSessionsWithStreamingResponse", 27 | "Realtime", 28 | "AsyncRealtime", 29 | "RealtimeWithRawResponse", 30 | "AsyncRealtimeWithRawResponse", 31 | "RealtimeWithStreamingResponse", 32 | "AsyncRealtimeWithStreamingResponse", 33 | ] 34 | -------------------------------------------------------------------------------- /src/openai/resources/beta/threads/runs/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from .runs import ( 4 | Runs, 5 | AsyncRuns, 6 | RunsWithRawResponse, 7 | AsyncRunsWithRawResponse, 8 | RunsWithStreamingResponse, 9 | AsyncRunsWithStreamingResponse, 10 | ) 11 | from .steps import ( 12 | Steps, 13 | AsyncSteps, 14 | StepsWithRawResponse, 15 | AsyncStepsWithRawResponse, 16 | StepsWithStreamingResponse, 17 | AsyncStepsWithStreamingResponse, 18 | ) 19 | 20 | __all__ = [ 21 | "Steps", 22 | "AsyncSteps", 23 | "StepsWithRawResponse", 24 | "AsyncStepsWithRawResponse", 25 | "StepsWithStreamingResponse", 26 | "AsyncStepsWithStreamingResponse", 27 | "Runs", 28 | "AsyncRuns", 29 | "RunsWithRawResponse", 30 | "AsyncRunsWithRawResponse", 31 | "RunsWithStreamingResponse", 32 | "AsyncRunsWithStreamingResponse", 33 | ] 34 | -------------------------------------------------------------------------------- /src/openai/resources/chat/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .chat import ( 4 | Chat, 5 | AsyncChat, 6 | ChatWithRawResponse, 7 | AsyncChatWithRawResponse, 8 | ChatWithStreamingResponse, 9 | AsyncChatWithStreamingResponse, 10 | ) 11 | from .completions import ( 12 | Completions, 13 | AsyncCompletions, 14 | CompletionsWithRawResponse, 15 | AsyncCompletionsWithRawResponse, 16 | CompletionsWithStreamingResponse, 17 | AsyncCompletionsWithStreamingResponse, 18 | ) 19 | 20 | __all__ = [ 21 | "Completions", 22 | "AsyncCompletions", 23 | "CompletionsWithRawResponse", 24 | "AsyncCompletionsWithRawResponse", 25 | "CompletionsWithStreamingResponse", 26 | "AsyncCompletionsWithStreamingResponse", 27 | "Chat", 28 | "AsyncChat", 29 | "ChatWithRawResponse", 30 | "AsyncChatWithRawResponse", 31 | "ChatWithStreamingResponse", 32 | "AsyncChatWithStreamingResponse", 33 | ] 34 | -------------------------------------------------------------------------------- /src/openai/resources/fine_tuning/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .jobs import ( 4 | Jobs, 5 | AsyncJobs, 6 | JobsWithRawResponse, 7 | AsyncJobsWithRawResponse, 8 | JobsWithStreamingResponse, 9 | AsyncJobsWithStreamingResponse, 10 | ) 11 | from .fine_tuning import ( 12 | FineTuning, 13 | AsyncFineTuning, 14 | FineTuningWithRawResponse, 15 | AsyncFineTuningWithRawResponse, 16 | FineTuningWithStreamingResponse, 17 | AsyncFineTuningWithStreamingResponse, 18 | ) 19 | 20 | __all__ = [ 21 | "Jobs", 22 | "AsyncJobs", 23 | "JobsWithRawResponse", 24 | "AsyncJobsWithRawResponse", 25 | "JobsWithStreamingResponse", 26 | "AsyncJobsWithStreamingResponse", 27 | "FineTuning", 28 | "AsyncFineTuning", 29 | "FineTuningWithRawResponse", 30 | "AsyncFineTuningWithRawResponse", 31 | "FineTuningWithStreamingResponse", 32 | "AsyncFineTuningWithStreamingResponse", 33 | ] 34 | -------------------------------------------------------------------------------- /src/openai/resources/fine_tuning/jobs/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .jobs import ( 4 | Jobs, 5 | AsyncJobs, 6 | JobsWithRawResponse, 7 | AsyncJobsWithRawResponse, 8 | JobsWithStreamingResponse, 9 | AsyncJobsWithStreamingResponse, 10 | ) 11 | from .checkpoints import ( 12 | Checkpoints, 13 | AsyncCheckpoints, 14 | CheckpointsWithRawResponse, 15 | AsyncCheckpointsWithRawResponse, 16 | CheckpointsWithStreamingResponse, 17 | AsyncCheckpointsWithStreamingResponse, 18 | ) 19 | 20 | __all__ = [ 21 | "Checkpoints", 22 | "AsyncCheckpoints", 23 | "CheckpointsWithRawResponse", 24 | "AsyncCheckpointsWithRawResponse", 25 | "CheckpointsWithStreamingResponse", 26 | "AsyncCheckpointsWithStreamingResponse", 27 | "Jobs", 28 | "AsyncJobs", 29 | "JobsWithRawResponse", 30 | "AsyncJobsWithRawResponse", 31 | "JobsWithStreamingResponse", 32 | "AsyncJobsWithStreamingResponse", 33 | ] 34 | -------------------------------------------------------------------------------- /src/openai/resources/uploads/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .parts import ( 4 | Parts, 5 | AsyncParts, 6 | PartsWithRawResponse, 7 | AsyncPartsWithRawResponse, 8 | PartsWithStreamingResponse, 9 | AsyncPartsWithStreamingResponse, 10 | ) 11 | from .uploads import ( 12 | Uploads, 13 | AsyncUploads, 14 | UploadsWithRawResponse, 15 | AsyncUploadsWithRawResponse, 16 | UploadsWithStreamingResponse, 17 | AsyncUploadsWithStreamingResponse, 18 | ) 19 | 20 | __all__ = [ 21 | "Parts", 22 | "AsyncParts", 23 | "PartsWithRawResponse", 24 | "AsyncPartsWithRawResponse", 25 | "PartsWithStreamingResponse", 26 | "AsyncPartsWithStreamingResponse", 27 | "Uploads", 28 | "AsyncUploads", 29 | "UploadsWithRawResponse", 30 | "AsyncUploadsWithRawResponse", 31 | "UploadsWithStreamingResponse", 32 | "AsyncUploadsWithStreamingResponse", 33 | ] 34 | -------------------------------------------------------------------------------- /src/openai/types/audio/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations

from .translation import Translation as Translation
from .speech_model import SpeechModel as SpeechModel
from .transcription import Transcription as Transcription
from .transcription_word import TranscriptionWord as TranscriptionWord
from .translation_verbose import TranslationVerbose as TranslationVerbose
from .speech_create_params import SpeechCreateParams as SpeechCreateParams
from .transcription_segment import TranscriptionSegment as TranscriptionSegment
from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose
from .translation_create_params import TranslationCreateParams as TranslationCreateParams
from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams
from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse
from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse
--------------------------------------------------------------------------------
/src/openai/types/audio/speech_model.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal, TypeAlias

__all__ = ["SpeechModel"]

# Closed set of model identifiers accepted by the speech (TTS) endpoint.
SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd"]
--------------------------------------------------------------------------------
/src/openai/types/audio/transcription.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ..._models import BaseModel

__all__ = ["Transcription"]


# Plain (non-verbose) transcription result: only the transcribed text.
class Transcription(BaseModel):
    text: str
    """The transcribed text."""
--------------------------------------------------------------------------------
/src/openai/types/audio/transcription_create_response.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
from typing_extensions import TypeAlias

from .transcription import Transcription
from .transcription_verbose import TranscriptionVerbose

__all__ = ["TranscriptionCreateResponse"]

# Union of the two response shapes the create-transcription endpoint can
# return; presumably which one is used depends on the requested
# `response_format` — verify against the resource method.
TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose]
--------------------------------------------------------------------------------
/src/openai/types/audio/transcription_verbose.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2 | 3 | from typing import List, Optional 4 | 5 | from ..._models import BaseModel 6 | from .transcription_word import TranscriptionWord 7 | from .transcription_segment import TranscriptionSegment 8 | 9 | __all__ = ["TranscriptionVerbose"] 10 | 11 | 12 | class TranscriptionVerbose(BaseModel): 13 | duration: str 14 | """The duration of the input audio.""" 15 | 16 | language: str 17 | """The language of the input audio.""" 18 | 19 | text: str 20 | """The transcribed text.""" 21 | 22 | segments: Optional[List[TranscriptionSegment]] = None 23 | """Segments of the transcribed text and their corresponding details.""" 24 | 25 | words: Optional[List[TranscriptionWord]] = None 26 | """Extracted words and their corresponding timestamps.""" 27 | -------------------------------------------------------------------------------- /src/openai/types/audio/transcription_word.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | 4 | from ..._models import BaseModel 5 | 6 | __all__ = ["TranscriptionWord"] 7 | 8 | 9 | class TranscriptionWord(BaseModel): 10 | end: float 11 | """End time of the word in seconds.""" 12 | 13 | start: float 14 | """Start time of the word in seconds.""" 15 | 16 | word: str 17 | """The text content of the word.""" 18 | -------------------------------------------------------------------------------- /src/openai/types/audio/translation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | 4 | from ..._models import BaseModel 5 | 6 | __all__ = ["Translation"] 7 | 8 | 9 | class Translation(BaseModel): 10 | text: str 11 | -------------------------------------------------------------------------------- /src/openai/types/audio/translation_create_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import TypeAlias 5 | 6 | from .translation import Translation 7 | from .translation_verbose import TranslationVerbose 8 | 9 | __all__ = ["TranslationCreateResponse"] 10 | 11 | TranslationCreateResponse: TypeAlias = Union[Translation, TranslationVerbose] 12 | -------------------------------------------------------------------------------- /src/openai/types/audio/translation_verbose.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Optional 4 | 5 | from ..._models import BaseModel 6 | from .transcription_segment import TranscriptionSegment 7 | 8 | __all__ = ["TranslationVerbose"] 9 | 10 | 11 | class TranslationVerbose(BaseModel): 12 | duration: str 13 | """The duration of the input audio.""" 14 | 15 | language: str 16 | """The language of the output translation (always `english`).""" 17 | 18 | text: str 19 | """The translated text.""" 20 | 21 | segments: Optional[List[TranscriptionSegment]] = None 22 | """Segments of the translated text and their corresponding details.""" 23 | -------------------------------------------------------------------------------- /src/openai/types/audio_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing_extensions import Literal, TypeAlias

__all__ = ["AudioModel"]

# Closed set of model identifiers accepted by the audio endpoints.
AudioModel: TypeAlias = Literal["whisper-1"]
--------------------------------------------------------------------------------
/src/openai/types/audio_response_format.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal, TypeAlias

__all__ = ["AudioResponseFormat"]

# Output formats selectable via the audio endpoints' `response_format` option.
AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"]
--------------------------------------------------------------------------------
/src/openai/types/batch_error.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional

from .._models import BaseModel

__all__ = ["BatchError"]


# One error entry for a batch; each field is populated only "if applicable"
# (per the field docs), hence every field is Optional.
class BatchError(BaseModel):
    code: Optional[str] = None
    """An error code identifying the error type."""

    line: Optional[int] = None
    """The line number of the input file where the error occurred, if applicable."""

    message: Optional[str] = None
    """A human-readable message providing more details about the error."""

    param: Optional[str] = None
    """The name of the parameter that caused the error, if applicable."""
--------------------------------------------------------------------------------
/src/openai/types/batch_list_params.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations

from typing_extensions import TypedDict

__all__ = ["BatchListParams"]


# Query parameters for listing batches; cursor-based pagination
# (`after` + `limit`), all keys optional (total=False).
class BatchListParams(TypedDict, total=False):
    after: str
    """A cursor for use in pagination.

    `after` is an object ID that defines your place in the list. For instance, if
    you make a list request and receive 100 objects, ending with obj_foo, your
    subsequent call can include after=obj_foo in order to fetch the next page of the
    list.
    """

    limit: int
    """A limit on the number of objects to be returned.

    Limit can range between 1 and 100, and the default is 20.
    """
--------------------------------------------------------------------------------
/src/openai/types/batch_request_counts.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.


from .._models import BaseModel

__all__ = ["BatchRequestCounts"]


# Aggregate request counters for a batch.
class BatchRequestCounts(BaseModel):
    completed: int
    """Number of requests that have been completed successfully."""

    failed: int
    """Number of requests that have failed."""

    total: int
    """Total number of requests in the batch."""
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_deleted.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["AssistantDeleted"]


# Acknowledgement object returned when an assistant is deleted
# (`object` is pinned to "assistant.deleted").
class AssistantDeleted(BaseModel):
    id: str

    deleted: bool

    object: Literal["assistant.deleted"]
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_response_format_option.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
from typing_extensions import Literal, TypeAlias

from ..shared.response_format_text import ResponseFormatText
from ..shared.response_format_json_object import ResponseFormatJSONObject
from ..shared.response_format_json_schema import ResponseFormatJSONSchema

__all__ = ["AssistantResponseFormatOption"]

# Either the literal "auto" or one of the concrete response-format objects.
AssistantResponseFormatOption: TypeAlias = Union[
    Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
]
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_response_format_option_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations

from typing import Union
from typing_extensions import Literal, TypeAlias

from ..shared_params.response_format_text import ResponseFormatText
from ..shared_params.response_format_json_object import ResponseFormatJSONObject
from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema

__all__ = ["AssistantResponseFormatOptionParam"]

# Request-side mirror of AssistantResponseFormatOption, built from the
# `shared_params` (TypedDict) variants of the response-format types.
AssistantResponseFormatOptionParam: TypeAlias = Union[
    Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
]
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
from typing_extensions import Annotated, TypeAlias

from ..._utils import PropertyInfo
from .function_tool import FunctionTool
from .file_search_tool import FileSearchTool
from .code_interpreter_tool import CodeInterpreterTool

__all__ = ["AssistantTool"]

# Tagged union of assistant tools, discriminated on the `type` field
# (see PropertyInfo(discriminator="type")).
AssistantTool: TypeAlias = Annotated[
    Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")
]
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool_choice.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal

from ..._models import BaseModel
from .assistant_tool_choice_function import AssistantToolChoiceFunction

__all__ = ["AssistantToolChoice"]


# Forces the assistant to use a specific tool; `function` is only meaningful
# when type == "function" (see the `type` docstring).
class AssistantToolChoice(BaseModel):
    type: Literal["function", "code_interpreter", "file_search"]
    """The type of the tool. If type is `function`, the function name must be set"""

    function: Optional[AssistantToolChoiceFunction] = None
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool_choice_function.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.


from ..._models import BaseModel

__all__ = ["AssistantToolChoiceFunction"]


class AssistantToolChoiceFunction(BaseModel):
    name: str
    """The name of the function to call."""
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool_choice_function_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Required, TypedDict

__all__ = ["AssistantToolChoiceFunctionParam"]


# Request-side (TypedDict) mirror of AssistantToolChoiceFunction.
class AssistantToolChoiceFunctionParam(TypedDict, total=False):
    name: Required[str]
    """The name of the function to call."""
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool_choice_option.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, TypeAlias

from .assistant_tool_choice import AssistantToolChoice

__all__ = ["AssistantToolChoiceOption"]

# "none" / "auto" / "required", or an explicit tool selection.
AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice]
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool_choice_option_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union
from typing_extensions import Literal, TypeAlias

from .assistant_tool_choice_param import AssistantToolChoiceParam

__all__ = ["AssistantToolChoiceOptionParam"]

# Request-side mirror of AssistantToolChoiceOption.
AssistantToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam]
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool_choice_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam

__all__ = ["AssistantToolChoiceParam"]


# Request-side (TypedDict) mirror of AssistantToolChoice.
class AssistantToolChoiceParam(TypedDict, total=False):
    type: Required[Literal["function", "code_interpreter", "file_search"]]
    """The type of the tool. If type is `function`, the function name must be set"""

    function: AssistantToolChoiceFunctionParam
--------------------------------------------------------------------------------
/src/openai/types/beta/assistant_tool_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union
from typing_extensions import TypeAlias

from .function_tool_param import FunctionToolParam
from .file_search_tool_param import FileSearchToolParam
from .code_interpreter_tool_param import CodeInterpreterToolParam

__all__ = ["AssistantToolParam"]

# Request-side mirror of AssistantTool (plain Union — TypedDicts cannot carry
# a runtime discriminator annotation the way the response models do).
AssistantToolParam: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
--------------------------------------------------------------------------------
/src/openai/types/beta/auto_file_chunking_strategy_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

__all__ = ["AutoFileChunkingStrategyParam"]


class AutoFileChunkingStrategyParam(TypedDict, total=False):
    type: Required[Literal["auto"]]
    """Always `auto`."""
--------------------------------------------------------------------------------
/src/openai/types/beta/chat/__init__.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
--------------------------------------------------------------------------------
/src/openai/types/beta/code_interpreter_tool.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["CodeInterpreterTool"]


class CodeInterpreterTool(BaseModel):
    type: Literal["code_interpreter"]
    """The type of tool being defined: `code_interpreter`"""
--------------------------------------------------------------------------------
/src/openai/types/beta/code_interpreter_tool_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

__all__ = ["CodeInterpreterToolParam"]


# Request-side (TypedDict) mirror of CodeInterpreterTool.
class CodeInterpreterToolParam(TypedDict, total=False):
    type: Required[Literal["code_interpreter"]]
    """The type of tool being defined: `code_interpreter`"""
--------------------------------------------------------------------------------
/src/openai/types/beta/file_chunking_strategy.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias

from ..._utils import PropertyInfo
from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject
from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject

__all__ = ["FileChunkingStrategy"]

# Tagged union of chunking strategies, discriminated on the `type` field.
FileChunkingStrategy: TypeAlias = Annotated[
    Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type")
]
--------------------------------------------------------------------------------
/src/openai/types/beta/file_chunking_strategy_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union
from typing_extensions import TypeAlias

from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam
from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam

__all__ = ["FileChunkingStrategyParam"]

# NOTE(review): request side accepts `auto`/`static` only — intentionally no
# "other" variant, which exists only on the response union (FileChunkingStrategy).
FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam]
--------------------------------------------------------------------------------
/src/openai/types/beta/function_tool.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal

from ..._models import BaseModel
from ..shared.function_definition import FunctionDefinition

__all__ = ["FunctionTool"]


class FunctionTool(BaseModel):
    function: FunctionDefinition

    type: Literal["function"]
    """The type of tool being defined: `function`"""
--------------------------------------------------------------------------------
/src/openai/types/beta/function_tool_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

from ..shared_params.function_definition import FunctionDefinition

__all__ = ["FunctionToolParam"]


# Request-side (TypedDict) mirror of FunctionTool; note it imports the
# `shared_params` FunctionDefinition, not the response-model one.
class FunctionToolParam(TypedDict, total=False):
    function: Required[FunctionDefinition]

    type: Required[Literal["function"]]
    """The type of tool being defined: `function`"""
--------------------------------------------------------------------------------
/src/openai/types/beta/other_file_chunking_strategy_object.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["OtherFileChunkingStrategyObject"]


class OtherFileChunkingStrategyObject(BaseModel):
    type: Literal["other"]
    """Always `other`."""
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_created_event.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal

from ...._models import BaseModel

__all__ = ["ConversationCreatedEvent", "Conversation"]


# Nested conversation resource carried by the event; both fields optional.
class Conversation(BaseModel):
    id: Optional[str] = None
    """The unique ID of the conversation."""

    object: Optional[Literal["realtime.conversation"]] = None
    """The object type, must be `realtime.conversation`."""


# Server event (`event_id` is required) announcing a new conversation.
class ConversationCreatedEvent(BaseModel):
    conversation: Conversation
    """The conversation resource."""

    event_id: str
    """The unique ID of the server event."""

    type: Literal["conversation.created"]
    """The event type, must be `conversation.created`."""
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_item_content.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ...._models import BaseModel

__all__ = ["ConversationItemContent"]


# One content part of a conversation item; which fields are populated depends
# on `type` (see the per-field docstrings).
class ConversationItemContent(BaseModel):
    id: Optional[str] = None
    """
    ID of a previous conversation item to reference (for `item_reference` content
    types in `response.create` events). These can reference both client and server
    created items.
    """

    audio: Optional[str] = None
    """Base64-encoded audio bytes, used for `input_audio` content type."""

    text: Optional[str] = None
    """The text content, used for `input_text` and `text` content types."""

    transcript: Optional[str] = None
    """The transcript of the audio, used for `input_audio` content type."""

    type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None
    """The content type (`input_text`, `input_audio`, `item_reference`, `text`)."""
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_item_content_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, TypedDict

__all__ = ["ConversationItemContentParam"]


# Request-side (TypedDict) mirror of ConversationItemContent.
class ConversationItemContentParam(TypedDict, total=False):
    id: str
    """
    ID of a previous conversation item to reference (for `item_reference` content
    types in `response.create` events). These can reference both client and server
    created items.
    """

    audio: str
    """Base64-encoded audio bytes, used for `input_audio` content type."""

    text: str
    """The text content, used for `input_text` and `text` content types."""

    transcript: str
    """The transcript of the audio, used for `input_audio` content type."""

    type: Literal["input_text", "input_audio", "item_reference", "text"]
    """The content type (`input_text`, `input_audio`, `item_reference`, `text`)."""
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_item_create_event.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ...._models import BaseModel
from .conversation_item import ConversationItem

__all__ = ["ConversationItemCreateEvent"]


# Client event requesting insertion of an item (`event_id` is optional and
# client-generated, unlike server events where it is required).
class ConversationItemCreateEvent(BaseModel):
    item: ConversationItem
    """The item to add to the conversation."""

    type: Literal["conversation.item.create"]
    """The event type, must be `conversation.item.create`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""

    previous_item_id: Optional[str] = None
    """The ID of the preceding item after which the new item will be inserted.

    If not set, the new item will be appended to the end of the conversation. If set
    to `root`, the new item will be added to the beginning of the conversation. If
    set to an existing ID, it allows an item to be inserted mid-conversation. If the
    ID cannot be found, an error will be returned and the item will not be added.
    """
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_item_create_event_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

from .conversation_item_param import ConversationItemParam

__all__ = ["ConversationItemCreateEventParam"]


# Request-side (TypedDict) mirror of ConversationItemCreateEvent.
class ConversationItemCreateEventParam(TypedDict, total=False):
    item: Required[ConversationItemParam]
    """The item to add to the conversation."""

    type: Required[Literal["conversation.item.create"]]
    """The event type, must be `conversation.item.create`."""

    event_id: str
    """Optional client-generated ID used to identify this event."""

    previous_item_id: str
    """The ID of the preceding item after which the new item will be inserted.

    If not set, the new item will be appended to the end of the conversation. If set
    to `root`, the new item will be added to the beginning of the conversation. If
    set to an existing ID, it allows an item to be inserted mid-conversation. If the
    ID cannot be found, an error will be returned and the item will not be added.
    """
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_item_created_event.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal

from ...._models import BaseModel
from .conversation_item import ConversationItem

__all__ = ["ConversationItemCreatedEvent"]


# Server event confirming an item was added to the conversation.
class ConversationItemCreatedEvent(BaseModel):
    event_id: str
    """The unique ID of the server event."""

    item: ConversationItem
    """The item to add to the conversation."""

    previous_item_id: str
    """
    The ID of the preceding item in the Conversation context, allows the client to
    understand the order of the conversation.
    """

    type: Literal["conversation.item.created"]
    """The event type, must be `conversation.item.created`."""
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_item_delete_event.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ...._models import BaseModel

__all__ = ["ConversationItemDeleteEvent"]


# Client event requesting deletion of an item (`event_id` optional,
# client-generated).
class ConversationItemDeleteEvent(BaseModel):
    item_id: str
    """The ID of the item to delete."""

    type: Literal["conversation.item.delete"]
    """The event type, must be `conversation.item.delete`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""
--------------------------------------------------------------------------------
/src/openai/types/beta/realtime/conversation_item_delete_event_param.py:
--------------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ConversationItemDeleteEventParam"] 8 | 9 | 10 | class ConversationItemDeleteEventParam(TypedDict, total=False): 11 | item_id: Required[str] 12 | """The ID of the item to delete.""" 13 | 14 | type: Required[Literal["conversation.item.delete"]] 15 | """The event type, must be `conversation.item.delete`.""" 16 | 17 | event_id: str 18 | """Optional client-generated ID used to identify this event.""" 19 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/conversation_item_deleted_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ConversationItemDeletedEvent"] 8 | 9 | 10 | class ConversationItemDeletedEvent(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | item_id: str 15 | """The ID of the item that was deleted.""" 16 | 17 | type: Literal["conversation.item.deleted"] 18 | """The event type, must be `conversation.item.deleted`.""" 19 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent"] 8 | 9 | 10 | class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): 11 | content_index: int 12 | """The index of the content part containing the audio.""" 13 | 14 | event_id: str 15 | """The unique ID of the server event.""" 16 | 17 | item_id: str 18 | """The ID of the user message item containing the audio.""" 19 | 20 | transcript: str 21 | """The transcribed text.""" 22 | 23 | type: Literal["conversation.item.input_audio_transcription.completed"] 24 | """ 25 | The event type, must be `conversation.item.input_audio_transcription.completed`. 26 | """ 27 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ConversationItemInputAudioTranscriptionFailedEvent", "Error"] 9 | 10 | 11 | class Error(BaseModel): 12 | code: Optional[str] = None 13 | """Error code, if any.""" 14 | 15 | message: Optional[str] = None 16 | """A human-readable error message.""" 17 | 18 | param: Optional[str] = None 19 | """Parameter related to the error, if any.""" 20 | 21 | type: Optional[str] = None 22 | """The type of error.""" 23 | 24 | 25 | class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel): 26 | content_index: int 27 | """The index of the content part containing the audio.""" 28 | 29 | error: Error 30 | """Details of the transcription error.""" 31 | 32 | event_id: str 33 | """The unique ID of the server event.""" 34 | 35 | item_id: str 36 | """The ID of the user message item.""" 37 | 38 | type: Literal["conversation.item.input_audio_transcription.failed"] 39 | """The event type, must be `conversation.item.input_audio_transcription.failed`.""" 40 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/conversation_item_truncate_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ConversationItemTruncateEvent"] 9 | 10 | 11 | class ConversationItemTruncateEvent(BaseModel): 12 | audio_end_ms: int 13 | """Inclusive duration up to which audio is truncated, in milliseconds. 14 | 15 | If the audio_end_ms is greater than the actual audio duration, the server will 16 | respond with an error. 17 | """ 18 | 19 | content_index: int 20 | """The index of the content part to truncate. 
Set this to 0.""" 21 | 22 | item_id: str 23 | """The ID of the assistant message item to truncate. 24 | 25 | Only assistant message items can be truncated. 26 | """ 27 | 28 | type: Literal["conversation.item.truncate"] 29 | """The event type, must be `conversation.item.truncate`.""" 30 | 31 | event_id: Optional[str] = None 32 | """Optional client-generated ID used to identify this event.""" 33 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/conversation_item_truncate_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ConversationItemTruncateEventParam"] 8 | 9 | 10 | class ConversationItemTruncateEventParam(TypedDict, total=False): 11 | audio_end_ms: Required[int] 12 | """Inclusive duration up to which audio is truncated, in milliseconds. 13 | 14 | If the audio_end_ms is greater than the actual audio duration, the server will 15 | respond with an error. 16 | """ 17 | 18 | content_index: Required[int] 19 | """The index of the content part to truncate. Set this to 0.""" 20 | 21 | item_id: Required[str] 22 | """The ID of the assistant message item to truncate. 23 | 24 | Only assistant message items can be truncated. 25 | """ 26 | 27 | type: Required[Literal["conversation.item.truncate"]] 28 | """The event type, must be `conversation.item.truncate`.""" 29 | 30 | event_id: str 31 | """Optional client-generated ID used to identify this event.""" 32 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/conversation_item_truncated_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ConversationItemTruncatedEvent"] 8 | 9 | 10 | class ConversationItemTruncatedEvent(BaseModel): 11 | audio_end_ms: int 12 | """The duration up to which the audio was truncated, in milliseconds.""" 13 | 14 | content_index: int 15 | """The index of the content part that was truncated.""" 16 | 17 | event_id: str 18 | """The unique ID of the server event.""" 19 | 20 | item_id: str 21 | """The ID of the assistant message item that was truncated.""" 22 | 23 | type: Literal["conversation.item.truncated"] 24 | """The event type, must be `conversation.item.truncated`.""" 25 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/error_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ErrorEvent", "Error"] 9 | 10 | 11 | class Error(BaseModel): 12 | message: str 13 | """A human-readable error message.""" 14 | 15 | type: str 16 | """The type of error (e.g., "invalid_request_error", "server_error").""" 17 | 18 | code: Optional[str] = None 19 | """Error code, if any.""" 20 | 21 | event_id: Optional[str] = None 22 | """The event_id of the client event that caused the error, if applicable.""" 23 | 24 | param: Optional[str] = None 25 | """Parameter related to the error, if any.""" 26 | 27 | 28 | class ErrorEvent(BaseModel): 29 | error: Error 30 | """Details of the error.""" 31 | 32 | event_id: str 33 | """The unique ID of the server event.""" 34 | 35 | type: Literal["error"] 36 | """The event type, must be `error`.""" 37 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_append_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["InputAudioBufferAppendEvent"] 9 | 10 | 11 | class InputAudioBufferAppendEvent(BaseModel): 12 | audio: str 13 | """Base64-encoded audio bytes. 14 | 15 | This must be in the format specified by the `input_audio_format` field in the 16 | session configuration. 
17 | """ 18 | 19 | type: Literal["input_audio_buffer.append"] 20 | """The event type, must be `input_audio_buffer.append`.""" 21 | 22 | event_id: Optional[str] = None 23 | """Optional client-generated ID used to identify this event.""" 24 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_append_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["InputAudioBufferAppendEventParam"] 8 | 9 | 10 | class InputAudioBufferAppendEventParam(TypedDict, total=False): 11 | audio: Required[str] 12 | """Base64-encoded audio bytes. 13 | 14 | This must be in the format specified by the `input_audio_format` field in the 15 | session configuration. 16 | """ 17 | 18 | type: Required[Literal["input_audio_buffer.append"]] 19 | """The event type, must be `input_audio_buffer.append`.""" 20 | 21 | event_id: str 22 | """Optional client-generated ID used to identify this event.""" 23 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_clear_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["InputAudioBufferClearEvent"] 9 | 10 | 11 | class InputAudioBufferClearEvent(BaseModel): 12 | type: Literal["input_audio_buffer.clear"] 13 | """The event type, must be `input_audio_buffer.clear`.""" 14 | 15 | event_id: Optional[str] = None 16 | """Optional client-generated ID used to identify this event.""" 17 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["InputAudioBufferClearEventParam"] 8 | 9 | 10 | class InputAudioBufferClearEventParam(TypedDict, total=False): 11 | type: Required[Literal["input_audio_buffer.clear"]] 12 | """The event type, must be `input_audio_buffer.clear`.""" 13 | 14 | event_id: str 15 | """Optional client-generated ID used to identify this event.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_cleared_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["InputAudioBufferClearedEvent"] 8 | 9 | 10 | class InputAudioBufferClearedEvent(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | type: Literal["input_audio_buffer.cleared"] 15 | """The event type, must be `input_audio_buffer.cleared`.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_commit_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["InputAudioBufferCommitEvent"] 9 | 10 | 11 | class InputAudioBufferCommitEvent(BaseModel): 12 | type: Literal["input_audio_buffer.commit"] 13 | """The event type, must be `input_audio_buffer.commit`.""" 14 | 15 | event_id: Optional[str] = None 16 | """Optional client-generated ID used to identify this event.""" 17 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["InputAudioBufferCommitEventParam"] 8 | 9 | 10 | class InputAudioBufferCommitEventParam(TypedDict, total=False): 11 | type: Required[Literal["input_audio_buffer.commit"]] 12 | """The event type, must be `input_audio_buffer.commit`.""" 13 | 14 | event_id: str 15 | """Optional client-generated ID used to identify this event.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_committed_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["InputAudioBufferCommittedEvent"] 8 | 9 | 10 | class InputAudioBufferCommittedEvent(BaseModel): 11 | event_id: str 12 | """The unique ID of the server event.""" 13 | 14 | item_id: str 15 | """The ID of the user message item that will be created.""" 16 | 17 | previous_item_id: str 18 | """The ID of the preceding item after which the new item will be inserted.""" 19 | 20 | type: Literal["input_audio_buffer.committed"] 21 | """The event type, must be `input_audio_buffer.committed`.""" 22 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["InputAudioBufferSpeechStartedEvent"] 8 | 9 | 10 | class InputAudioBufferSpeechStartedEvent(BaseModel): 11 | audio_start_ms: int 12 | """ 13 | Milliseconds from the start of all audio written to the buffer during the 14 | session when speech was first detected. This will correspond to the beginning of 15 | audio sent to the model, and thus includes the `prefix_padding_ms` configured in 16 | the Session. 17 | """ 18 | 19 | event_id: str 20 | """The unique ID of the server event.""" 21 | 22 | item_id: str 23 | """The ID of the user message item that will be created when speech stops.""" 24 | 25 | type: Literal["input_audio_buffer.speech_started"] 26 | """The event type, must be `input_audio_buffer.speech_started`.""" 27 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["InputAudioBufferSpeechStoppedEvent"] 8 | 9 | 10 | class InputAudioBufferSpeechStoppedEvent(BaseModel): 11 | audio_end_ms: int 12 | """Milliseconds since the session started when speech stopped. 13 | 14 | This will correspond to the end of audio sent to the model, and thus includes 15 | the `min_silence_duration_ms` configured in the Session. 
16 | """ 17 | 18 | event_id: str 19 | """The unique ID of the server event.""" 20 | 21 | item_id: str 22 | """The ID of the user message item that will be created.""" 23 | 24 | type: Literal["input_audio_buffer.speech_stopped"] 25 | """The event type, must be `input_audio_buffer.speech_stopped`.""" 26 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/rate_limits_updated_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["RateLimitsUpdatedEvent", "RateLimit"] 9 | 10 | 11 | class RateLimit(BaseModel): 12 | limit: Optional[int] = None 13 | """The maximum allowed value for the rate limit.""" 14 | 15 | name: Optional[Literal["requests", "tokens"]] = None 16 | """The name of the rate limit (`requests`, `tokens`).""" 17 | 18 | remaining: Optional[int] = None 19 | """The remaining value before the limit is reached.""" 20 | 21 | reset_seconds: Optional[float] = None 22 | """Seconds until the rate limit resets.""" 23 | 24 | 25 | class RateLimitsUpdatedEvent(BaseModel): 26 | event_id: str 27 | """The unique ID of the server event.""" 28 | 29 | rate_limits: List[RateLimit] 30 | """List of rate limit information.""" 31 | 32 | type: Literal["rate_limits.updated"] 33 | """The event type, must be `rate_limits.updated`.""" 34 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/realtime_connect_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["RealtimeConnectParams"] 8 | 9 | 10 | class RealtimeConnectParams(TypedDict, total=False): 11 | model: Required[str] 12 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_audio_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseAudioDeltaEvent"] 8 | 9 | 10 | class ResponseAudioDeltaEvent(BaseModel): 11 | content_index: int 12 | """The index of the content part in the item's content array.""" 13 | 14 | delta: str 15 | """Base64-encoded audio data delta.""" 16 | 17 | event_id: str 18 | """The unique ID of the server event.""" 19 | 20 | item_id: str 21 | """The ID of the item.""" 22 | 23 | output_index: int 24 | """The index of the output item in the response.""" 25 | 26 | response_id: str 27 | """The ID of the response.""" 28 | 29 | type: Literal["response.audio.delta"] 30 | """The event type, must be `response.audio.delta`.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_audio_done_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseAudioDoneEvent"] 8 | 9 | 10 | class ResponseAudioDoneEvent(BaseModel): 11 | content_index: int 12 | """The index of the content part in the item's content array.""" 13 | 14 | event_id: str 15 | """The unique ID of the server event.""" 16 | 17 | item_id: str 18 | """The ID of the item.""" 19 | 20 | output_index: int 21 | """The index of the output item in the response.""" 22 | 23 | response_id: str 24 | """The ID of the response.""" 25 | 26 | type: Literal["response.audio.done"] 27 | """The event type, must be `response.audio.done`.""" 28 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_audio_transcript_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseAudioTranscriptDeltaEvent"] 8 | 9 | 10 | class ResponseAudioTranscriptDeltaEvent(BaseModel): 11 | content_index: int 12 | """The index of the content part in the item's content array.""" 13 | 14 | delta: str 15 | """The transcript delta.""" 16 | 17 | event_id: str 18 | """The unique ID of the server event.""" 19 | 20 | item_id: str 21 | """The ID of the item.""" 22 | 23 | output_index: int 24 | """The index of the output item in the response.""" 25 | 26 | response_id: str 27 | """The ID of the response.""" 28 | 29 | type: Literal["response.audio_transcript.delta"] 30 | """The event type, must be `response.audio_transcript.delta`.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_audio_transcript_done_event.py: -------------------------------------------------------------------------------- 1 | # File 
generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseAudioTranscriptDoneEvent"] 8 | 9 | 10 | class ResponseAudioTranscriptDoneEvent(BaseModel): 11 | content_index: int 12 | """The index of the content part in the item's content array.""" 13 | 14 | event_id: str 15 | """The unique ID of the server event.""" 16 | 17 | item_id: str 18 | """The ID of the item.""" 19 | 20 | output_index: int 21 | """The index of the output item in the response.""" 22 | 23 | response_id: str 24 | """The ID of the response.""" 25 | 26 | transcript: str 27 | """The final transcript of the audio.""" 28 | 29 | type: Literal["response.audio_transcript.done"] 30 | """The event type, must be `response.audio_transcript.done`.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_cancel_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ResponseCancelEvent"] 9 | 10 | 11 | class ResponseCancelEvent(BaseModel): 12 | type: Literal["response.cancel"] 13 | """The event type, must be `response.cancel`.""" 14 | 15 | event_id: Optional[str] = None 16 | """Optional client-generated ID used to identify this event.""" 17 | 18 | response_id: Optional[str] = None 19 | """ 20 | A specific response ID to cancel - if not provided, will cancel an in-progress 21 | response in the default conversation. 
22 | """ 23 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_cancel_event_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ResponseCancelEventParam"] 8 | 9 | 10 | class ResponseCancelEventParam(TypedDict, total=False): 11 | type: Required[Literal["response.cancel"]] 12 | """The event type, must be `response.cancel`.""" 13 | 14 | event_id: str 15 | """Optional client-generated ID used to identify this event.""" 16 | 17 | response_id: str 18 | """ 19 | A specific response ID to cancel - if not provided, will cancel an in-progress 20 | response in the default conversation. 21 | """ 22 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_created_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .realtime_response import RealtimeResponse 7 | 8 | __all__ = ["ResponseCreatedEvent"] 9 | 10 | 11 | class ResponseCreatedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | response: RealtimeResponse 16 | """The response resource.""" 17 | 18 | type: Literal["response.created"] 19 | """The event type, must be `response.created`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_done_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .realtime_response import RealtimeResponse 7 | 8 | __all__ = ["ResponseDoneEvent"] 9 | 10 | 11 | class ResponseDoneEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | response: RealtimeResponse 16 | """The response resource.""" 17 | 18 | type: Literal["response.done"] 19 | """The event type, must be `response.done`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_function_call_arguments_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseFunctionCallArgumentsDeltaEvent"] 8 | 9 | 10 | class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): 11 | call_id: str 12 | """The ID of the function call.""" 13 | 14 | delta: str 15 | """The arguments delta as a JSON string.""" 16 | 17 | event_id: str 18 | """The unique ID of the server event.""" 19 | 20 | item_id: str 21 | """The ID of the function call item.""" 22 | 23 | output_index: int 24 | """The index of the output item in the response.""" 25 | 26 | response_id: str 27 | """The ID of the response.""" 28 | 29 | type: Literal["response.function_call_arguments.delta"] 30 | """The event type, must be `response.function_call_arguments.delta`.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_function_call_arguments_done_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseFunctionCallArgumentsDoneEvent"] 8 | 9 | 10 | class ResponseFunctionCallArgumentsDoneEvent(BaseModel): 11 | arguments: str 12 | """The final arguments as a JSON string.""" 13 | 14 | call_id: str 15 | """The ID of the function call.""" 16 | 17 | event_id: str 18 | """The unique ID of the server event.""" 19 | 20 | item_id: str 21 | """The ID of the function call item.""" 22 | 23 | output_index: int 24 | """The index of the output item in the response.""" 25 | 26 | response_id: str 27 | """The ID of the response.""" 28 | 29 | type: Literal["response.function_call_arguments.done"] 30 | """The event type, must be `response.function_call_arguments.done`.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_output_item_added_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .conversation_item import ConversationItem 7 | 8 | __all__ = ["ResponseOutputItemAddedEvent"] 9 | 10 | 11 | class ResponseOutputItemAddedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | item: ConversationItem 16 | """The item to add to the conversation.""" 17 | 18 | output_index: int 19 | """The index of the output item in the Response.""" 20 | 21 | response_id: str 22 | """The ID of the Response to which the item belongs.""" 23 | 24 | type: Literal["response.output_item.added"] 25 | """The event type, must be `response.output_item.added`.""" 26 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_output_item_done_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .conversation_item import ConversationItem 7 | 8 | __all__ = ["ResponseOutputItemDoneEvent"] 9 | 10 | 11 | class ResponseOutputItemDoneEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | item: ConversationItem 16 | """The item to add to the conversation.""" 17 | 18 | output_index: int 19 | """The index of the output item in the Response.""" 20 | 21 | response_id: str 22 | """The ID of the Response to which the item belongs.""" 23 | 24 | type: Literal["response.output_item.done"] 25 | """The event type, must be `response.output_item.done`.""" 26 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_text_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseTextDeltaEvent"] 8 | 9 | 10 | class ResponseTextDeltaEvent(BaseModel): 11 | content_index: int 12 | """The index of the content part in the item's content array.""" 13 | 14 | delta: str 15 | """The text delta.""" 16 | 17 | event_id: str 18 | """The unique ID of the server event.""" 19 | 20 | item_id: str 21 | """The ID of the item.""" 22 | 23 | output_index: int 24 | """The index of the output item in the response.""" 25 | 26 | response_id: str 27 | """The ID of the response.""" 28 | 29 | type: Literal["response.text.delta"] 30 | """The event type, must be `response.text.delta`.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/response_text_done_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["ResponseTextDoneEvent"] 8 | 9 | 10 | class ResponseTextDoneEvent(BaseModel): 11 | content_index: int 12 | """The index of the content part in the item's content array.""" 13 | 14 | event_id: str 15 | """The unique ID of the server event.""" 16 | 17 | item_id: str 18 | """The ID of the item.""" 19 | 20 | output_index: int 21 | """The index of the output item in the response.""" 22 | 23 | response_id: str 24 | """The ID of the response.""" 25 | 26 | text: str 27 | """The final text content.""" 28 | 29 | type: Literal["response.text.done"] 30 | """The event type, must be `response.text.done`.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/session_created_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .session import Session 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["SessionCreatedEvent"] 9 | 10 | 11 | class SessionCreatedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | session: Session 16 | """Realtime session object configuration.""" 17 | 18 | type: Literal["session.created"] 19 | """The event type, must be `session.created`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/realtime/session_updated_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from .session import Session 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["SessionUpdatedEvent"] 9 | 10 | 11 | class SessionUpdatedEvent(BaseModel): 12 | event_id: str 13 | """The unique ID of the server event.""" 14 | 15 | session: Session 16 | """Realtime session object configuration.""" 17 | 18 | type: Literal["session.updated"] 19 | """The event type, must be `session.updated`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/static_file_chunking_strategy.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | 4 | from ..._models import BaseModel 5 | 6 | __all__ = ["StaticFileChunkingStrategy"] 7 | 8 | 9 | class StaticFileChunkingStrategy(BaseModel): 10 | chunk_overlap_tokens: int 11 | """The number of tokens that overlap between chunks. The default value is `400`. 12 | 13 | Note that the overlap must not exceed half of `max_chunk_size_tokens`. 14 | """ 15 | 16 | max_chunk_size_tokens: int 17 | """The maximum number of tokens in each chunk. 18 | 19 | The default value is `800`. The minimum value is `100` and the maximum value is 20 | `4096`. 21 | """ 22 | -------------------------------------------------------------------------------- /src/openai/types/beta/static_file_chunking_strategy_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | from .static_file_chunking_strategy import StaticFileChunkingStrategy 7 | 8 | __all__ = ["StaticFileChunkingStrategyObject"] 9 | 10 | 11 | class StaticFileChunkingStrategyObject(BaseModel): 12 | static: StaticFileChunkingStrategy 13 | 14 | type: Literal["static"] 15 | """Always `static`.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/static_file_chunking_strategy_object_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam 8 | 9 | __all__ = ["StaticFileChunkingStrategyObjectParam"] 10 | 11 | 12 | class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): 13 | static: Required[StaticFileChunkingStrategyParam] 14 | 15 | type: Required[Literal["static"]] 16 | """Always `static`.""" 17 | -------------------------------------------------------------------------------- /src/openai/types/beta/static_file_chunking_strategy_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["StaticFileChunkingStrategyParam"] 8 | 9 | 10 | class StaticFileChunkingStrategyParam(TypedDict, total=False): 11 | chunk_overlap_tokens: Required[int] 12 | """The number of tokens that overlap between chunks. The default value is `400`. 13 | 14 | Note that the overlap must not exceed half of `max_chunk_size_tokens`. 
15 | """ 16 | 17 | max_chunk_size_tokens: Required[int] 18 | """The maximum number of tokens in each chunk. 19 | 20 | The default value is `800`. The minimum value is `100` and the maximum value is 21 | `4096`. 22 | """ 23 | -------------------------------------------------------------------------------- /src/openai/types/beta/thread_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ThreadDeleted"] 8 | 9 | 10 | class ThreadDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["thread.deleted"] 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/annotation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ...._utils import PropertyInfo 7 | from .file_path_annotation import FilePathAnnotation 8 | from .file_citation_annotation import FileCitationAnnotation 9 | 10 | __all__ = ["Annotation"] 11 | 12 | Annotation: TypeAlias = Annotated[Union[FileCitationAnnotation, FilePathAnnotation], PropertyInfo(discriminator="type")] 13 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/annotation_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ...._utils import PropertyInfo 7 | from .file_path_delta_annotation import FilePathDeltaAnnotation 8 | from .file_citation_delta_annotation import FileCitationDeltaAnnotation 9 | 10 | __all__ = ["AnnotationDelta"] 11 | 12 | AnnotationDelta: TypeAlias = Annotated[ 13 | Union[FileCitationDeltaAnnotation, FilePathDeltaAnnotation], PropertyInfo(discriminator="type") 14 | ] 15 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/file_citation_annotation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["FileCitationAnnotation", "FileCitation"] 8 | 9 | 10 | class FileCitation(BaseModel): 11 | file_id: str 12 | """The ID of the specific File the citation is from.""" 13 | 14 | 15 | class FileCitationAnnotation(BaseModel): 16 | end_index: int 17 | 18 | file_citation: FileCitation 19 | 20 | start_index: int 21 | 22 | text: str 23 | """The text in the message content that needs to be replaced.""" 24 | 25 | type: Literal["file_citation"] 26 | """Always `file_citation`.""" 27 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/file_citation_delta_annotation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["FileCitationDeltaAnnotation", "FileCitation"] 9 | 10 | 11 | class FileCitation(BaseModel): 12 | file_id: Optional[str] = None 13 | """The ID of the specific File the citation is from.""" 14 | 15 | quote: Optional[str] = None 16 | """The specific quote in the file.""" 17 | 18 | 19 | class FileCitationDeltaAnnotation(BaseModel): 20 | index: int 21 | """The index of the annotation in the text content part.""" 22 | 23 | type: Literal["file_citation"] 24 | """Always `file_citation`.""" 25 | 26 | end_index: Optional[int] = None 27 | 28 | file_citation: Optional[FileCitation] = None 29 | 30 | start_index: Optional[int] = None 31 | 32 | text: Optional[str] = None 33 | """The text in the message content that needs to be replaced.""" 34 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/file_path_annotation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["FilePathAnnotation", "FilePath"] 8 | 9 | 10 | class FilePath(BaseModel): 11 | file_id: str 12 | """The ID of the file that was generated.""" 13 | 14 | 15 | class FilePathAnnotation(BaseModel): 16 | end_index: int 17 | 18 | file_path: FilePath 19 | 20 | start_index: int 21 | 22 | text: str 23 | """The text in the message content that needs to be replaced.""" 24 | 25 | type: Literal["file_path"] 26 | """Always `file_path`.""" 27 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/file_path_delta_annotation.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["FilePathDeltaAnnotation", "FilePath"] 9 | 10 | 11 | class FilePath(BaseModel): 12 | file_id: Optional[str] = None 13 | """The ID of the file that was generated.""" 14 | 15 | 16 | class FilePathDeltaAnnotation(BaseModel): 17 | index: int 18 | """The index of the annotation in the text content part.""" 19 | 20 | type: Literal["file_path"] 21 | """Always `file_path`.""" 22 | 23 | end_index: Optional[int] = None 24 | 25 | file_path: Optional[FilePath] = None 26 | 27 | start_index: Optional[int] = None 28 | 29 | text: Optional[str] = None 30 | """The text in the message content that needs to be replaced.""" 31 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_file.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageFile"] 9 | 10 | 11 | class ImageFile(BaseModel): 12 | file_id: str 13 | """ 14 | The [File](https://platform.openai.com/docs/api-reference/files) ID of the image 15 | in the message content. Set `purpose="vision"` when uploading the File if you 16 | need to later display the file content. 17 | """ 18 | 19 | detail: Optional[Literal["auto", "low", "high"]] = None 20 | """Specifies the detail level of the image if specified by the user. 21 | 22 | `low` uses fewer tokens, you can opt in to high resolution using `high`. 23 | """ 24 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_file_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .image_file import ImageFile 7 | 8 | __all__ = ["ImageFileContentBlock"] 9 | 10 | 11 | class ImageFileContentBlock(BaseModel): 12 | image_file: ImageFile 13 | 14 | type: Literal["image_file"] 15 | """Always `image_file`.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_file_content_block_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | from .image_file_param import ImageFileParam 8 | 9 | __all__ = ["ImageFileContentBlockParam"] 10 | 11 | 12 | class ImageFileContentBlockParam(TypedDict, total=False): 13 | image_file: Required[ImageFileParam] 14 | 15 | type: Required[Literal["image_file"]] 16 | """Always `image_file`.""" 17 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_file_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageFileDelta"] 9 | 10 | 11 | class ImageFileDelta(BaseModel): 12 | detail: Optional[Literal["auto", "low", "high"]] = None 13 | """Specifies the detail level of the image if specified by the user. 14 | 15 | `low` uses fewer tokens, you can opt in to high resolution using `high`. 16 | """ 17 | 18 | file_id: Optional[str] = None 19 | """ 20 | The [File](https://platform.openai.com/docs/api-reference/files) ID of the image 21 | in the message content. Set `purpose="vision"` when uploading the File if you 22 | need to later display the file content. 23 | """ 24 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_file_delta_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | from .image_file_delta import ImageFileDelta 8 | 9 | __all__ = ["ImageFileDeltaBlock"] 10 | 11 | 12 | class ImageFileDeltaBlock(BaseModel): 13 | index: int 14 | """The index of the content part in the message.""" 15 | 16 | type: Literal["image_file"] 17 | """Always `image_file`.""" 18 | 19 | image_file: Optional[ImageFileDelta] = None 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_file_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ImageFileParam"] 8 | 9 | 10 | class ImageFileParam(TypedDict, total=False): 11 | file_id: Required[str] 12 | """ 13 | The [File](https://platform.openai.com/docs/api-reference/files) ID of the image 14 | in the message content. Set `purpose="vision"` when uploading the File if you 15 | need to later display the file content. 16 | """ 17 | 18 | detail: Literal["auto", "low", "high"] 19 | """Specifies the detail level of the image if specified by the user. 20 | 21 | `low` uses fewer tokens, you can opt in to high resolution using `high`. 22 | """ 23 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_url.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageURL"] 9 | 10 | 11 | class ImageURL(BaseModel): 12 | url: str 13 | """ 14 | The external URL of the image, must be a supported image types: jpeg, jpg, png, 15 | gif, webp. 16 | """ 17 | 18 | detail: Optional[Literal["auto", "low", "high"]] = None 19 | """Specifies the detail level of the image. 20 | 21 | `low` uses fewer tokens, you can opt in to high resolution using `high`. Default 22 | value is `auto` 23 | """ 24 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_url_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .image_url import ImageURL 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageURLContentBlock"] 9 | 10 | 11 | class ImageURLContentBlock(BaseModel): 12 | image_url: ImageURL 13 | 14 | type: Literal["image_url"] 15 | """The type of the content part.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_url_content_block_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | from .image_url_param import ImageURLParam 8 | 9 | __all__ = ["ImageURLContentBlockParam"] 10 | 11 | 12 | class ImageURLContentBlockParam(TypedDict, total=False): 13 | image_url: Required[ImageURLParam] 14 | 15 | type: Required[Literal["image_url"]] 16 | """The type of the content part.""" 17 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_url_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["ImageURLDelta"] 9 | 10 | 11 | class ImageURLDelta(BaseModel): 12 | detail: Optional[Literal["auto", "low", "high"]] = None 13 | """Specifies the detail level of the image. 14 | 15 | `low` uses fewer tokens, you can opt in to high resolution using `high`. 16 | """ 17 | 18 | url: Optional[str] = None 19 | """ 20 | The URL of the image, must be a supported image types: jpeg, jpg, png, gif, 21 | webp. 22 | """ 23 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_url_delta_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | from .image_url_delta import ImageURLDelta 8 | 9 | __all__ = ["ImageURLDeltaBlock"] 10 | 11 | 12 | class ImageURLDeltaBlock(BaseModel): 13 | index: int 14 | """The index of the content part in the message.""" 15 | 16 | type: Literal["image_url"] 17 | """Always `image_url`.""" 18 | 19 | image_url: Optional[ImageURLDelta] = None 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/image_url_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ImageURLParam"] 8 | 9 | 10 | class ImageURLParam(TypedDict, total=False): 11 | url: Required[str] 12 | """ 13 | The external URL of the image, must be a supported image types: jpeg, jpg, png, 14 | gif, webp. 15 | """ 16 | 17 | detail: Literal["auto", "low", "high"] 18 | """Specifies the detail level of the image. 19 | 20 | `low` uses fewer tokens, you can opt in to high resolution using `high`. Default 21 | value is `auto` 22 | """ 23 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/message_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ...._utils import PropertyInfo 7 | from .text_content_block import TextContentBlock 8 | from .refusal_content_block import RefusalContentBlock 9 | from .image_url_content_block import ImageURLContentBlock 10 | from .image_file_content_block import ImageFileContentBlock 11 | 12 | __all__ = ["MessageContent"] 13 | 14 | 15 | MessageContent: TypeAlias = Annotated[ 16 | Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock], 17 | PropertyInfo(discriminator="type"), 18 | ] 19 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/message_content_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ...._utils import PropertyInfo 7 | from .text_delta_block import TextDeltaBlock 8 | from .refusal_delta_block import RefusalDeltaBlock 9 | from .image_url_delta_block import ImageURLDeltaBlock 10 | from .image_file_delta_block import ImageFileDeltaBlock 11 | 12 | __all__ = ["MessageContentDelta"] 13 | 14 | MessageContentDelta: TypeAlias = Annotated[ 15 | Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock], 16 | PropertyInfo(discriminator="type"), 17 | ] 18 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/message_content_part_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from .text_content_block_param import TextContentBlockParam 9 | from .image_url_content_block_param import ImageURLContentBlockParam 10 | from .image_file_content_block_param import ImageFileContentBlockParam 11 | 12 | __all__ = ["MessageContentPartParam"] 13 | 14 | MessageContentPartParam: TypeAlias = Union[ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam] 15 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/message_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["MessageDeleted"] 8 | 9 | 10 | class MessageDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["thread.message.deleted"] 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/message_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | from .message_content_delta import MessageContentDelta 8 | 9 | __all__ = ["MessageDelta"] 10 | 11 | 12 | class MessageDelta(BaseModel): 13 | content: Optional[List[MessageContentDelta]] = None 14 | """The content of the message in array of text and/or images.""" 15 | 16 | role: Optional[Literal["user", "assistant"]] = None 17 | """The entity that produced the message. 
One of `user` or `assistant`.""" 18 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/message_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | from .message_delta import MessageDelta 7 | 8 | __all__ = ["MessageDeltaEvent"] 9 | 10 | 11 | class MessageDeltaEvent(BaseModel): 12 | id: str 13 | """The identifier of the message, which can be referenced in API endpoints.""" 14 | 15 | delta: MessageDelta 16 | """The delta containing the fields that have changed on the Message.""" 17 | 18 | object: Literal["thread.message.delta"] 19 | """The object type, which is always `thread.message.delta`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/message_update_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Optional 6 | from typing_extensions import Required, TypedDict 7 | 8 | from ...shared_params.metadata import Metadata 9 | 10 | __all__ = ["MessageUpdateParams"] 11 | 12 | 13 | class MessageUpdateParams(TypedDict, total=False): 14 | thread_id: Required[str] 15 | 16 | metadata: Optional[Metadata] 17 | """Set of 16 key-value pairs that can be attached to an object. 18 | 19 | This can be useful for storing additional information about the object in a 20 | structured format, and querying for objects via API or the dashboard. 21 | 22 | Keys are strings with a maximum length of 64 characters. Values are strings with 23 | a maximum length of 512 characters. 
24 | """ 25 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/refusal_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["RefusalContentBlock"] 8 | 9 | 10 | class RefusalContentBlock(BaseModel): 11 | refusal: str 12 | 13 | type: Literal["refusal"] 14 | """Always `refusal`.""" 15 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/refusal_delta_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["RefusalDeltaBlock"] 9 | 10 | 11 | class RefusalDeltaBlock(BaseModel): 12 | index: int 13 | """The index of the refusal part in the message.""" 14 | 15 | type: Literal["refusal"] 16 | """Always `refusal`.""" 17 | 18 | refusal: Optional[str] = None 19 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/required_action_function_tool_call.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["RequiredActionFunctionToolCall", "Function"] 8 | 9 | 10 | class Function(BaseModel): 11 | arguments: str 12 | """The arguments that the model expects you to pass to the function.""" 13 | 14 | name: str 15 | """The name of the function.""" 16 | 17 | 18 | class RequiredActionFunctionToolCall(BaseModel): 19 | id: str 20 | """The ID of the tool call. 21 | 22 | This ID must be referenced when you submit the tool outputs in using the 23 | [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) 24 | endpoint. 25 | """ 26 | 27 | function: Function 28 | """The function definition.""" 29 | 30 | type: Literal["function"] 31 | """The type of tool call the output is required for. 32 | 33 | For now, this is always `function`. 34 | """ 35 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/run_status.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["RunStatus"] 6 | 7 | RunStatus: TypeAlias = Literal[ 8 | "queued", 9 | "in_progress", 10 | "requires_action", 11 | "cancelling", 12 | "cancelled", 13 | "failed", 14 | "completed", 15 | "incomplete", 16 | "expired", 17 | ] 18 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/run_update_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Optional 6 | from typing_extensions import Required, TypedDict 7 | 8 | from ...shared_params.metadata import Metadata 9 | 10 | __all__ = ["RunUpdateParams"] 11 | 12 | 13 | class RunUpdateParams(TypedDict, total=False): 14 | thread_id: Required[str] 15 | 16 | metadata: Optional[Metadata] 17 | """Set of 16 key-value pairs that can be attached to an object. 18 | 19 | This can be useful for storing additional information about the object in a 20 | structured format, and querying for objects via API or the dashboard. 21 | 22 | Keys are strings with a maximum length of 64 characters. Values are strings with 23 | a maximum length of 512 characters. 24 | """ 25 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/code_interpreter_logs.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["CodeInterpreterLogs"] 9 | 10 | 11 | class CodeInterpreterLogs(BaseModel): 12 | index: int 13 | """The index of the output in the outputs array.""" 14 | 15 | type: Literal["logs"] 16 | """Always `logs`.""" 17 | 18 | logs: Optional[str] = None 19 | """The text output from the Code Interpreter tool call.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/code_interpreter_output_image.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["CodeInterpreterOutputImage", "Image"] 9 | 10 | 11 | class Image(BaseModel): 12 | file_id: Optional[str] = None 13 | """ 14 | The [file](https://platform.openai.com/docs/api-reference/files) ID of the 15 | image. 16 | """ 17 | 18 | 19 | class CodeInterpreterOutputImage(BaseModel): 20 | index: int 21 | """The index of the output in the outputs array.""" 22 | 23 | type: Literal["image"] 24 | """Always `image`.""" 25 | 26 | image: Optional[Image] = None 27 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/file_search_tool_call_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["FileSearchToolCallDelta"] 9 | 10 | 11 | class FileSearchToolCallDelta(BaseModel): 12 | file_search: object 13 | """For now, this is always going to be an empty object.""" 14 | 15 | index: int 16 | """The index of the tool call in the tool calls array.""" 17 | 18 | type: Literal["file_search"] 19 | """The type of tool call. 20 | 21 | This is always going to be `file_search` for this type of tool call. 22 | """ 23 | 24 | id: Optional[str] = None 25 | """The ID of the tool call object.""" 26 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/function_tool_call.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["FunctionToolCall", "Function"] 9 | 10 | 11 | class Function(BaseModel): 12 | arguments: str 13 | """The arguments passed to the function.""" 14 | 15 | name: str 16 | """The name of the function.""" 17 | 18 | output: Optional[str] = None 19 | """The output of the function. 20 | 21 | This will be `null` if the outputs have not been 22 | [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) 23 | yet. 24 | """ 25 | 26 | 27 | class FunctionToolCall(BaseModel): 28 | id: str 29 | """The ID of the tool call object.""" 30 | 31 | function: Function 32 | """The definition of the function that was called.""" 33 | 34 | type: Literal["function"] 35 | """The type of tool call. 36 | 37 | This is always going to be `function` for this type of tool call. 38 | """ 39 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/function_tool_call_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["FunctionToolCallDelta", "Function"] 9 | 10 | 11 | class Function(BaseModel): 12 | arguments: Optional[str] = None 13 | """The arguments passed to the function.""" 14 | 15 | name: Optional[str] = None 16 | """The name of the function.""" 17 | 18 | output: Optional[str] = None 19 | """The output of the function. 20 | 21 | This will be `null` if the outputs have not been 22 | [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) 23 | yet. 
24 | """ 25 | 26 | 27 | class FunctionToolCallDelta(BaseModel): 28 | index: int 29 | """The index of the tool call in the tool calls array.""" 30 | 31 | type: Literal["function"] 32 | """The type of tool call. 33 | 34 | This is always going to be `function` for this type of tool call. 35 | """ 36 | 37 | id: Optional[str] = None 38 | """The ID of the tool call object.""" 39 | 40 | function: Optional[Function] = None 41 | """The definition of the function that was called.""" 42 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/message_creation_step_details.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ....._models import BaseModel 6 | 7 | __all__ = ["MessageCreationStepDetails", "MessageCreation"] 8 | 9 | 10 | class MessageCreation(BaseModel): 11 | message_id: str 12 | """The ID of the message that was created by this run step.""" 13 | 14 | 15 | class MessageCreationStepDetails(BaseModel): 16 | message_creation: MessageCreation 17 | 18 | type: Literal["message_creation"] 19 | """Always `message_creation`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/run_step_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Union, Optional 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ....._utils import PropertyInfo 7 | from ....._models import BaseModel 8 | from .tool_call_delta_object import ToolCallDeltaObject 9 | from .run_step_delta_message_delta import RunStepDeltaMessageDelta 10 | 11 | __all__ = ["RunStepDelta", "StepDetails"] 12 | 13 | StepDetails: TypeAlias = Annotated[ 14 | Union[RunStepDeltaMessageDelta, ToolCallDeltaObject], PropertyInfo(discriminator="type") 15 | ] 16 | 17 | 18 | class RunStepDelta(BaseModel): 19 | step_details: Optional[StepDetails] = None 20 | """The details of the run step.""" 21 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/run_step_delta_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ....._models import BaseModel 6 | from .run_step_delta import RunStepDelta 7 | 8 | __all__ = ["RunStepDeltaEvent"] 9 | 10 | 11 | class RunStepDeltaEvent(BaseModel): 12 | id: str 13 | """The identifier of the run step, which can be referenced in API endpoints.""" 14 | 15 | delta: RunStepDelta 16 | """The delta containing the fields that have changed on the run step.""" 17 | 18 | object: Literal["thread.run.step.delta"] 19 | """The object type, which is always `thread.run.step.delta`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/run_step_delta_message_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | 8 | __all__ = ["RunStepDeltaMessageDelta", "MessageCreation"] 9 | 10 | 11 | class MessageCreation(BaseModel): 12 | message_id: Optional[str] = None 13 | """The ID of the message that was created by this run step.""" 14 | 15 | 16 | class RunStepDeltaMessageDelta(BaseModel): 17 | type: Literal["message_creation"] 18 | """Always `message_creation`.""" 19 | 20 | message_creation: Optional[MessageCreation] = None 21 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/run_step_include.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["RunStepInclude"] 6 | 7 | RunStepInclude: TypeAlias = Literal["step_details.tool_calls[*].file_search.results[*].content"] 8 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/step_retrieve_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import List 6 | from typing_extensions import Required, TypedDict 7 | 8 | from .run_step_include import RunStepInclude 9 | 10 | __all__ = ["StepRetrieveParams"] 11 | 12 | 13 | class StepRetrieveParams(TypedDict, total=False): 14 | thread_id: Required[str] 15 | 16 | run_id: Required[str] 17 | 18 | include: List[RunStepInclude] 19 | """A list of additional fields to include in the response. 20 | 21 | Currently the only supported value is 22 | `step_details.tool_calls[*].file_search.results[*].content` to fetch the file 23 | search result content. 
24 | 25 | See the 26 | [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) 27 | for more information. 28 | """ 29 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/tool_call.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ....._utils import PropertyInfo 7 | from .function_tool_call import FunctionToolCall 8 | from .file_search_tool_call import FileSearchToolCall 9 | from .code_interpreter_tool_call import CodeInterpreterToolCall 10 | 11 | __all__ = ["ToolCall"] 12 | 13 | ToolCall: TypeAlias = Annotated[ 14 | Union[CodeInterpreterToolCall, FileSearchToolCall, FunctionToolCall], PropertyInfo(discriminator="type") 15 | ] 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/tool_call_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Union 4 | from typing_extensions import Annotated, TypeAlias 5 | 6 | from ....._utils import PropertyInfo 7 | from .function_tool_call_delta import FunctionToolCallDelta 8 | from .file_search_tool_call_delta import FileSearchToolCallDelta 9 | from .code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta 10 | 11 | __all__ = ["ToolCallDelta"] 12 | 13 | ToolCallDelta: TypeAlias = Annotated[ 14 | Union[CodeInterpreterToolCallDelta, FileSearchToolCallDelta, FunctionToolCallDelta], 15 | PropertyInfo(discriminator="type"), 16 | ] 17 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/tool_call_delta_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Optional 4 | from typing_extensions import Literal 5 | 6 | from ....._models import BaseModel 7 | from .tool_call_delta import ToolCallDelta 8 | 9 | __all__ = ["ToolCallDeltaObject"] 10 | 11 | 12 | class ToolCallDeltaObject(BaseModel): 13 | type: Literal["tool_calls"] 14 | """Always `tool_calls`.""" 15 | 16 | tool_calls: Optional[List[ToolCallDelta]] = None 17 | """An array of tool calls the run step was involved in. 18 | 19 | These can be associated with one of three types of tools: `code_interpreter`, 20 | `file_search`, or `function`. 21 | """ 22 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/runs/tool_calls_step_details.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import List 4 | from typing_extensions import Literal 5 | 6 | from .tool_call import ToolCall 7 | from ....._models import BaseModel 8 | 9 | __all__ = ["ToolCallsStepDetails"] 10 | 11 | 12 | class ToolCallsStepDetails(BaseModel): 13 | tool_calls: List[ToolCall] 14 | """An array of tool calls the run step was involved in. 15 | 16 | These can be associated with one of three types of tools: `code_interpreter`, 17 | `file_search`, or `function`. 18 | """ 19 | 20 | type: Literal["tool_calls"] 21 | """Always `tool_calls`.""" 22 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/text.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List 4 | 5 | from ...._models import BaseModel 6 | from .annotation import Annotation 7 | 8 | __all__ = ["Text"] 9 | 10 | 11 | class Text(BaseModel): 12 | annotations: List[Annotation] 13 | 14 | value: str 15 | """The data that makes up the text.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/text_content_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .text import Text 6 | from ...._models import BaseModel 7 | 8 | __all__ = ["TextContentBlock"] 9 | 10 | 11 | class TextContentBlock(BaseModel): 12 | text: Text 13 | 14 | type: Literal["text"] 15 | """Always `text`.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/text_content_block_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["TextContentBlockParam"] 8 | 9 | 10 | class TextContentBlockParam(TypedDict, total=False): 11 | text: Required[str] 12 | """Text content to be sent to the model""" 13 | 14 | type: Required[Literal["text"]] 15 | """Always `text`.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/text_delta.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Optional 4 | 5 | from ...._models import BaseModel 6 | from .annotation_delta import AnnotationDelta 7 | 8 | __all__ = ["TextDelta"] 9 | 10 | 11 | class TextDelta(BaseModel): 12 | annotations: Optional[List[AnnotationDelta]] = None 13 | 14 | value: Optional[str] = None 15 | """The data that makes up the text.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/threads/text_delta_block.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Optional 4 | from typing_extensions import Literal 5 | 6 | from ...._models import BaseModel 7 | from .text_delta import TextDelta 8 | 9 | __all__ = ["TextDeltaBlock"] 10 | 11 | 12 | class TextDeltaBlock(BaseModel): 13 | index: int 14 | """The index of the content part in the message.""" 15 | 16 | type: Literal["text"] 17 | """Always `text`.""" 18 | 19 | text: Optional[TextDelta] = None 20 | -------------------------------------------------------------------------------- /src/openai/types/beta/vector_store_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["VectorStoreDeleted"] 8 | 9 | 10 | class VectorStoreDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["vector_store.deleted"] 16 | -------------------------------------------------------------------------------- /src/openai/types/beta/vector_stores/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from .file_list_params import FileListParams as FileListParams 6 | from .vector_store_file import VectorStoreFile as VectorStoreFile 7 | from .file_create_params import FileCreateParams as FileCreateParams 8 | from .vector_store_file_batch import VectorStoreFileBatch as VectorStoreFileBatch 9 | from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams 10 | from .vector_store_file_deleted import VectorStoreFileDeleted as VectorStoreFileDeleted 11 | from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams 12 | -------------------------------------------------------------------------------- /src/openai/types/beta/vector_stores/file_batch_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import List 6 | from typing_extensions import Required, TypedDict 7 | 8 | from ..file_chunking_strategy_param import FileChunkingStrategyParam 9 | 10 | __all__ = ["FileBatchCreateParams"] 11 | 12 | 13 | class FileBatchCreateParams(TypedDict, total=False): 14 | file_ids: Required[List[str]] 15 | """ 16 | A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that 17 | the vector store should use. Useful for tools like `file_search` that can access 18 | files. 19 | """ 20 | 21 | chunking_strategy: FileChunkingStrategyParam 22 | """The chunking strategy used to chunk the file(s). 23 | 24 | If not set, will use the `auto` strategy. Only applicable if `file_ids` is 25 | non-empty. 
26 | """ 27 | -------------------------------------------------------------------------------- /src/openai/types/beta/vector_stores/file_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | from ..file_chunking_strategy_param import FileChunkingStrategyParam 8 | 9 | __all__ = ["FileCreateParams"] 10 | 11 | 12 | class FileCreateParams(TypedDict, total=False): 13 | file_id: Required[str] 14 | """ 15 | A [File](https://platform.openai.com/docs/api-reference/files) ID that the 16 | vector store should use. Useful for tools like `file_search` that can access 17 | files. 18 | """ 19 | 20 | chunking_strategy: FileChunkingStrategyParam 21 | """The chunking strategy used to chunk the file(s). 22 | 23 | If not set, will use the `auto` strategy. Only applicable if `file_ids` is 24 | non-empty. 25 | """ 26 | -------------------------------------------------------------------------------- /src/openai/types/beta/vector_stores/vector_store_file_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ...._models import BaseModel 6 | 7 | __all__ = ["VectorStoreFileDeleted"] 8 | 9 | 10 | class VectorStoreFileDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["vector_store.file.deleted"] 16 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_audio.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | 4 | from ..._models import BaseModel 5 | 6 | __all__ = ["ChatCompletionAudio"] 7 | 8 | 9 | class ChatCompletionAudio(BaseModel): 10 | id: str 11 | """Unique identifier for this audio response.""" 12 | 13 | data: str 14 | """ 15 | Base64 encoded audio bytes generated by the model, in the format specified in 16 | the request. 17 | """ 18 | 19 | expires_at: int 20 | """ 21 | The Unix timestamp (in seconds) for when this audio response will no longer be 22 | accessible on the server for use in multi-turn conversations. 23 | """ 24 | 25 | transcript: str 26 | """Transcript of the audio generated by the model.""" 27 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_audio_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionAudioParam"] 8 | 9 | 10 | class ChatCompletionAudioParam(TypedDict, total=False): 11 | format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]] 12 | """Specifies the output audio format. 13 | 14 | Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. 15 | """ 16 | 17 | voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] 18 | """The voice the model uses to respond. 19 | 20 | Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also 21 | supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices 22 | are less expressive). 23 | """ 24 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_content_part_image_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionContentPartImageParam", "ImageURL"] 8 | 9 | 10 | class ImageURL(TypedDict, total=False): 11 | url: Required[str] 12 | """Either a URL of the image or the base64 encoded image data.""" 13 | 14 | detail: Literal["auto", "low", "high"] 15 | """Specifies the detail level of the image. 16 | 17 | Learn more in the 18 | [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). 19 | """ 20 | 21 | 22 | class ChatCompletionContentPartImageParam(TypedDict, total=False): 23 | image_url: Required[ImageURL] 24 | 25 | type: Required[Literal["image_url"]] 26 | """The type of the content part.""" 27 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_content_part_input_audio_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"] 8 | 9 | 10 | class InputAudio(TypedDict, total=False): 11 | data: Required[str] 12 | """Base64 encoded audio data.""" 13 | 14 | format: Required[Literal["wav", "mp3"]] 15 | """The format of the encoded audio data. Currently supports "wav" and "mp3".""" 16 | 17 | 18 | class ChatCompletionContentPartInputAudioParam(TypedDict, total=False): 19 | input_audio: Required[InputAudio] 20 | 21 | type: Required[Literal["input_audio"]] 22 | """The type of the content part. 
Always `input_audio`.""" 23 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_content_part_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam 9 | from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam 10 | from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam 11 | 12 | __all__ = ["ChatCompletionContentPartParam"] 13 | 14 | ChatCompletionContentPartParam: TypeAlias = Union[ 15 | ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam 16 | ] 17 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_content_part_refusal_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionContentPartRefusalParam"] 8 | 9 | 10 | class ChatCompletionContentPartRefusalParam(TypedDict, total=False): 11 | refusal: Required[str] 12 | """The refusal message generated by the model.""" 13 | 14 | type: Required[Literal["refusal"]] 15 | """The type of the content part.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_content_part_text_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionContentPartTextParam"] 8 | 9 | 10 | class ChatCompletionContentPartTextParam(TypedDict, total=False): 11 | text: Required[str] 12 | """The text content.""" 13 | 14 | type: Required[Literal["text"]] 15 | """The type of the content part.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_developer_message_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union, Iterable 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam 9 | 10 | __all__ = ["ChatCompletionDeveloperMessageParam"] 11 | 12 | 13 | class ChatCompletionDeveloperMessageParam(TypedDict, total=False): 14 | content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] 15 | """The contents of the developer message.""" 16 | 17 | role: Required[Literal["developer"]] 18 | """The role of the messages author, in this case `developer`.""" 19 | 20 | name: str 21 | """An optional name for the participant. 22 | 23 | Provides the model information to differentiate between participants of the same 24 | role. 25 | """ 26 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_function_call_option_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionFunctionCallOptionParam"] 8 | 9 | 10 | class ChatCompletionFunctionCallOptionParam(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the function to call.""" 13 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_function_message_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Optional 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | __all__ = ["ChatCompletionFunctionMessageParam"] 9 | 10 | 11 | class ChatCompletionFunctionMessageParam(TypedDict, total=False): 12 | content: Required[Optional[str]] 13 | """The contents of the function message.""" 14 | 15 | name: Required[str] 16 | """The name of the function to call.""" 17 | 18 | role: Required[Literal["function"]] 19 | """The role of the messages author, in this case `function`.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_message_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from .chat_completion_tool_message_param import ChatCompletionToolMessageParam 9 | from .chat_completion_user_message_param import ChatCompletionUserMessageParam 10 | from .chat_completion_system_message_param import ChatCompletionSystemMessageParam 11 | from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam 12 | from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam 13 | from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam 14 | 15 | __all__ = ["ChatCompletionMessageParam"] 16 | 17 | ChatCompletionMessageParam: TypeAlias = Union[ 18 | ChatCompletionDeveloperMessageParam, 19 | ChatCompletionSystemMessageParam, 20 | ChatCompletionUserMessageParam, 21 | ChatCompletionAssistantMessageParam, 22 | ChatCompletionToolMessageParam, 23 | ChatCompletionFunctionMessageParam, 24 | ] 25 | -------------------------------------------------------------------------------- 
/src/openai/types/chat/chat_completion_message_tool_call.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ChatCompletionMessageToolCall", "Function"] 8 | 9 | 10 | class Function(BaseModel): 11 | arguments: str 12 | """ 13 | The arguments to call the function with, as generated by the model in JSON 14 | format. Note that the model does not always generate valid JSON, and may 15 | hallucinate parameters not defined by your function schema. Validate the 16 | arguments in your code before calling your function. 17 | """ 18 | 19 | name: str 20 | """The name of the function to call.""" 21 | 22 | 23 | class ChatCompletionMessageToolCall(BaseModel): 24 | id: str 25 | """The ID of the tool call.""" 26 | 27 | function: Function 28 | """The function that the model called.""" 29 | 30 | type: Literal["function"] 31 | """The type of the tool. Currently, only `function` is supported.""" 32 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_message_tool_call_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionMessageToolCallParam", "Function"] 8 | 9 | 10 | class Function(TypedDict, total=False): 11 | arguments: Required[str] 12 | """ 13 | The arguments to call the function with, as generated by the model in JSON 14 | format. Note that the model does not always generate valid JSON, and may 15 | hallucinate parameters not defined by your function schema. 
Validate the 16 | arguments in your code before calling your function. 17 | """ 18 | 19 | name: Required[str] 20 | """The name of the function to call.""" 21 | 22 | 23 | class ChatCompletionMessageToolCallParam(TypedDict, total=False): 24 | id: Required[str] 25 | """The ID of the tool call.""" 26 | 27 | function: Required[Function] 28 | """The function that the model called.""" 29 | 30 | type: Required[Literal["function"]] 31 | """The type of the tool. Currently, only `function` is supported.""" 32 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_modality.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ChatCompletionModality"] 6 | 7 | ChatCompletionModality: TypeAlias = Literal["text", "audio"] 8 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_named_tool_choice_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ChatCompletionNamedToolChoiceParam", "Function"] 8 | 9 | 10 | class Function(TypedDict, total=False): 11 | name: Required[str] 12 | """The name of the function to call.""" 13 | 14 | 15 | class ChatCompletionNamedToolChoiceParam(TypedDict, total=False): 16 | function: Required[Function] 17 | 18 | type: Required[Literal["function"]] 19 | """The type of the tool. 
Currently, only `function` is supported.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_prediction_content_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union, Iterable 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam 9 | 10 | __all__ = ["ChatCompletionPredictionContentParam"] 11 | 12 | 13 | class ChatCompletionPredictionContentParam(TypedDict, total=False): 14 | content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] 15 | """ 16 | The content that should be matched when generating a model response. If 17 | generated tokens would match this content, the entire model response can be 18 | returned much more quickly. 19 | """ 20 | 21 | type: Required[Literal["content"]] 22 | """The type of the predicted content you want to provide. 23 | 24 | This type is currently always `content`. 25 | """ 26 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_reasoning_effort.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ChatCompletionReasoningEffort"] 6 | 7 | ChatCompletionReasoningEffort: TypeAlias = Literal["low", "medium", "high"] 8 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_role.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ChatCompletionRole"] 6 | 7 | ChatCompletionRole: TypeAlias = Literal["system", "user", "assistant", "tool", "function"] 8 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_stream_options_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import TypedDict 6 | 7 | __all__ = ["ChatCompletionStreamOptionsParam"] 8 | 9 | 10 | class ChatCompletionStreamOptionsParam(TypedDict, total=False): 11 | include_usage: bool 12 | """If set, an additional chunk will be streamed before the `data: [DONE]` message. 13 | 14 | The `usage` field on this chunk shows the token usage statistics for the entire 15 | request, and the `choices` field will always be an empty array. All other chunks 16 | will also include a `usage` field, but with a null value. 17 | """ 18 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_system_message_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union, Iterable 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam 9 | 10 | __all__ = ["ChatCompletionSystemMessageParam"] 11 | 12 | 13 | class ChatCompletionSystemMessageParam(TypedDict, total=False): 14 | content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] 15 | """The contents of the system message.""" 16 | 17 | role: Required[Literal["system"]] 18 | """The role of the messages author, in this case `system`.""" 19 | 20 | name: str 21 | """An optional name for the participant. 22 | 23 | Provides the model information to differentiate between participants of the same 24 | role. 25 | """ 26 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_tool_choice_option_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import Literal, TypeAlias 7 | 8 | from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam 9 | 10 | __all__ = ["ChatCompletionToolChoiceOptionParam"] 11 | 12 | ChatCompletionToolChoiceOptionParam: TypeAlias = Union[ 13 | Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam 14 | ] 15 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_tool_message_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union, Iterable 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam 9 | 10 | __all__ = ["ChatCompletionToolMessageParam"] 11 | 12 | 13 | class ChatCompletionToolMessageParam(TypedDict, total=False): 14 | content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] 15 | """The contents of the tool message.""" 16 | 17 | role: Required[Literal["tool"]] 18 | """The role of the messages author, in this case `tool`.""" 19 | 20 | tool_call_id: Required[str] 21 | """Tool call that this message is responding to.""" 22 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_tool_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | from ..shared_params.function_definition import FunctionDefinition 8 | 9 | __all__ = ["ChatCompletionToolParam"] 10 | 11 | 12 | class ChatCompletionToolParam(TypedDict, total=False): 13 | function: Required[FunctionDefinition] 14 | 15 | type: Required[Literal["function"]] 16 | """The type of the tool. Currently, only `function` is supported.""" 17 | -------------------------------------------------------------------------------- /src/openai/types/chat/chat_completion_user_message_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union, Iterable 6 | from typing_extensions import Literal, Required, TypedDict 7 | 8 | from .chat_completion_content_part_param import ChatCompletionContentPartParam 9 | 10 | __all__ = ["ChatCompletionUserMessageParam"] 11 | 12 | 13 | class ChatCompletionUserMessageParam(TypedDict, total=False): 14 | content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]] 15 | """The contents of the user message.""" 16 | 17 | role: Required[Literal["user"]] 18 | """The role of the messages author, in this case `user`.""" 19 | 20 | name: str 21 | """An optional name for the participant. 22 | 23 | Provides the model information to differentiate between participants of the same 24 | role. 25 | """ 26 | -------------------------------------------------------------------------------- /src/openai/types/chat/parsed_function_tool_call.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall 6 | 7 | __all__ = ["ParsedFunctionToolCall", "ParsedFunction"] 8 | 9 | # we need to disable this check because we're overriding properties 10 | # with subclasses of their types which is technically unsound as 11 | # properties can be mutated. 12 | # pyright: reportIncompatibleVariableOverride=false 13 | 14 | 15 | class ParsedFunction(Function): 16 | parsed_arguments: Optional[object] = None 17 | """ 18 | The arguments to call the function with. 19 | 20 | If you used `openai.pydantic_function_tool()` then this will be an 21 | instance of the given `BaseModel`. 22 | 23 | Otherwise, this will be the parsed JSON arguments. 
24 | """ 25 | 26 | 27 | class ParsedFunctionToolCall(ChatCompletionMessageToolCall): 28 | function: ParsedFunction 29 | """The function that the model called.""" 30 | -------------------------------------------------------------------------------- /src/openai/types/completion.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List, Optional 4 | from typing_extensions import Literal 5 | 6 | from .._models import BaseModel 7 | from .completion_usage import CompletionUsage 8 | from .completion_choice import CompletionChoice 9 | 10 | __all__ = ["Completion"] 11 | 12 | 13 | class Completion(BaseModel): 14 | id: str 15 | """A unique identifier for the completion.""" 16 | 17 | choices: List[CompletionChoice] 18 | """The list of completion choices the model generated for the input prompt.""" 19 | 20 | created: int 21 | """The Unix timestamp (in seconds) of when the completion was created.""" 22 | 23 | model: str 24 | """The model used for completion.""" 25 | 26 | object: Literal["text_completion"] 27 | """The object type, which is always "text_completion" """ 28 | 29 | system_fingerprint: Optional[str] = None 30 | """This fingerprint represents the backend configuration that the model runs with. 31 | 32 | Can be used in conjunction with the `seed` request parameter to understand when 33 | backend changes have been made that might impact determinism. 34 | """ 35 | 36 | usage: Optional[CompletionUsage] = None 37 | """Usage statistics for the completion request.""" 38 | -------------------------------------------------------------------------------- /src/openai/types/completion_choice.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Dict, List, Optional 4 | from typing_extensions import Literal 5 | 6 | from .._models import BaseModel 7 | 8 | __all__ = ["CompletionChoice", "Logprobs"] 9 | 10 | 11 | class Logprobs(BaseModel): 12 | text_offset: Optional[List[int]] = None 13 | 14 | token_logprobs: Optional[List[float]] = None 15 | 16 | tokens: Optional[List[str]] = None 17 | 18 | top_logprobs: Optional[List[Dict[str, float]]] = None 19 | 20 | 21 | class CompletionChoice(BaseModel): 22 | finish_reason: Literal["stop", "length", "content_filter"] 23 | """The reason the model stopped generating tokens. 24 | 25 | This will be `stop` if the model hit a natural stop point or a provided stop 26 | sequence, `length` if the maximum number of tokens specified in the request was 27 | reached, or `content_filter` if content was omitted due to a flag from our 28 | content filters. 29 | """ 30 | 31 | index: int 32 | 33 | logprobs: Optional[Logprobs] = None 34 | 35 | text: str 36 | -------------------------------------------------------------------------------- /src/openai/types/create_embedding_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import List 4 | from typing_extensions import Literal 5 | 6 | from .._models import BaseModel 7 | from .embedding import Embedding 8 | 9 | __all__ = ["CreateEmbeddingResponse", "Usage"] 10 | 11 | 12 | class Usage(BaseModel): 13 | prompt_tokens: int 14 | """The number of tokens used by the prompt.""" 15 | 16 | total_tokens: int 17 | """The total number of tokens used by the request.""" 18 | 19 | 20 | class CreateEmbeddingResponse(BaseModel): 21 | data: List[Embedding] 22 | """The list of embeddings generated by the model.""" 23 | 24 | model: str 25 | """The name of the model used to generate the embedding.""" 26 | 27 | object: Literal["list"] 28 | """The object type, which is always "list".""" 29 | 30 | usage: Usage 31 | """The usage information for the request.""" 32 | -------------------------------------------------------------------------------- /src/openai/types/embedding.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List 4 | from typing_extensions import Literal 5 | 6 | from .._models import BaseModel 7 | 8 | __all__ = ["Embedding"] 9 | 10 | 11 | class Embedding(BaseModel): 12 | embedding: List[float] 13 | """The embedding vector, which is a list of floats. 14 | 15 | The length of vector depends on the model as listed in the 16 | [embedding guide](https://platform.openai.com/docs/guides/embeddings). 17 | """ 18 | 19 | index: int 20 | """The index of the embedding in the list of embeddings.""" 21 | 22 | object: Literal["embedding"] 23 | """The object type, which is always "embedding".""" 24 | -------------------------------------------------------------------------------- /src/openai/types/embedding_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["EmbeddingModel"] 6 | 7 | EmbeddingModel: TypeAlias = Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"] 8 | -------------------------------------------------------------------------------- /src/openai/types/file_content.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import TypeAlias 4 | 5 | __all__ = ["FileContent"] 6 | 7 | FileContent: TypeAlias = str 8 | -------------------------------------------------------------------------------- /src/openai/types/file_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | from .._types import FileTypes 8 | from .file_purpose import FilePurpose 9 | 10 | __all__ = ["FileCreateParams"] 11 | 12 | 13 | class FileCreateParams(TypedDict, total=False): 14 | file: Required[FileTypes] 15 | """The File object (not file name) to be uploaded.""" 16 | 17 | purpose: Required[FilePurpose] 18 | """The intended purpose of the uploaded file. 19 | 20 | Use "assistants" for 21 | [Assistants](https://platform.openai.com/docs/api-reference/assistants) and 22 | [Message](https://platform.openai.com/docs/api-reference/messages) files, 23 | "vision" for Assistants image file inputs, "batch" for 24 | [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for 25 | [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). 
26 | """ 27 | -------------------------------------------------------------------------------- /src/openai/types/file_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["FileDeleted"] 8 | 9 | 10 | class FileDeleted(BaseModel): 11 | id: str 12 | 13 | deleted: bool 14 | 15 | object: Literal["file"] 16 | -------------------------------------------------------------------------------- /src/openai/types/file_list_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, TypedDict 6 | 7 | __all__ = ["FileListParams"] 8 | 9 | 10 | class FileListParams(TypedDict, total=False): 11 | after: str 12 | """A cursor for use in pagination. 13 | 14 | `after` is an object ID that defines your place in the list. For instance, if 15 | you make a list request and receive 100 objects, ending with obj_foo, your 16 | subsequent call can include after=obj_foo in order to fetch the next page of the 17 | list. 18 | """ 19 | 20 | limit: int 21 | """A limit on the number of objects to be returned. 22 | 23 | Limit can range between 1 and 10,000, and the default is 10,000. 24 | """ 25 | 26 | order: Literal["asc", "desc"] 27 | """Sort order by the `created_at` timestamp of the objects. 28 | 29 | `asc` for ascending order and `desc` for descending order. 
30 | """ 31 | 32 | purpose: str 33 | """Only return files with the given purpose.""" 34 | -------------------------------------------------------------------------------- /src/openai/types/file_purpose.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["FilePurpose"] 6 | 7 | FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision"] 8 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from .fine_tuning_job import FineTuningJob as FineTuningJob 6 | from .job_list_params import JobListParams as JobListParams 7 | from .job_create_params import JobCreateParams as JobCreateParams 8 | from .fine_tuning_job_event import FineTuningJobEvent as FineTuningJobEvent 9 | from .job_list_events_params import JobListEventsParams as JobListEventsParams 10 | from .fine_tuning_job_integration import FineTuningJobIntegration as FineTuningJobIntegration 11 | from .fine_tuning_job_wandb_integration import FineTuningJobWandbIntegration as FineTuningJobWandbIntegration 12 | from .fine_tuning_job_wandb_integration_object import ( 13 | FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, 14 | ) 15 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/fine_tuning_job_event.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | import builtins 4 | from typing import Optional 5 | from typing_extensions import Literal 6 | 7 | from ..._models import BaseModel 8 | 9 | __all__ = ["FineTuningJobEvent"] 10 | 11 | 12 | class FineTuningJobEvent(BaseModel): 13 | id: str 14 | """The object identifier.""" 15 | 16 | created_at: int 17 | """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" 18 | 19 | level: Literal["info", "warn", "error"] 20 | """The log level of the event.""" 21 | 22 | message: str 23 | """The message of the event.""" 24 | 25 | object: Literal["fine_tuning.job.event"] 26 | """The object type, which is always "fine_tuning.job.event".""" 27 | 28 | data: Optional[builtins.object] = None 29 | """The data associated with the event.""" 30 | 31 | type: Optional[Literal["message", "metrics"]] = None 32 | """The type of event.""" 33 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/fine_tuning_job_integration.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | 4 | from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject 5 | 6 | FineTuningJobIntegration = FineTuningJobWandbIntegrationObject 7 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import List, Optional 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["FineTuningJobWandbIntegration"] 8 | 9 | 10 | class FineTuningJobWandbIntegration(BaseModel): 11 | project: str 12 | """The name of the project that the new run will be created under.""" 13 | 14 | entity: Optional[str] = None 15 | """The entity to use for the run. 16 | 17 | This allows you to set the team or username of the WandB user that you would 18 | like associated with the run. If not set, the default entity for the registered 19 | WandB API key is used. 20 | """ 21 | 22 | name: Optional[str] = None 23 | """A display name to set for the run. 24 | 25 | If not set, we will use the Job ID as the name. 26 | """ 27 | 28 | tags: Optional[List[str]] = None 29 | """A list of tags to be attached to the newly created run. 30 | 31 | These tags are passed through directly to WandB. Some default tags are generated 32 | by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". 33 | """ 34 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | from .fine_tuning_job_wandb_integration import FineTuningJobWandbIntegration 7 | 8 | __all__ = ["FineTuningJobWandbIntegrationObject"] 9 | 10 | 11 | class FineTuningJobWandbIntegrationObject(BaseModel): 12 | type: Literal["wandb"] 13 | """The type of the integration being enabled for the fine-tuning job""" 14 | 15 | wandb: FineTuningJobWandbIntegration 16 | """The settings for your integration with Weights and Biases. 17 | 18 | This payload specifies the project that metrics will be sent to. 
Optionally, you 19 | can set an explicit display name for your run, add tags to your run, and set a 20 | default entity (team, username, etc) to be associated with your run. 21 | """ 22 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/job_list_events_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import TypedDict 6 | 7 | __all__ = ["JobListEventsParams"] 8 | 9 | 10 | class JobListEventsParams(TypedDict, total=False): 11 | after: str 12 | """Identifier for the last event from the previous pagination request.""" 13 | 14 | limit: int 15 | """Number of events to retrieve.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/job_list_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import TypedDict 6 | 7 | __all__ = ["JobListParams"] 8 | 9 | 10 | class JobListParams(TypedDict, total=False): 11 | after: str 12 | """Identifier for the last job from the previous pagination request.""" 13 | 14 | limit: int 15 | """Number of fine-tuning jobs to retrieve.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/jobs/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from .checkpoint_list_params import CheckpointListParams as CheckpointListParams 6 | from .fine_tuning_job_checkpoint import FineTuningJobCheckpoint as FineTuningJobCheckpoint 7 | -------------------------------------------------------------------------------- /src/openai/types/fine_tuning/jobs/checkpoint_list_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import TypedDict 6 | 7 | __all__ = ["CheckpointListParams"] 8 | 9 | 10 | class CheckpointListParams(TypedDict, total=False): 11 | after: str 12 | """Identifier for the last checkpoint ID from the previous pagination request.""" 13 | 14 | limit: int 15 | """Number of checkpoints to retrieve.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/image.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["Image"] 8 | 9 | 10 | class Image(BaseModel): 11 | b64_json: Optional[str] = None 12 | """ 13 | The base64-encoded JSON of the generated image, if `response_format` is 14 | `b64_json`. 15 | """ 16 | 17 | revised_prompt: Optional[str] = None 18 | """ 19 | The prompt that was used to generate the image, if there was any revision to the 20 | prompt. 
21 | """ 22 | 23 | url: Optional[str] = None 24 | """The URL of the generated image, if `response_format` is `url` (default).""" 25 | -------------------------------------------------------------------------------- /src/openai/types/image_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ImageModel"] 6 | 7 | ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3"] 8 | -------------------------------------------------------------------------------- /src/openai/types/images_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List 4 | 5 | from .image import Image 6 | from .._models import BaseModel 7 | 8 | __all__ = ["ImagesResponse"] 9 | 10 | 11 | class ImagesResponse(BaseModel): 12 | created: int 13 | 14 | data: List[Image] 15 | -------------------------------------------------------------------------------- /src/openai/types/model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing_extensions import Literal 4 | 5 | from .._models import BaseModel 6 | 7 | __all__ = ["Model"] 8 | 9 | 10 | class Model(BaseModel): 11 | id: str 12 | """The model identifier, which can be referenced in the API endpoints.""" 13 | 14 | created: int 15 | """The Unix timestamp (in seconds) when the model was created.""" 16 | 17 | object: Literal["model"] 18 | """The object type, which is always "model".""" 19 | 20 | owned_by: str 21 | """The organization that owns the model.""" 22 | -------------------------------------------------------------------------------- /src/openai/types/model_deleted.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | 4 | from .._models import BaseModel 5 | 6 | __all__ = ["ModelDeleted"] 7 | 8 | 9 | class ModelDeleted(BaseModel): 10 | id: str 11 | 12 | deleted: bool 13 | 14 | object: str 15 | -------------------------------------------------------------------------------- /src/openai/types/moderation_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import List, Union, Iterable 6 | from typing_extensions import Required, TypedDict 7 | 8 | from .moderation_model import ModerationModel 9 | from .moderation_multi_modal_input_param import ModerationMultiModalInputParam 10 | 11 | __all__ = ["ModerationCreateParams"] 12 | 13 | 14 | class ModerationCreateParams(TypedDict, total=False): 15 | input: Required[Union[str, List[str], Iterable[ModerationMultiModalInputParam]]] 16 | """Input (or inputs) to classify. 17 | 18 | Can be a single string, an array of strings, or an array of multi-modal input 19 | objects similar to other models. 
20 | """ 21 | 22 | model: Union[str, ModerationModel] 23 | """The content moderation model you would like to use. 24 | 25 | Learn more in 26 | [the moderation guide](https://platform.openai.com/docs/guides/moderation), and 27 | learn about available models 28 | [here](https://platform.openai.com/docs/models#moderation). 29 | """ 30 | -------------------------------------------------------------------------------- /src/openai/types/moderation_create_response.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import List 4 | 5 | from .._models import BaseModel 6 | from .moderation import Moderation 7 | 8 | __all__ = ["ModerationCreateResponse"] 9 | 10 | 11 | class ModerationCreateResponse(BaseModel): 12 | id: str 13 | """The unique identifier for the moderation request.""" 14 | 15 | model: str 16 | """The model used to generate the moderation results.""" 17 | 18 | results: List[Moderation] 19 | """A list of moderation objects.""" 20 | -------------------------------------------------------------------------------- /src/openai/types/moderation_image_url_input_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ModerationImageURLInputParam", "ImageURL"] 8 | 9 | 10 | class ImageURL(TypedDict, total=False): 11 | url: Required[str] 12 | """Either a URL of the image or the base64 encoded image data.""" 13 | 14 | 15 | class ModerationImageURLInputParam(TypedDict, total=False): 16 | image_url: Required[ImageURL] 17 | """Contains either an image URL or a data URL for a base64 encoded image.""" 18 | 19 | type: Required[Literal["image_url"]] 20 | """Always `image_url`.""" 21 | -------------------------------------------------------------------------------- /src/openai/types/moderation_model.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal, TypeAlias 4 | 5 | __all__ = ["ModerationModel"] 6 | 7 | ModerationModel: TypeAlias = Literal[ 8 | "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable" 9 | ] 10 | -------------------------------------------------------------------------------- /src/openai/types/moderation_multi_modal_input_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing import Union 6 | from typing_extensions import TypeAlias 7 | 8 | from .moderation_text_input_param import ModerationTextInputParam 9 | from .moderation_image_url_input_param import ModerationImageURLInputParam 10 | 11 | __all__ = ["ModerationMultiModalInputParam"] 12 | 13 | ModerationMultiModalInputParam: TypeAlias = Union[ModerationImageURLInputParam, ModerationTextInputParam] 14 | -------------------------------------------------------------------------------- /src/openai/types/moderation_text_input_param.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ModerationTextInputParam"] 8 | 9 | 10 | class ModerationTextInputParam(TypedDict, total=False): 11 | text: Required[str] 12 | """A string of text to classify.""" 13 | 14 | type: Required[Literal["text"]] 15 | """Always `text`.""" 16 | -------------------------------------------------------------------------------- /src/openai/types/shared/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .metadata import Metadata as Metadata 4 | from .error_object import ErrorObject as ErrorObject 5 | from .function_definition import FunctionDefinition as FunctionDefinition 6 | from .function_parameters import FunctionParameters as FunctionParameters 7 | from .response_format_text import ResponseFormatText as ResponseFormatText 8 | from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject 9 | from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema 10 | -------------------------------------------------------------------------------- /src/openai/types/shared/error_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Optional 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ErrorObject"] 8 | 9 | 10 | class ErrorObject(BaseModel): 11 | code: Optional[str] = None 12 | 13 | message: str 14 | 15 | param: Optional[str] = None 16 | 17 | type: str 18 | -------------------------------------------------------------------------------- /src/openai/types/shared/function_parameters.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing import Dict 4 | from typing_extensions import TypeAlias 5 | 6 | __all__ = ["FunctionParameters"] 7 | 8 | FunctionParameters: TypeAlias = Dict[str, object] 9 | -------------------------------------------------------------------------------- /src/openai/types/shared/metadata.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from typing import Dict 4 | from typing_extensions import TypeAlias 5 | 6 | __all__ = ["Metadata"] 7 | 8 | Metadata: TypeAlias = Dict[str, str] 9 | -------------------------------------------------------------------------------- /src/openai/types/shared/response_format_json_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseFormatJSONObject"] 8 | 9 | 10 | class ResponseFormatJSONObject(BaseModel): 11 | type: Literal["json_object"] 12 | """The type of response format being defined: `json_object`""" 13 | -------------------------------------------------------------------------------- /src/openai/types/shared/response_format_text.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["ResponseFormatText"] 8 | 9 | 10 | class ResponseFormatText(BaseModel): 11 | type: Literal["text"] 12 | """The type of response format being defined: `text`""" 13 | -------------------------------------------------------------------------------- /src/openai/types/shared_params/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from .metadata import Metadata as Metadata 4 | from .function_definition import FunctionDefinition as FunctionDefinition 5 | from .function_parameters import FunctionParameters as FunctionParameters 6 | from .response_format_text import ResponseFormatText as ResponseFormatText 7 | from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject 8 | from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema 9 | -------------------------------------------------------------------------------- /src/openai/types/shared_params/function_parameters.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Dict 6 | from typing_extensions import TypeAlias 7 | 8 | __all__ = ["FunctionParameters"] 9 | 10 | FunctionParameters: TypeAlias = Dict[str, object] 11 | -------------------------------------------------------------------------------- /src/openai/types/shared_params/metadata.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Dict 6 | from typing_extensions import TypeAlias 7 | 8 | __all__ = ["Metadata"] 9 | 10 | Metadata: TypeAlias = Dict[str, str] 11 | -------------------------------------------------------------------------------- /src/openai/types/shared_params/response_format_json_object.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ResponseFormatJSONObject"] 8 | 9 | 10 | class ResponseFormatJSONObject(TypedDict, total=False): 11 | type: Required[Literal["json_object"]] 12 | """The type of response format being defined: `json_object`""" 13 | -------------------------------------------------------------------------------- /src/openai/types/shared_params/response_format_text.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Literal, Required, TypedDict 6 | 7 | __all__ = ["ResponseFormatText"] 8 | 9 | 10 | class ResponseFormatText(TypedDict, total=False): 11 | type: Required[Literal["text"]] 12 | """The type of response format being defined: `text`""" 13 | -------------------------------------------------------------------------------- /src/openai/types/upload_complete_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing import List 6 | from typing_extensions import Required, TypedDict 7 | 8 | __all__ = ["UploadCompleteParams"] 9 | 10 | 11 | class UploadCompleteParams(TypedDict, total=False): 12 | part_ids: Required[List[str]] 13 | """The ordered list of Part IDs.""" 14 | 15 | md5: str 16 | """ 17 | The optional md5 checksum for the file contents to verify if the bytes uploaded 18 | matches what you expect. 19 | """ 20 | -------------------------------------------------------------------------------- /src/openai/types/upload_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | from .file_purpose import FilePurpose 8 | 9 | __all__ = ["UploadCreateParams"] 10 | 11 | 12 | class UploadCreateParams(TypedDict, total=False): 13 | bytes: Required[int] 14 | """The number of bytes in the file you are uploading.""" 15 | 16 | filename: Required[str] 17 | """The name of the file to upload.""" 18 | 19 | mime_type: Required[str] 20 | """The MIME type of the file. 21 | 22 | This must fall within the supported MIME types for your file purpose. See the 23 | supported MIME types for assistants and vision. 24 | """ 25 | 26 | purpose: Required[FilePurpose] 27 | """The intended purpose of the uploaded file. 28 | 29 | See the 30 | [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). 31 | """ 32 | -------------------------------------------------------------------------------- /src/openai/types/uploads/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | from .upload_part import UploadPart as UploadPart 6 | from .part_create_params import PartCreateParams as PartCreateParams 7 | -------------------------------------------------------------------------------- /src/openai/types/uploads/part_create_params.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | 3 | from __future__ import annotations 4 | 5 | from typing_extensions import Required, TypedDict 6 | 7 | from ..._types import FileTypes 8 | 9 | __all__ = ["PartCreateParams"] 10 | 11 | 12 | class PartCreateParams(TypedDict, total=False): 13 | data: Required[FileTypes] 14 | """The chunk of bytes for this Part.""" 15 | -------------------------------------------------------------------------------- /src/openai/types/uploads/upload_part.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from typing_extensions import Literal 4 | 5 | from ..._models import BaseModel 6 | 7 | __all__ = ["UploadPart"] 8 | 9 | 10 | class UploadPart(BaseModel): 11 | id: str 12 | """The upload Part unique identifier, which can be referenced in API endpoints.""" 13 | 14 | created_at: int 15 | """The Unix timestamp (in seconds) for when the Part was created.""" 16 | 17 | object: Literal["upload.part"] 18 | """The object type, which is always `upload.part`.""" 19 | 20 | upload_id: str 21 | """The ID of the Upload object that this Part was added to.""" 22 | -------------------------------------------------------------------------------- /src/openai/version.py: -------------------------------------------------------------------------------- 1 | from ._version import __version__ 2 | 3 | VERSION: str = __version__ 4 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/audio/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/beta/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/beta/realtime/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/beta/test_realtime.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | 3 | from __future__ import annotations 4 | 5 | import os 6 | 7 | import pytest 8 | 9 | base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") 10 | 11 | 12 | class TestRealtime: 13 | parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) 14 | 15 | 16 | class TestAsyncRealtime: 17 | parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) 18 | -------------------------------------------------------------------------------- /tests/api_resources/beta/threads/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | -------------------------------------------------------------------------------- /tests/api_resources/beta/threads/runs/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/beta/vector_stores/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/chat/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/fine_tuning/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/fine_tuning/jobs/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 2 | -------------------------------------------------------------------------------- /tests/api_resources/uploads/__init__.py: -------------------------------------------------------------------------------- 1 | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
2 | -------------------------------------------------------------------------------- /tests/lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elenakozlova28/openai-python/7a6517d81e4ae9e9e9527cd401bb76937983dfef/tests/lib/__init__.py -------------------------------------------------------------------------------- /tests/lib/chat/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elenakozlova28/openai-python/7a6517d81e4ae9e9e9527cd401bb76937983dfef/tests/lib/chat/__init__.py -------------------------------------------------------------------------------- /tests/lib/schema_types/query.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import List, Union, Optional 3 | 4 | from pydantic import BaseModel 5 | 6 | 7 | class Table(str, Enum): 8 | orders = "orders" 9 | customers = "customers" 10 | products = "products" 11 | 12 | 13 | class Column(str, Enum): 14 | id = "id" 15 | status = "status" 16 | expected_delivery_date = "expected_delivery_date" 17 | delivered_at = "delivered_at" 18 | shipped_at = "shipped_at" 19 | ordered_at = "ordered_at" 20 | canceled_at = "canceled_at" 21 | 22 | 23 | class Operator(str, Enum): 24 | eq = "=" 25 | gt = ">" 26 | lt = "<" 27 | le = "<=" 28 | ge = ">=" 29 | ne = "!=" 30 | 31 | 32 | class OrderBy(str, Enum): 33 | asc = "asc" 34 | desc = "desc" 35 | 36 | 37 | class DynamicValue(BaseModel): 38 | column_name: str 39 | 40 | 41 | class Condition(BaseModel): 42 | column: str 43 | operator: Operator 44 | value: Union[str, int, DynamicValue] 45 | 46 | 47 | class Query(BaseModel): 48 | name: Optional[str] = None 49 | table_name: Table 50 | columns: List[Column] 51 | conditions: List[Condition] 52 | order_by: OrderBy 53 | -------------------------------------------------------------------------------- 
/tests/lib/test_old_api.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import openai 4 | from openai.lib._old_api import APIRemovedInV1 5 | 6 | 7 | def test_basic_attribute_access_works() -> None: 8 | for attr in dir(openai): 9 | getattr(openai, attr) 10 | 11 | 12 | def test_helpful_error_is_raised() -> None: 13 | with pytest.raises(APIRemovedInV1): 14 | openai.Completion.create() # type: ignore 15 | 16 | with pytest.raises(APIRemovedInV1): 17 | openai.ChatCompletion.create() # type: ignore 18 | -------------------------------------------------------------------------------- /tests/sample_file.txt: -------------------------------------------------------------------------------- 1 | Hello, world! 2 | -------------------------------------------------------------------------------- /tests/test_utils/test_proxy.py: -------------------------------------------------------------------------------- 1 | import operator 2 | from typing import Any 3 | from typing_extensions import override 4 | 5 | from openai._utils import LazyProxy 6 | 7 | 8 | class RecursiveLazyProxy(LazyProxy[Any]): 9 | @override 10 | def __load__(self) -> Any: 11 | return self 12 | 13 | def __call__(self, *_args: Any, **_kwds: Any) -> Any: 14 | raise RuntimeError("This should never be called!") 15 | 16 | 17 | def test_recursive_proxy() -> None: 18 | proxy = RecursiveLazyProxy() 19 | assert repr(proxy) == "RecursiveLazyProxy" 20 | assert str(proxy) == "RecursiveLazyProxy" 21 | assert dir(proxy) == [] 22 | assert type(proxy).__name__ == "RecursiveLazyProxy" 23 | assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy" 24 | --------------------------------------------------------------------------------