├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ ├── model_provider.md │ └── question.md ├── PULL_REQUEST_TEMPLATE │ └── pull_request_template.md └── workflows │ ├── docs.yml │ ├── issues.yml │ ├── publish.yml │ └── tests.yml ├── .gitignore ├── .prettierrc ├── .vscode └── settings.json ├── AGENTS.md ├── LICENSE ├── Makefile ├── README.md ├── docs ├── agents.md ├── assets │ ├── images │ │ ├── favicon-platform.svg │ │ ├── graph.png │ │ ├── mcp-tracing.jpg │ │ └── orchestration.png │ └── logo.svg ├── config.md ├── context.md ├── examples.md ├── guardrails.md ├── handoffs.md ├── index.md ├── ja │ ├── agents.md │ ├── config.md │ ├── context.md │ ├── examples.md │ ├── guardrails.md │ ├── handoffs.md │ ├── index.md │ ├── mcp.md │ ├── models.md │ ├── models │ │ ├── index.md │ │ └── litellm.md │ ├── multi_agent.md │ ├── quickstart.md │ ├── results.md │ ├── running_agents.md │ ├── streaming.md │ ├── tools.md │ ├── tracing.md │ ├── visualization.md │ └── voice │ │ ├── pipeline.md │ │ ├── quickstart.md │ │ └── tracing.md ├── mcp.md ├── models │ ├── index.md │ └── litellm.md ├── multi_agent.md ├── quickstart.md ├── ref │ ├── agent.md │ ├── agent_output.md │ ├── exceptions.md │ ├── extensions │ │ ├── handoff_filters.md │ │ ├── handoff_prompt.md │ │ └── litellm.md │ ├── function_schema.md │ ├── guardrail.md │ ├── handoffs.md │ ├── index.md │ ├── items.md │ ├── lifecycle.md │ ├── mcp │ │ ├── server.md │ │ └── util.md │ ├── model_settings.md │ ├── models │ │ ├── interface.md │ │ ├── openai_chatcompletions.md │ │ └── openai_responses.md │ ├── result.md │ ├── run.md │ ├── run_context.md │ ├── stream_events.md │ ├── tool.md │ ├── tracing │ │ ├── create.md │ │ ├── index.md │ │ ├── processor_interface.md │ │ ├── processors.md │ │ ├── scope.md │ │ ├── setup.md │ │ ├── span_data.md │ │ ├── spans.md │ │ ├── traces.md │ │ └── util.md │ ├── usage.md │ └── voice │ │ ├── events.md │ │ ├── exceptions.md │ │ ├── input.md │ │ ├── model.md │ │ ├── models │ │ ├── openai_provider.md │ │ ├── openai_stt.md │ │ └── openai_tts.md │ │ ├── pipeline.md │ │ ├── pipeline_config.md │ │ ├── result.md │ │ ├── utils.md │ │ └── workflow.md ├── results.md ├── running_agents.md ├── scripts │ └── translate_docs.py ├── streaming.md ├── stylesheets │ └── extra.css ├── tools.md ├── tracing.md ├── visualization.md └── voice │ ├── pipeline.md │ ├── quickstart.md │ └── tracing.md ├── examples ├── __init__.py ├── agent_patterns │ ├── README.md │ ├── agents_as_tools.py │ ├── deterministic.py │ ├── forcing_tool_use.py │ ├── input_guardrails.py │ ├── llm_as_a_judge.py │ ├── output_guardrails.py │ ├── parallelization.py │ ├── routing.py │ └── streaming_guardrails.py ├── basic │ ├── agent_lifecycle_example.py │ ├── dynamic_system_prompt.py │ ├── hello_world.py │ ├── hello_world_jupyter.py │ ├── lifecycle_example.py │ ├── local_image.py │ ├── media │ │ └── image_bison.jpg │ ├── non_strict_output_type.py │ ├── previous_response_id.py │ ├── remote_image.py │ ├── stream_items.py │ ├── stream_text.py │ └── tools.py ├── customer_service │ └── main.py ├── financial_research_agent │ ├── README.md │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ ├── financials_agent.py │ │ ├── planner_agent.py │ │ ├── risk_agent.py │ │ ├── search_agent.py │ │ ├── verifier_agent.py │ │ └── writer_agent.py │ ├── main.py │ ├── manager.py │ └── printer.py ├── handoffs │ ├── message_filter.py │ └── message_filter_streaming.py ├── hosted_mcp │ ├── __init__.py │ ├── approvals.py │ └── simple.py ├── mcp │ ├── filesystem_example │ │ ├── README.md │ │ 
├── main.py │ │ └── sample_files │ │ │ ├── favorite_books.txt │ │ │ ├── favorite_cities.txt │ │ │ └── favorite_songs.txt │ ├── git_example │ │ ├── README.md │ │ └── main.py │ ├── sse_example │ │ ├── README.md │ │ ├── main.py │ │ └── server.py │ └── streamablehttp_example │ │ ├── README.md │ │ ├── main.py │ │ └── server.py ├── model_providers │ ├── README.md │ ├── custom_example_agent.py │ ├── custom_example_global.py │ ├── custom_example_provider.py │ ├── litellm_auto.py │ └── litellm_provider.py ├── research_bot │ ├── README.md │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ ├── planner_agent.py │ │ ├── search_agent.py │ │ └── writer_agent.py │ ├── main.py │ ├── manager.py │ ├── printer.py │ └── sample_outputs │ │ ├── product_recs.md │ │ ├── product_recs.txt │ │ ├── vacation.md │ │ └── vacation.txt ├── tools │ ├── code_interpreter.py │ ├── computer_use.py │ ├── file_search.py │ ├── image_generator.py │ └── web_search.py └── voice │ ├── __init__.py │ ├── static │ ├── README.md │ ├── __init__.py │ ├── main.py │ └── util.py │ └── streamed │ ├── README.md │ ├── __init__.py │ ├── main.py │ └── my_workflow.py ├── mkdocs.yml ├── pyproject.toml ├── src └── agents │ ├── __init__.py │ ├── _config.py │ ├── _debug.py │ ├── _run_impl.py │ ├── agent.py │ ├── agent_output.py │ ├── computer.py │ ├── exceptions.py │ ├── extensions │ ├── __init__.py │ ├── handoff_filters.py │ ├── handoff_prompt.py │ ├── models │ │ ├── __init__.py │ │ ├── litellm_model.py │ │ └── litellm_provider.py │ └── visualization.py │ ├── function_schema.py │ ├── guardrail.py │ ├── handoffs.py │ ├── items.py │ ├── lifecycle.py │ ├── logger.py │ ├── mcp │ ├── __init__.py │ ├── server.py │ └── util.py │ ├── model_settings.py │ ├── models │ ├── __init__.py │ ├── _openai_shared.py │ ├── chatcmpl_converter.py │ ├── chatcmpl_helpers.py │ ├── chatcmpl_stream_handler.py │ ├── fake_id.py │ ├── interface.py │ ├── multi_provider.py │ ├── openai_chatcompletions.py │ ├── openai_provider.py │ └── openai_responses.py │ ├── py.typed │ ├── result.py │ ├── run.py │ ├── run_context.py │ ├── stream_events.py │ ├── strict_schema.py │ ├── tool.py │ ├── tracing │ ├── __init__.py │ ├── create.py │ ├── logger.py │ ├── processor_interface.py │ ├── processors.py │ ├── scope.py │ ├── setup.py │ ├── span_data.py │ ├── spans.py │ ├── traces.py │ └── util.py │ ├── usage.py │ ├── util │ ├── __init__.py │ ├── _coro.py │ ├── _error_tracing.py │ ├── _json.py │ ├── _pretty_print.py │ ├── _transforms.py │ └── _types.py │ ├── version.py │ └── voice │ ├── __init__.py │ ├── events.py │ ├── exceptions.py │ ├── imports.py │ ├── input.py │ ├── model.py │ ├── models │ ├── __init__.py │ ├── openai_model_provider.py │ ├── openai_stt.py │ └── openai_tts.py │ ├── pipeline.py │ ├── pipeline_config.py │ ├── result.py │ ├── utils.py │ └── workflow.py ├── tests ├── README.md ├── __init__.py ├── conftest.py ├── fake_model.py ├── fastapi │ ├── __init__.py │ ├── streaming_app.py │ └── test_streaming_context.py ├── mcp │ ├── __init__.py │ ├── conftest.py │ ├── helpers.py │ ├── test_caching.py │ ├── test_connect_disconnect.py │ ├── test_mcp_tracing.py │ ├── test_mcp_util.py │ ├── test_runner_calls_mcp.py │ └── test_server_errors.py ├── model_settings │ └── test_serialization.py ├── models │ ├── __init__.py │ ├── conftest.py │ ├── test_litellm_chatcompletions_stream.py │ ├── test_litellm_extra_body.py │ └── test_map.py ├── test_agent_config.py ├── test_agent_hooks.py ├── test_agent_runner.py ├── test_agent_runner_streamed.py ├── test_agent_tracing.py ├── test_cancel_streaming.py ├── 
test_computer_action.py ├── test_config.py ├── test_doc_parsing.py ├── test_extension_filters.py ├── test_extra_headers.py ├── test_function_schema.py ├── test_function_tool.py ├── test_function_tool_decorator.py ├── test_global_hooks.py ├── test_guardrails.py ├── test_handoff_tool.py ├── test_items_helpers.py ├── test_max_turns.py ├── test_openai_chatcompletions.py ├── test_openai_chatcompletions_converter.py ├── test_openai_chatcompletions_stream.py ├── test_openai_responses_converter.py ├── test_output_tool.py ├── test_pretty_print.py ├── test_responses.py ├── test_responses_tracing.py ├── test_result_cast.py ├── test_run_config.py ├── test_run_error_details.py ├── test_run_step_execution.py ├── test_run_step_processing.py ├── test_strict_schema.py ├── test_tool_choice_reset.py ├── test_tool_converter.py ├── test_tool_use_behavior.py ├── test_trace_processor.py ├── test_tracing.py ├── test_tracing_errors.py ├── test_tracing_errors_streamed.py ├── test_usage.py ├── test_visualization.py ├── testing_processor.py ├── tracing │ └── test_processor_api_key.py └── voice │ ├── __init__.py │ ├── conftest.py │ ├── fake_models.py │ ├── helpers.py │ ├── test_input.py │ ├── test_openai_stt.py │ ├── test_openai_tts.py │ ├── test_pipeline.py │ └── test_workflow.py └── uv.lock /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report a bug 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Please read this first 11 | 12 | - **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/) 13 | - **Have you searched for related issues?** Others may have faced similar issues. 14 | 15 | ### Describe the bug 16 | A clear and concise description of what the bug is. 17 | 18 | ### Debug information 19 | - Agents SDK version: (e.g. `v0.0.3`) 20 | - Python version (e.g. Python 3.10) 21 | 22 | ### Repro steps 23 | 24 | Ideally provide a minimal python script that can be run to reproduce the bug. 25 | 26 | 27 | ### Expected behavior 28 | A clear and concise description of what you expected to happen. 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Please read this first 11 | 12 | - **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/) 13 | - **Have you searched for related issues?** Others may have had similar requests 14 | 15 | ### Describe the feature 16 | What is the feature you're requesting? How would it work? Please provide examples and details if possible. 
17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/model_provider.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom model providers 3 | about: Questions or bugs about using non-OpenAI models 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Please read this first 11 | 12 | - **Have you read the custom model provider docs, including the 'Common issues' section?** [Model provider docs](https://openai.github.io/openai-agents-python/models/#using-other-llm-providers) 13 | - **Have you searched for related issues?** Others may have faced similar issues. 14 | 15 | ### Describe the question 16 | A clear and concise description of what the question or bug is. 17 | 18 | ### Debug information 19 | - Agents SDK version: (e.g. `v0.0.3`) 20 | - Python version (e.g. Python 3.10) 21 | 22 | ### Repro steps 23 | Ideally provide a minimal python script that can be run to reproduce the issue. 24 | 25 | ### Expected behavior 26 | A clear and concise description of what you expected to happen. 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Questions about the SDK 4 | title: '' 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Please read this first 11 | 12 | - **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/) 13 | - **Have you searched for related issues?** Others may have had similar requests 14 | 15 | ### Question 16 | Describe your question. Provide details if available. 17 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ### Summary 2 | 3 | 4 | 5 | ### Test plan 6 | 7 | 8 | 9 | ### Issue number 10 | 11 | 12 | 13 | ### Checks 14 | 15 | - [ ] I've added new tests (if relevant) 16 | - [ ] I've added/updated the relevant documentation 17 | - [ ] I've run `make lint` and `make format` 18 | - [ ] I've made sure tests pass 19 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy docs 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["Tests"] 6 | types: 7 | - completed 8 | 9 | permissions: 10 | contents: write # This allows pushing to gh-pages 11 | 12 | jobs: 13 | deploy_docs: 14 | if: ${{ github.event.workflow_run.conclusion == 'success' }} 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout repository 18 | uses: actions/checkout@v4 19 | - name: Setup uv 20 | uses: astral-sh/setup-uv@v5 21 | with: 22 | enable-cache: true 23 | - name: Install dependencies 24 | run: make sync 25 | - name: Deploy docs 26 | run: make deploy-docs 27 | -------------------------------------------------------------------------------- /.github/workflows/issues.yml: -------------------------------------------------------------------------------- 1 | name: Close inactive issues 2 | on: 3 | schedule: 4 | - cron: "30 1 * * *" 5 | 6 | jobs: 7 | close-issues: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | steps: 13 | - uses: actions/stale@v9 14 | with: 15 | 
days-before-issue-stale: 7 16 | days-before-issue-close: 3 17 | stale-issue-label: "stale" 18 | stale-issue-message: "This issue is stale because it has been open for 7 days with no activity." 19 | close-issue-message: "This issue was closed because it has been inactive for 3 days since being marked as stale." 20 | any-of-issue-labels: 'question,needs-more-info' 21 | days-before-pr-stale: 10 22 | days-before-pr-close: 7 23 | stale-pr-label: "stale" 24 | stale-pr-message: "This PR is stale because it has been open for 10 days with no activity." 25 | close-pr-message: "This PR was closed because it has been inactive for 7 days since being marked as stale." 26 | repo-token: ${{ secrets.GITHUB_TOKEN }} 27 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPI 2 | 3 | on: 4 | release: 5 | types: 6 | - published 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | publish: 13 | environment: 14 | name: pypi 15 | url: https://pypi.org/p/openai-agents 16 | permissions: 17 | id-token: write # Important for trusted publishing to PyPI 18 | runs-on: ubuntu-latest 19 | env: 20 | OPENAI_API_KEY: fake-for-tests 21 | 22 | steps: 23 | - name: Checkout repository 24 | uses: actions/checkout@v4 25 | - name: Setup uv 26 | uses: astral-sh/setup-uv@v5 27 | with: 28 | enable-cache: true 29 | - name: Install dependencies 30 | run: make sync 31 | - name: Build package 32 | run: uv build 33 | - name: Publish to PyPI 34 | uses: pypa/gh-action-pypi-publish@release/v1 35 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | # All PRs, including stacked PRs 9 | 10 | env: 11 | UV_FROZEN: "1" 12 | 13 | jobs: 14 | lint: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout repository 18 | uses: actions/checkout@v4 19 | - name: Setup uv 20 | uses: astral-sh/setup-uv@v5 21 | with: 22 | enable-cache: true 23 | - name: Install dependencies 24 | run: make sync 25 | - name: Run lint 26 | run: make lint 27 | 28 | typecheck: 29 | runs-on: ubuntu-latest 30 | steps: 31 | - name: Checkout repository 32 | uses: actions/checkout@v4 33 | - name: Setup uv 34 | uses: astral-sh/setup-uv@v5 35 | with: 36 | enable-cache: true 37 | - name: Install dependencies 38 | run: make sync 39 | - name: Run typecheck 40 | run: make mypy 41 | 42 | tests: 43 | runs-on: ubuntu-latest 44 | env: 45 | OPENAI_API_KEY: fake-for-tests 46 | steps: 47 | - name: Checkout repository 48 | uses: actions/checkout@v4 49 | - name: Setup uv 50 | uses: astral-sh/setup-uv@v5 51 | with: 52 | enable-cache: true 53 | - name: Install dependencies 54 | run: make sync 55 | - name: Run tests with coverage 56 | run: make coverage 57 | 58 | build-docs: 59 | runs-on: ubuntu-latest 60 | env: 61 | OPENAI_API_KEY: fake-for-tests 62 | steps: 63 | - name: Checkout repository 64 | uses: actions/checkout@v4 65 | - name: Setup uv 66 | uses: astral-sh/setup-uv@v5 67 | with: 68 | enable-cache: true 69 | - name: Install dependencies 70 | run: make sync 71 | - name: Build docs 72 | run: make build-docs 73 | 74 | old_versions: 75 | runs-on: ubuntu-latest 76 | env: 77 | OPENAI_API_KEY: fake-for-tests 78 | steps: 79 | - name: Checkout repository 80 | uses: actions/checkout@v4 81 | - name: Setup uv 
82 | uses: astral-sh/setup-uv@v5 83 | with: 84 | enable-cache: true 85 | - name: Install dependencies 86 | run: make sync 87 | - name: Run tests 88 | run: make old_version_tests 89 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # macOS Files 2 | .DS_Store 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | **/__pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pdm 88 | .pdm.toml 89 | .pdm-python 90 | .pdm-build/ 91 | 92 | # PEP 582 93 | __pypackages__/ 94 | 95 | # Celery stuff 96 | celerybeat-schedule 97 | celerybeat.pid 98 | 99 | # SageMath parsed files 100 | *.sage.py 101 | 102 | # Environments 103 | .env 104 | .venv 105 | env/ 106 | venv/ 107 | ENV/ 108 | env.bak/ 109 | venv.bak/ 110 | .venv39 111 | .venv_res 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # pytype static type analyzer 132 | .pytype/ 133 | 134 | # Cython debug symbols 135 | cython_debug/ 136 | 137 | # PyCharm 138 | .idea/ 139 | 140 | # Ruff stuff: 141 | .ruff_cache/ 142 | 143 | # PyPI configuration file 144 | .pypirc 145 | .aider* 146 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "tabWidth": 4, 3 | "overrides": [ 4 | { 5 | "files": "*.yml", 6 | "options": { 7 | "tabWidth": 2 8 | } 9 | } 10 | ] 11 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.testing.pytestArgs": [ 3 | "tests" 4 | ], 5 | "python.testing.unittestEnabled": false, 6 | "python.testing.pytestEnabled": true 7 | } -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | Welcome to the OpenAI Agents SDK repository. This file contains the main points for new contributors. 
2 | 3 | ## Repository overview 4 | 5 | - **Source code**: `src/agents/` contains the implementation. 6 | - **Tests**: `tests/` with a short guide in `tests/README.md`. 7 | - **Examples**: under `examples/`. 8 | - **Documentation**: markdown pages live in `docs/` with `mkdocs.yml` controlling the site. 9 | - **Utilities**: developer commands are defined in the `Makefile`. 10 | - **PR template**: `.github/PULL_REQUEST_TEMPLATE/pull_request_template.md` describes the information every PR must include. 11 | 12 | ## Local workflow 13 | 14 | 1. Format, lint and type‑check your changes: 15 | 16 | ```bash 17 | make format 18 | make lint 19 | make mypy 20 | ``` 21 | 22 | 2. Run the tests: 23 | 24 | ```bash 25 | make tests 26 | ``` 27 | 28 | To run a single test, use `uv run pytest -s -k `. 29 | 30 | 3. Build the documentation (optional but recommended for docs changes): 31 | 32 | ```bash 33 | make build-docs 34 | ``` 35 | 36 | Coverage can be generated with `make coverage`. 37 | 38 | ## Snapshot tests 39 | 40 | Some tests rely on inline snapshots. See `tests/README.md` for details on updating them: 41 | 42 | ```bash 43 | make snapshots-fix # update existing snapshots 44 | make snapshots-create # create new snapshots 45 | ``` 46 | 47 | Run `make tests` again after updating snapshots to ensure they pass. 48 | 49 | ## Style notes 50 | 51 | - Write comments as full sentences and end them with a period. 52 | 53 | ## Pull request expectations 54 | 55 | PRs should use the template located at `.github/PULL_REQUEST_TEMPLATE/pull_request_template.md`. Provide a summary, test plan and issue number if applicable, then check that: 56 | 57 | - New tests are added when needed. 58 | - Documentation is updated. 59 | - `make lint` and `make format` have been run. 60 | - The full test suite passes. 61 | 62 | Commit messages should be concise and written in the imperative mood. Small, focused commits are preferred. 63 | 64 | ## What reviewers look for 65 | 66 | - Tests covering new behaviour. 67 | - Consistent style: code formatted with `ruff format`, imports sorted, and type hints passing `mypy`. 68 | - Clear documentation for any public API changes. 69 | - Clean history and a helpful PR description. 70 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 OpenAI 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: sync 2 | sync: 3 | uv sync --all-extras --all-packages --group dev 4 | 5 | .PHONY: format 6 | format: 7 | uv run ruff format 8 | uv run ruff check --fix 9 | 10 | .PHONY: lint 11 | lint: 12 | uv run ruff check 13 | 14 | .PHONY: mypy 15 | mypy: 16 | uv run mypy . 17 | 18 | .PHONY: tests 19 | tests: 20 | uv run pytest 21 | 22 | .PHONY: coverage 23 | coverage: 24 | 25 | uv run coverage run -m pytest 26 | uv run coverage xml -o coverage.xml 27 | uv run coverage report -m --fail-under=95 28 | 29 | .PHONY: snapshots-fix 30 | snapshots-fix: 31 | uv run pytest --inline-snapshot=fix 32 | 33 | .PHONY: snapshots-create 34 | snapshots-create: 35 | uv run pytest --inline-snapshot=create 36 | 37 | .PHONY: old_version_tests 38 | old_version_tests: 39 | UV_PROJECT_ENVIRONMENT=.venv_39 uv run --python 3.9 -m pytest 40 | 41 | .PHONY: build-docs 42 | build-docs: 43 | uv run mkdocs build 44 | 45 | .PHONY: build-full-docs 46 | build-full-docs: 47 | uv run docs/scripts/translate_docs.py 48 | uv run mkdocs build 49 | 50 | .PHONY: serve-docs 51 | serve-docs: 52 | uv run mkdocs serve 53 | 54 | .PHONY: deploy-docs 55 | deploy-docs: 56 | uv run mkdocs gh-deploy --force --verbose 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /docs/assets/images/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/docs/assets/images/graph.png -------------------------------------------------------------------------------- /docs/assets/images/mcp-tracing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/docs/assets/images/mcp-tracing.jpg -------------------------------------------------------------------------------- /docs/assets/images/orchestration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/docs/assets/images/orchestration.png -------------------------------------------------------------------------------- /docs/assets/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /docs/examples.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | Check out a variety of sample implementations of the SDK in the examples section of the [repo](https://github.com/openai/openai-agents-python/tree/main/examples). The examples are organized into several categories that demonstrate different patterns and capabilities. 
4 | 5 | 6 | ## Categories 7 | 8 | - **[agent_patterns](https://github.com/openai/openai-agents-python/tree/main/examples/agent_patterns):** 9 | Examples in this category illustrate common agent design patterns, such as 10 | 11 | - Deterministic workflows 12 | - Agents as tools 13 | - Parallel agent execution 14 | 15 | - **[basic](https://github.com/openai/openai-agents-python/tree/main/examples/basic):** 16 | These examples showcase foundational capabilities of the SDK, such as 17 | 18 | - Dynamic system prompts 19 | - Streaming outputs 20 | - Lifecycle events 21 | 22 | - **[tool examples](https://github.com/openai/openai-agents-python/tree/main/examples/tools):** 23 | Learn how to implement OAI hosted tools such as web search and file search, 24 | and integrate them into your agents. 25 | 26 | - **[model providers](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers):** 27 | Explore how to use non-OpenAI models with the SDK. 28 | 29 | - **[handoffs](https://github.com/openai/openai-agents-python/tree/main/examples/handoffs):** 30 | See practical examples of agent handoffs. 31 | 32 | - **[mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp):** 33 | Learn how to build agents with MCP. 34 | 35 | - **[customer_service](https://github.com/openai/openai-agents-python/tree/main/examples/customer_service)** and **[research_bot](https://github.com/openai/openai-agents-python/tree/main/examples/research_bot):** 36 | Two more built-out examples that illustrate real-world applications 37 | 38 | - **customer_service**: Example customer service system for an airline. 39 | - **research_bot**: Simple deep research clone. 40 | 41 | - **[voice](https://github.com/openai/openai-agents-python/tree/main/examples/voice):** 42 | See examples of voice agents, using our TTS and STT models. 43 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # OpenAI Agents SDK 2 | 3 | The [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) enables you to build agentic AI apps in a lightweight, easy-to-use package with very few abstractions. It's a production-ready upgrade of our previous experimentation for agents, [Swarm](https://github.com/openai/swarm/tree/main). The Agents SDK has a very small set of primitives: 4 | 5 | - **Agents**, which are LLMs equipped with instructions and tools 6 | - **Handoffs**, which allow agents to delegate to other agents for specific tasks 7 | - **Guardrails**, which enable the inputs to agents to be validated 8 | 9 | In combination with Python, these primitives are powerful enough to express complex relationships between tools and agents, and allow you to build real-world applications without a steep learning curve. In addition, the SDK comes with built-in **tracing** that lets you visualize and debug your agentic flows, as well as evaluate them and even fine-tune models for your application. 10 | 11 | ## Why use the Agents SDK 12 | 13 | The SDK has two driving design principles: 14 | 15 | 1. Enough features to be worth using, but few enough primitives to make it quick to learn. 16 | 2. Works great out of the box, but you can customize exactly what happens. 17 | 18 | Here are the main features of the SDK: 19 | 20 | - Agent loop: Built-in agent loop that handles calling tools, sending results to the LLM, and looping until the LLM is done. 
21 | - Python-first: Use built-in language features to orchestrate and chain agents, rather than needing to learn new abstractions. 22 | - Handoffs: A powerful feature to coordinate and delegate between multiple agents. 23 | - Guardrails: Run input validations and checks in parallel to your agents, breaking early if the checks fail. 24 | - Function tools: Turn any Python function into a tool, with automatic schema generation and Pydantic-powered validation. 25 | - Tracing: Built-in tracing that lets you visualize, debug and monitor your workflows, as well as use the OpenAI suite of evaluation, fine-tuning and distillation tools. 26 | 27 | ## Installation 28 | 29 | ```bash 30 | pip install openai-agents 31 | ``` 32 | 33 | ## Hello world example 34 | 35 | ```python 36 | from agents import Agent, Runner 37 | 38 | agent = Agent(name="Assistant", instructions="You are a helpful assistant") 39 | 40 | result = Runner.run_sync(agent, "Write a haiku about recursion in programming.") 41 | print(result.final_output) 42 | 43 | # Code within the code, 44 | # Functions calling themselves, 45 | # Infinite loop's dance. 46 | ``` 47 | 48 | (_If running this, ensure you set the `OPENAI_API_KEY` environment variable_) 49 | 50 | ```bash 51 | export OPENAI_API_KEY=sk-... 52 | ``` 53 | -------------------------------------------------------------------------------- /docs/ja/config.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # SDK の設定 6 | 7 | ## API キーとクライアント 8 | 9 | デフォルトでは、 SDK はインポートされた時点で LLM リクエストとトレーシングに使用する `OPENAI_API_KEY` 環境変数を探します。アプリ起動前にこの環境変数を設定できない場合は、 [set_default_openai_key()][agents.set_default_openai_key] 関数を利用してキーを設定できます。 10 | 11 | ```python 12 | from agents import set_default_openai_key 13 | 14 | set_default_openai_key("sk-...") 15 | ``` 16 | 17 | また、使用する OpenAI クライアントを構成することも可能です。デフォルトでは、 SDK は環境変数または上記で設定したデフォルトキーを用いて `AsyncOpenAI` インスタンスを作成します。これを変更するには、 [set_default_openai_client()][agents.set_default_openai_client] 関数を使用します。 18 | 19 | ```python 20 | from openai import AsyncOpenAI 21 | from agents import set_default_openai_client 22 | 23 | custom_client = AsyncOpenAI(base_url="...", api_key="...") 24 | set_default_openai_client(custom_client) 25 | ``` 26 | 27 | さらに、使用する OpenAI API をカスタマイズすることもできます。既定では OpenAI Responses API を利用します。これを Chat Completions API に変更するには、 [set_default_openai_api()][agents.set_default_openai_api] 関数を使用してください。 28 | 29 | ```python 30 | from agents import set_default_openai_api 31 | 32 | set_default_openai_api("chat_completions") 33 | ``` 34 | 35 | ## トレーシング 36 | 37 | トレーシングはデフォルトで有効になっています。前述の OpenAI API キー(環境変数または設定したデフォルトキー)が自動的に使用されます。トレーシングで使用する API キーを個別に設定したい場合は、 [`set_tracing_export_api_key`][agents.set_tracing_export_api_key] 関数を利用してください。 38 | 39 | ```python 40 | from agents import set_tracing_export_api_key 41 | 42 | set_tracing_export_api_key("sk-...") 43 | ``` 44 | 45 | トレーシングを完全に無効化するには、 [`set_tracing_disabled()`][agents.set_tracing_disabled] 関数を呼び出します。 46 | 47 | ```python 48 | from agents import set_tracing_disabled 49 | 50 | set_tracing_disabled(True) 51 | ``` 52 | 53 | ## デバッグログ 54 | 55 | SDK にはハンドラーが設定されていない Python ロガーが 2 つあります。デフォルトでは、警告とエラーは `stdout` に出力されますが、それ以外のログは抑制されます。 56 | 57 | 詳細なログを有効にするには、 [`enable_verbose_stdout_logging()`][agents.enable_verbose_stdout_logging] 関数を使用します。 58 | 59 | ```python 60 | from agents import enable_verbose_stdout_logging 61 | 62 | enable_verbose_stdout_logging() 63 | ``` 64 | 65 | 
必要に応じて、ハンドラー、フィルター、フォーマッターなどを追加してログをカスタマイズすることも可能です。詳しくは [Python ロギングガイド](https://docs.python.org/3/howto/logging.html) を参照してください。 66 | 67 | ```python 68 | import logging 69 | 70 | logger = logging.getLogger("openai.agents") # or openai.agents.tracing for the Tracing logger 71 | 72 | # To make all logs show up 73 | logger.setLevel(logging.DEBUG) 74 | # To make info and above show up 75 | logger.setLevel(logging.INFO) 76 | # To make warning and above show up 77 | logger.setLevel(logging.WARNING) 78 | # etc 79 | 80 | # You can customize this as needed, but this will output to `stderr` by default 81 | logger.addHandler(logging.StreamHandler()) 82 | ``` 83 | 84 | ### ログに含まれる機微情報 85 | 86 | 特定のログには機微情報(たとえば ユーザー データ)が含まれる場合があります。この情報が記録されるのを防ぎたい場合は、次の環境変数を設定してください。 87 | 88 | LLM の入力および出力のログを無効にする: 89 | 90 | ```bash 91 | export OPENAI_AGENTS_DONT_LOG_MODEL_DATA=1 92 | ``` 93 | 94 | ツールの入力および出力のログを無効にする: 95 | 96 | ```bash 97 | export OPENAI_AGENTS_DONT_LOG_TOOL_DATA=1 98 | ``` -------------------------------------------------------------------------------- /docs/ja/examples.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # コード例 6 | 7 | リポジトリの [examples セクション](https://github.com/openai/openai-agents-python/tree/main/examples) には、 SDK のさまざまなサンプル実装が用意されています。これらの例は、異なるパターンや機能を示す複数のカテゴリーに整理されています。 8 | 9 | 10 | ## カテゴリー 11 | 12 | - **[agent_patterns](https://github.com/openai/openai-agents-python/tree/main/examples/agent_patterns):** 13 | このカテゴリーの例では、一般的なエージェント設計パターンを紹介しています。 14 | 15 | - 決定論的ワークフロー 16 | - ツールとしてのエージェント 17 | - エージェントの並列実行 18 | 19 | - **[basic](https://github.com/openai/openai-agents-python/tree/main/examples/basic):** 20 | SDK の基礎的な機能を示す例です。 21 | 22 | - 動的なシステムプロンプト 23 | - ストリーミング出力 24 | - ライフサイクルイベント 25 | 26 | - **[tool examples](https://github.com/openai/openai-agents-python/tree/main/examples/tools):** 27 | Web 検索やファイル検索など、 OpenAI がホストするツールの実装方法と、それらをエージェントに統合する方法を学べます。 28 | 29 | - **[model providers](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers):** 30 | OpenAI 以外のモデルを SDK で利用する方法を探ります。 31 | 32 | - **[handoffs](https://github.com/openai/openai-agents-python/tree/main/examples/handoffs):** 33 | エージェントのハンドオフを実践的に示す例です。 34 | 35 | - **[mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp):** 36 | MCP を使ったエージェントの構築方法を学べます。 37 | 38 | - **[customer_service](https://github.com/openai/openai-agents-python/tree/main/examples/customer_service)** と **[research_bot](https://github.com/openai/openai-agents-python/tree/main/examples/research_bot):** 39 | より実践的なユースケースを示す、拡張された 2 つの例です。 40 | 41 | - **customer_service**: 航空会社向けカスタマーサービスシステムの例 42 | - **research_bot**: シンプルなディープリサーチクローン 43 | 44 | - **[voice](https://github.com/openai/openai-agents-python/tree/main/examples/voice):** 45 | TTS と STT モデルを用いた音声エージェントの例をご覧ください。 -------------------------------------------------------------------------------- /docs/ja/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # OpenAI Agents SDK 6 | 7 | [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) は、抽象化をほとんど排した軽量で使いやすいパッケージにより、エージェントベースの AI アプリを構築できるようにします。これは、以前のエージェント向け実験プロジェクトである [Swarm](https://github.com/openai/swarm/tree/main) をプロダクションレベルへとアップグレードしたものです。Agents SDK にはごく少数の基本コンポーネントがあります。 8 | 9 | - **エージェント**: instructions と tools を備えた LLM 10 | - **ハンドオフ**: エージェントが特定タスクを他のエージェントへ委任するしくみ 11 | - **ガードレール**: 
エージェントへの入力を検証する機能 12 | 13 | Python と組み合わせることで、これらのコンポーネントはツールとエージェント間の複雑な関係を表現でき、学習コストを抑えつつ実際のアプリケーションを構築できます。さらに SDK には、エージェントフローを可視化・デバッグできる **トレーシング** が標準搭載されており、評価やファインチューニングにも活用可能です。 14 | 15 | ## Agents SDK を使用する理由 16 | 17 | SDK には 2 つの設計原則があります。 18 | 19 | 1. 使う価値のある十分な機能を備えつつ、学習が早いようコンポーネント数を絞る。 20 | 2. すぐに使い始められる初期設定で動作しつつ、挙動を細かくカスタマイズできる。 21 | 22 | 主な機能は次のとおりです。 23 | 24 | - エージェントループ: ツール呼び出し、結果を LLM に送信、LLM が完了するまでのループを自動で処理。 25 | - Python ファースト: 新しい抽象化を学ばずに、言語標準機能でエージェントをオーケストレーション。 26 | - ハンドオフ: 複数エージェント間の協調と委譲を実現する強力な機能。 27 | - ガードレール: エージェントと並列で入力バリデーションを実行し、失敗時に早期終了。 28 | - 関数ツール: 任意の Python 関数をツール化し、自動スキーマ生成と Pydantic での検証を提供。 29 | - トレーシング: フローの可視化・デバッグ・モニタリングに加え、OpenAI の評価・ファインチューニング・蒸留ツールを利用可能。 30 | 31 | ## インストール 32 | 33 | ```bash 34 | pip install openai-agents 35 | ``` 36 | 37 | ## Hello World の例 38 | 39 | ```python 40 | from agents import Agent, Runner 41 | 42 | agent = Agent(name="Assistant", instructions="You are a helpful assistant") 43 | 44 | result = Runner.run_sync(agent, "Write a haiku about recursion in programming.") 45 | print(result.final_output) 46 | 47 | # Code within the code, 48 | # Functions calling themselves, 49 | # Infinite loop's dance. 50 | ``` 51 | 52 | (_これを実行する場合は、`OPENAI_API_KEY` 環境変数を設定してください_) 53 | 54 | ```bash 55 | export OPENAI_API_KEY=sk-... 56 | ``` -------------------------------------------------------------------------------- /docs/ja/mcp.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # Model context protocol (MCP) 6 | 7 | [Model context protocol](https://modelcontextprotocol.io/introduction)(通称 MCP)は、 LLM にツールとコンテキストを提供するための仕組みです。MCP のドキュメントでは次のように説明されています。 8 | 9 | > MCP は、アプリケーションが LLM にコンテキストを提供する方法を標準化するオープンプロトコルです。MCP は AI アプリケーションにとっての USB‑C ポートのようなものと考えてください。USB‑C が各種デバイスを周辺機器と接続するための標準化された方法を提供するのと同様に、MCP は AI モデルをさまざまなデータソースやツールと接続するための標準化された方法を提供します。 10 | 11 | Agents SDK は MCP をサポートしており、これにより幅広い MCP サーバーをエージェントにツールとして追加できます。 12 | 13 | ## MCP サーバー 14 | 15 | 現在、MCP 仕様では使用するトランスポート方式に基づき 3 種類のサーバーが定義されています。 16 | 17 | 1. **stdio** サーバー: アプリケーションのサブプロセスとして実行されます。ローカルで動かすイメージです。 18 | 2. **HTTP over SSE** サーバー: リモートで動作し、 URL 経由で接続します。 19 | 3. 
**Streamable HTTP** サーバー: MCP 仕様に定義された Streamable HTTP トランスポートを使用してリモートで動作します。 20 | 21 | これらのサーバーへは [`MCPServerStdio`][agents.mcp.server.MCPServerStdio]、[`MCPServerSse`][agents.mcp.server.MCPServerSse]、[`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp] クラスを使用して接続できます。 22 | 23 | たとえば、[公式 MCP filesystem サーバー](https://www.npmjs.com/package/@modelcontextprotocol/server-filesystem)を利用する場合は次のようになります。 24 | 25 | ```python 26 | async with MCPServerStdio( 27 | params={ 28 | "command": "npx", 29 | "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir], 30 | } 31 | ) as server: 32 | tools = await server.list_tools() 33 | ``` 34 | 35 | ## MCP サーバーの利用 36 | 37 | MCP サーバーはエージェントに追加できます。Agents SDK はエージェント実行時に毎回 MCP サーバーへ `list_tools()` を呼び出し、 LLM に MCP サーバーのツールを認識させます。LLM が MCP サーバーのツールを呼び出すと、SDK はそのサーバーへ `call_tool()` を実行します。 38 | 39 | ```python 40 | 41 | agent=Agent( 42 | name="Assistant", 43 | instructions="Use the tools to achieve the task", 44 | mcp_servers=[mcp_server_1, mcp_server_2] 45 | ) 46 | ``` 47 | 48 | ## キャッシュ 49 | 50 | エージェントが実行されるたびに、MCP サーバーへ `list_tools()` が呼び出されます。サーバーがリモートの場合は特にレイテンシが発生します。ツール一覧を自動でキャッシュしたい場合は、[`MCPServerStdio`][agents.mcp.server.MCPServerStdio]、[`MCPServerSse`][agents.mcp.server.MCPServerSse]、[`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp] の各クラスに `cache_tools_list=True` を渡してください。ツール一覧が変更されないと確信できる場合のみ使用してください。 51 | 52 | キャッシュを無効化したい場合は、サーバーで `invalidate_tools_cache()` を呼び出します。 53 | 54 | ## エンドツーエンドのコード例 55 | 56 | 完全な動作例は [examples/mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp) をご覧ください。 57 | 58 | ## トレーシング 59 | 60 | [トレーシング](./tracing.md) は MCP の操作を自動的にキャプチャします。具体的には次の内容が含まれます。 61 | 62 | 1. ツール一覧取得のための MCP サーバー呼び出し 63 | 2. 関数呼び出しに関する MCP 情報 64 | 65 | ![MCP Tracing Screenshot](../assets/images/mcp-tracing.jpg) -------------------------------------------------------------------------------- /docs/ja/models/litellm.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # LiteLLM 経由でのモデル利用 6 | 7 | !!! note 8 | 9 | LiteLLM との統合は現在ベータ版です。特に小規模なモデルプロバイダーでは問題が発生する可能性があります。問題を見つけた場合は、[GitHub Issues](https://github.com/openai/openai-agents-python/issues) からご報告ください。迅速に対応いたします。 10 | 11 | [LiteLLM](https://docs.litellm.ai/docs/) は、1 つのインターフェースで 100 以上のモデルを利用できるライブラリです。Agents SDK では LiteLLM との統合により、任意の AI モデルを使用できます。 12 | 13 | ## セットアップ 14 | 15 | `litellm` がインストールされていることを確認してください。オプションの `litellm` 依存関係グループをインストールすることで対応できます。 16 | 17 | ```bash 18 | pip install "openai-agents[litellm]" 19 | ``` 20 | 21 | インストール後、任意のエージェントで [`LitellmModel`][agents.extensions.models.litellm_model.LitellmModel] を利用できます。 22 | 23 | ## 例 24 | 25 | 以下は動作する完全なサンプルです。実行するとモデル名と API キーの入力を求められます。例えば次のように入力できます。 26 | 27 | - `openai/gpt-4.1` をモデル名に、OpenAI API キーを入力 28 | - `anthropic/claude-3-5-sonnet-20240620` をモデル名に、Anthropic API キーを入力 29 | - その他 30 | 31 | LiteLLM でサポートされているモデルの全リストは、[litellm providers docs](https://docs.litellm.ai/docs/providers) を参照してください。 32 | 33 | ```python 34 | from __future__ import annotations 35 | 36 | import asyncio 37 | 38 | from agents import Agent, Runner, function_tool, set_tracing_disabled 39 | from agents.extensions.models.litellm_model import LitellmModel 40 | 41 | @function_tool 42 | def get_weather(city: str): 43 | print(f"[debug] getting weather for {city}") 44 | return f"The weather in {city} is sunny." 
45 | 46 | 47 | async def main(model: str, api_key: str): 48 | agent = Agent( 49 | name="Assistant", 50 | instructions="You only respond in haikus.", 51 | model=LitellmModel(model=model, api_key=api_key), 52 | tools=[get_weather], 53 | ) 54 | 55 | result = await Runner.run(agent, "What's the weather in Tokyo?") 56 | print(result.final_output) 57 | 58 | 59 | if __name__ == "__main__": 60 | # First try to get model/api key from args 61 | import argparse 62 | 63 | parser = argparse.ArgumentParser() 64 | parser.add_argument("--model", type=str, required=False) 65 | parser.add_argument("--api-key", type=str, required=False) 66 | args = parser.parse_args() 67 | 68 | model = args.model 69 | if not model: 70 | model = input("Enter a model name for Litellm: ") 71 | 72 | api_key = args.api_key 73 | if not api_key: 74 | api_key = input("Enter an API key for Litellm: ") 75 | 76 | asyncio.run(main(model, api_key)) 77 | ``` -------------------------------------------------------------------------------- /docs/ja/multi_agent.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # 複数エージェントのオーケストレーション 6 | 7 | オーケストレーションとは、アプリ内でエージェントがどのように流れるかを指します。どのエージェントが、どの順序で実行され、その後どう決定するかを制御します。エージェントをオーケストレーションする主な方法は次の 2 つです。 8 | 9 | 1. LLM に判断させる: LLM の知能を活用し、計画・推論を行い、その結果に基づいて次のステップを決定します。 10 | 2. コードでオーケストレーションする: コード側でエージェントの流れを定義します。 11 | 12 | これらのパターンは組み合わせて使用できます。それぞれにトレードオフがあり、以下で説明します。 13 | 14 | ## LLM によるオーケストレーション 15 | 16 | エージェントとは、 instructions、ツール、ハンドオフを備えた LLM です。オープンエンドなタスクが与えられた場合、 LLM はタスクをどのように進めるかを自律的に計画し、ツールを使ってアクションやデータ取得を行い、ハンドオフでサブエージェントへタスクを委譲できます。たとえば、リサーチエージェントには次のようなツールを装備できます。 17 | 18 | - Web 検索でオンライン情報を取得する 19 | - ファイル検索で独自データや接続を調べる 20 | - コンピュータ操作でコンピュータ上のアクションを実行する 21 | - コード実行でデータ分析を行う 22 | - 計画立案やレポート作成などに長けた専門エージェントへのハンドオフ 23 | 24 | このパターンはタスクがオープンエンドで、 LLM の知能に頼りたい場合に最適です。重要な戦術は次のとおりです。 25 | 26 | 1. 良いプロンプトに投資する。利用可能なツール、使い方、守るべきパラメーターを明確に示します。 27 | 2. アプリを監視し、改善を繰り返す。問題が起きた箇所を特定し、プロンプトを改善します。 28 | 3. エージェントに内省と改善を許可する。たとえばループで実行し自己批評させたり、エラーメッセージを渡して修正させたりします。 29 | 4. 何でもこなす汎用エージェントより、特定タスクに特化したエージェントを用意します。 30 | 5. 
[evals](https://platform.openai.com/docs/guides/evals) に投資する。これによりエージェントを訓練し、タスク性能を向上できます。 31 | 32 | ## コードによるオーケストレーション 33 | 34 | LLM によるオーケストレーションは強力ですが、コードでオーケストレーションすると速度・コスト・性能の面でより決定的かつ予測可能になります。よく使われるパターンは次のとおりです。 35 | 36 | - [structured outputs](https://platform.openai.com/docs/guides/structured-outputs) を使って、コード側で検査できる 適切な形式のデータ を生成する。たとえばエージェントにタスクをいくつかのカテゴリーに分類させ、そのカテゴリーに応じて次のエージェントを選択します。 37 | - あるエージェントの出力を次のエージェントの入力に変換して複数エージェントをチェーンする。ブログ記事執筆を「リサーチ → アウトライン作成 → 記事執筆 → 批評 → 改善」という一連のステップに分解できます。 38 | - タスクを実行するエージェントを `while` ループで回し、評価とフィードバックを行うエージェントと組み合わせ、評価者が基準を満たしたと判断するまで繰り返します。 39 | - `asyncio.gather` など Python の基本コンポーネントを用いて複数エージェントを並列実行する。互いに依存しない複数タスクがある場合に高速化できます。 40 | 41 | [`examples/agent_patterns`](https://github.com/openai/openai-agents-python/tree/main/examples/agent_patterns) には多数のコード例があります。 -------------------------------------------------------------------------------- /docs/ja/results.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # 結果 6 | 7 | `Runner.run` メソッドを呼び出すと、以下のいずれかが返されます。 8 | 9 | - `run` または `run_sync` を呼び出した場合は [`RunResult`][agents.result.RunResult] 10 | - `run_streamed` を呼び出した場合は [`RunResultStreaming`][agents.result.RunResultStreaming] 11 | 12 | これらはどちらも [`RunResultBase`][agents.result.RunResultBase] を継承しており、ほとんどの有用な情報はここに格納されています。 13 | 14 | ## 最終出力 15 | 16 | [`final_output`][agents.result.RunResultBase.final_output] プロパティには、最後に実行されたエージェントの最終出力が格納されます。内容は以下のいずれかです。 17 | 18 | - `output_type` が定義されていない場合は `str` 19 | - `output_type` が定義されている場合は `last_agent.output_type` 型のオブジェクト 20 | 21 | !!! note 22 | 23 | `final_output` の型は `Any` です。ハンドオフが発生する可能性があるため、静的に型付けできません。ハンドオフが発生すると、どのエージェントでも最後になり得るため、可能性のある出力型を静的に特定できないのです。 24 | 25 | ## 次のターンへの入力 26 | 27 | [`result.to_input_list()`][agents.result.RunResultBase.to_input_list] を使用すると、エージェント実行中に生成されたアイテムを元の入力に連結した入力リストへ変換できます。これにより、あるエージェント実行の出力を別の実行へ渡したり、ループで実行して毎回新しいユーザー入力を追加したりすることが容易になります。 28 | 29 | ## 最後のエージェント 30 | 31 | [`last_agent`][agents.result.RunResultBase.last_agent] プロパティには、最後に実行されたエージェントが格納されています。アプリケーションによっては、次回ユーザーが入力する際にこれが役立つことがよくあります。例えば、フロントラインのトリアージ エージェントが言語専用のエージェントにハンドオフする場合、最後のエージェントを保存しておき、ユーザーが次にメッセージを送ったときに再利用できます。 32 | 33 | ## 新しいアイテム 34 | 35 | [`new_items`][agents.result.RunResultBase.new_items] プロパティには、実行中に生成された新しいアイテムが含まれます。これらのアイテムは [`RunItem`][agents.items.RunItem] です。RunItem は、 LLM が生成した raw アイテムをラップします。 36 | 37 | - [`MessageOutputItem`][agents.items.MessageOutputItem] — LLM からのメッセージを示します。 raw アイテムは生成されたメッセージです。 38 | - [`HandoffCallItem`][agents.items.HandoffCallItem] — LLM がハンドオフ ツールを呼び出したことを示します。 raw アイテムは LLM からのツール呼び出しアイテムです。 39 | - [`HandoffOutputItem`][agents.items.HandoffOutputItem] — ハンドオフが発生したことを示します。 raw アイテムはハンドオフ ツール呼び出しに対するツール応答です。また、アイテムから送信元 / 送信先エージェントにもアクセスできます。 40 | - [`ToolCallItem`][agents.items.ToolCallItem] — LLM がツールを呼び出したことを示します。 41 | - [`ToolCallOutputItem`][agents.items.ToolCallOutputItem] — ツールが呼び出されたことを示します。 raw アイテムはツール応答です。また、アイテムからツール出力にもアクセスできます。 42 | - [`ReasoningItem`][agents.items.ReasoningItem] — LLM からの推論アイテムを示します。 raw アイテムは生成された推論内容です。 43 | 44 | ## その他の情報 45 | 46 | ### ガードレール結果 47 | 48 | [`input_guardrail_results`][agents.result.RunResultBase.input_guardrail_results] と [`output_guardrail_results`][agents.result.RunResultBase.output_guardrail_results] プロパティには、ガードレールの結果が存在する場合に格納されます。ガードレール結果には、ログや保存を行いたい有用な情報が含まれることがあるため、これらを参照できるようにしています。 49 | 50 | ### raw レスポンス 51 | 52 | [`raw_responses`][agents.result.RunResultBase.raw_responses] プロパティには、 LLM 
が生成した [`ModelResponse`][agents.items.ModelResponse] が格納されます。 53 | 54 | ### 元の入力 55 | 56 | [`input`][agents.result.RunResultBase.input] プロパティには、`run` メソッドに渡した元の入力が格納されます。ほとんどの場合は必要ありませんが、必要に応じて参照できるように用意されています。 -------------------------------------------------------------------------------- /docs/ja/visualization.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # エージェントの可視化 6 | 7 | エージェントの可視化を使用すると、 ** Graphviz ** を用いてエージェントとその関係を構造化されたグラフィカル表現として生成できます。これは、アプリケーション内でエージェント、ツール、handoffs がどのように相互作用するかを理解するのに役立ちます。 8 | 9 | ## インストール 10 | 11 | オプションの `viz` 依存関係グループをインストールします: 12 | 13 | ```bash 14 | pip install "openai-agents[viz]" 15 | ``` 16 | 17 | ## グラフの生成 18 | 19 | `draw_graph` 関数を使用してエージェントの可視化を生成できます。この関数は有向グラフを作成し、以下のように表現します。 20 | 21 | - **エージェント** は黄色のボックスで表されます。 22 | - **ツール** は緑色の楕円で表されます。 23 | - **handoffs** はエージェント間の有向エッジで示されます。 24 | 25 | ### 使用例 26 | 27 | ```python 28 | from agents import Agent, function_tool 29 | from agents.extensions.visualization import draw_graph 30 | 31 | @function_tool 32 | def get_weather(city: str) -> str: 33 | return f"The weather in {city} is sunny." 34 | 35 | spanish_agent = Agent( 36 | name="Spanish agent", 37 | instructions="You only speak Spanish.", 38 | ) 39 | 40 | english_agent = Agent( 41 | name="English agent", 42 | instructions="You only speak English", 43 | ) 44 | 45 | triage_agent = Agent( 46 | name="Triage agent", 47 | instructions="Handoff to the appropriate agent based on the language of the request.", 48 | handoffs=[spanish_agent, english_agent], 49 | tools=[get_weather], 50 | ) 51 | 52 | draw_graph(triage_agent) 53 | ``` 54 | 55 | ![Agent Graph](../assets/images/graph.png) 56 | 57 | これにより、 **triage agent** の構造と、それがサブエージェントやツールとどのようにつながっているかを視覚的に表すグラフが生成されます。 58 | 59 | ## 可視化の理解 60 | 61 | 生成されたグラフには次の要素が含まれます。 62 | 63 | - エントリーポイントを示す **start node** (`__start__`) 64 | - 黄色の塗りつぶしを持つ **矩形** のエージェント 65 | - 緑色の塗りつぶしを持つ **楕円** のツール 66 | - 相互作用を示す有向エッジ 67 | - エージェント間の handoffs には **実線の矢印** 68 | - ツール呼び出しには **破線の矢印** 69 | - 実行が終了する位置を示す **end node** (`__end__`) 70 | 71 | ## グラフのカスタマイズ 72 | 73 | ### グラフの表示 74 | デフォルトでは、`draw_graph` はグラフをインラインで表示します。別ウィンドウでグラフを表示するには、次のように記述します。 75 | 76 | ```python 77 | draw_graph(triage_agent).view() 78 | ``` 79 | 80 | ### グラフの保存 81 | デフォルトでは、`draw_graph` はグラフをインラインで表示します。ファイルとして保存するには、ファイル名を指定します: 82 | 83 | ```python 84 | draw_graph(triage_agent, filename="agent_graph") 85 | ``` 86 | 87 | これにより、作業ディレクトリに `agent_graph.png` が生成されます。 88 | -------------------------------------------------------------------------------- /docs/ja/voice/tracing.md: -------------------------------------------------------------------------------- 1 | --- 2 | search: 3 | exclude: true 4 | --- 5 | # トレーシング 6 | 7 | [エージェントのトレーシング](../tracing.md) と同様に、音声パイプラインも自動的にトレーシングされます。 8 | 9 | 基本的なトレーシング情報については上記のドキュメントを参照してください。さらに、[`VoicePipelineConfig`][agents.voice.pipeline_config.VoicePipelineConfig] でパイプラインのトレーシング設定を行えます。 10 | 11 | 主なトレーシング関連フィールドは次のとおりです。 12 | 13 | - [`tracing_disabled`][agents.voice.pipeline_config.VoicePipelineConfig.tracing_disabled]:トレーシングを無効にするかどうかを制御します。デフォルトではトレーシングは有効です。 14 | - [`trace_include_sensitive_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_data]:トレースに音声テキストなどの機微なデータを含めるかどうかを制御します。これは音声パイプライン専用であり、Workflow 内部で発生する処理には影響しません。 15 | - [`trace_include_sensitive_audio_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_audio_data]:トレースに音声データを含めるかどうかを制御します。 16 | - 
[`workflow_name`][agents.voice.pipeline_config.VoicePipelineConfig.workflow_name]:トレース Workflow の名前です。 17 | - [`group_id`][agents.voice.pipeline_config.VoicePipelineConfig.group_id]:複数のトレースを関連付けるための `group_id` です。 18 | - [`trace_metadata`][agents.voice.pipeline_config.VoicePipelineConfig.tracing_disabled]:トレースに追加するメタデータです。 -------------------------------------------------------------------------------- /docs/models/litellm.md: -------------------------------------------------------------------------------- 1 | # Using any model via LiteLLM 2 | 3 | !!! note 4 | 5 | The LiteLLM integration is in beta. You may run into issues with some model providers, especially smaller ones. Please report any issues via [Github issues](https://github.com/openai/openai-agents-python/issues) and we'll fix quickly. 6 | 7 | [LiteLLM](https://docs.litellm.ai/docs/) is a library that allows you to use 100+ models via a single interface. We've added a LiteLLM integration to allow you to use any AI model in the Agents SDK. 8 | 9 | ## Setup 10 | 11 | You'll need to ensure `litellm` is available. You can do this by installing the optional `litellm` dependency group: 12 | 13 | ```bash 14 | pip install "openai-agents[litellm]" 15 | ``` 16 | 17 | Once done, you can use [`LitellmModel`][agents.extensions.models.litellm_model.LitellmModel] in any agent. 18 | 19 | ## Example 20 | 21 | This is a fully working example. When you run it, you'll be prompted for a model name and API key. For example, you could enter: 22 | 23 | - `openai/gpt-4.1` for the model, and your OpenAI API key 24 | - `anthropic/claude-3-5-sonnet-20240620` for the model, and your Anthropic API key 25 | - etc 26 | 27 | For a full list of models supported in LiteLLM, see the [litellm providers docs](https://docs.litellm.ai/docs/providers). 28 | 29 | ```python 30 | from __future__ import annotations 31 | 32 | import asyncio 33 | 34 | from agents import Agent, Runner, function_tool, set_tracing_disabled 35 | from agents.extensions.models.litellm_model import LitellmModel 36 | 37 | @function_tool 38 | def get_weather(city: str): 39 | print(f"[debug] getting weather for {city}") 40 | return f"The weather in {city} is sunny." 
41 | 42 | 43 | async def main(model: str, api_key: str): 44 | agent = Agent( 45 | name="Assistant", 46 | instructions="You only respond in haikus.", 47 | model=LitellmModel(model=model, api_key=api_key), 48 | tools=[get_weather], 49 | ) 50 | 51 | result = await Runner.run(agent, "What's the weather in Tokyo?") 52 | print(result.final_output) 53 | 54 | 55 | if __name__ == "__main__": 56 | # First try to get model/api key from args 57 | import argparse 58 | 59 | parser = argparse.ArgumentParser() 60 | parser.add_argument("--model", type=str, required=False) 61 | parser.add_argument("--api-key", type=str, required=False) 62 | args = parser.parse_args() 63 | 64 | model = args.model 65 | if not model: 66 | model = input("Enter a model name for Litellm: ") 67 | 68 | api_key = args.api_key 69 | if not api_key: 70 | api_key = input("Enter an API key for Litellm: ") 71 | 72 | asyncio.run(main(model, api_key)) 73 | ``` 74 | -------------------------------------------------------------------------------- /docs/ref/agent.md: -------------------------------------------------------------------------------- 1 | # `Agents` 2 | 3 | ::: agents.agent 4 | -------------------------------------------------------------------------------- /docs/ref/agent_output.md: -------------------------------------------------------------------------------- 1 | # `Agent output` 2 | 3 | ::: agents.agent_output 4 | -------------------------------------------------------------------------------- /docs/ref/exceptions.md: -------------------------------------------------------------------------------- 1 | # `Exceptions` 2 | 3 | ::: agents.exceptions 4 | -------------------------------------------------------------------------------- /docs/ref/extensions/handoff_filters.md: -------------------------------------------------------------------------------- 1 | # `Handoff filters` 2 | 3 | ::: agents.extensions.handoff_filters 4 | -------------------------------------------------------------------------------- /docs/ref/extensions/handoff_prompt.md: -------------------------------------------------------------------------------- 1 | # `Handoff prompt` 2 | 3 | ::: agents.extensions.handoff_prompt 4 | 5 | options: 6 | members: 7 | - RECOMMENDED_PROMPT_PREFIX 8 | - prompt_with_handoff_instructions 9 | -------------------------------------------------------------------------------- /docs/ref/extensions/litellm.md: -------------------------------------------------------------------------------- 1 | # `LiteLLM Models` 2 | 3 | ::: agents.extensions.models.litellm_model 4 | -------------------------------------------------------------------------------- /docs/ref/function_schema.md: -------------------------------------------------------------------------------- 1 | # `Function schema` 2 | 3 | ::: agents.function_schema 4 | -------------------------------------------------------------------------------- /docs/ref/guardrail.md: -------------------------------------------------------------------------------- 1 | # `Guardrails` 2 | 3 | ::: agents.guardrail 4 | -------------------------------------------------------------------------------- /docs/ref/handoffs.md: -------------------------------------------------------------------------------- 1 | # `Handoffs` 2 | 3 | ::: agents.handoffs 4 | -------------------------------------------------------------------------------- /docs/ref/index.md: -------------------------------------------------------------------------------- 1 | # Agents module 2 | 3 | ::: agents 4 | 5 | options: 6 | members: 7 | - 
set_default_openai_key 8 | - set_default_openai_client 9 | - set_default_openai_api 10 | - set_tracing_export_api_key 11 | - set_tracing_disabled 12 | - set_trace_processors 13 | - enable_verbose_stdout_logging 14 | -------------------------------------------------------------------------------- /docs/ref/items.md: -------------------------------------------------------------------------------- 1 | # `Items` 2 | 3 | ::: agents.items 4 | -------------------------------------------------------------------------------- /docs/ref/lifecycle.md: -------------------------------------------------------------------------------- 1 | # `Lifecycle` 2 | 3 | ::: agents.lifecycle 4 | 5 | options: 6 | show_source: false 7 | -------------------------------------------------------------------------------- /docs/ref/mcp/server.md: -------------------------------------------------------------------------------- 1 | # `MCP Servers` 2 | 3 | ::: agents.mcp.server 4 | -------------------------------------------------------------------------------- /docs/ref/mcp/util.md: -------------------------------------------------------------------------------- 1 | # `MCP Util` 2 | 3 | ::: agents.mcp.util 4 | -------------------------------------------------------------------------------- /docs/ref/model_settings.md: -------------------------------------------------------------------------------- 1 | # `Model settings` 2 | 3 | ::: agents.model_settings 4 | -------------------------------------------------------------------------------- /docs/ref/models/interface.md: -------------------------------------------------------------------------------- 1 | # `Model interface` 2 | 3 | ::: agents.models.interface 4 | -------------------------------------------------------------------------------- /docs/ref/models/openai_chatcompletions.md: -------------------------------------------------------------------------------- 1 | # `OpenAI Chat Completions model` 2 | 3 | ::: agents.models.openai_chatcompletions 4 | -------------------------------------------------------------------------------- /docs/ref/models/openai_responses.md: -------------------------------------------------------------------------------- 1 | # `OpenAI Responses model` 2 | 3 | ::: agents.models.openai_responses 4 | -------------------------------------------------------------------------------- /docs/ref/result.md: -------------------------------------------------------------------------------- 1 | # `Results` 2 | 3 | ::: agents.result 4 | -------------------------------------------------------------------------------- /docs/ref/run.md: -------------------------------------------------------------------------------- 1 | # `Runner` 2 | 3 | ::: agents.run 4 | 5 | options: 6 | members: 7 | - Runner 8 | - RunConfig 9 | -------------------------------------------------------------------------------- /docs/ref/run_context.md: -------------------------------------------------------------------------------- 1 | # `Run context` 2 | 3 | ::: agents.run_context 4 | -------------------------------------------------------------------------------- /docs/ref/stream_events.md: -------------------------------------------------------------------------------- 1 | # `Streaming events` 2 | 3 | ::: agents.stream_events 4 | -------------------------------------------------------------------------------- /docs/ref/tool.md: -------------------------------------------------------------------------------- 1 | # `Tools` 2 | 3 | ::: agents.tool 4 | 
-------------------------------------------------------------------------------- /docs/ref/tracing/create.md: -------------------------------------------------------------------------------- 1 | # `Creating traces/spans` 2 | 3 | ::: agents.tracing.create 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/index.md: -------------------------------------------------------------------------------- 1 | # Tracing module 2 | 3 | ::: agents.tracing 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/processor_interface.md: -------------------------------------------------------------------------------- 1 | # `Processor interface` 2 | 3 | ::: agents.tracing.processor_interface 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/processors.md: -------------------------------------------------------------------------------- 1 | # `Processors` 2 | 3 | ::: agents.tracing.processors 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/scope.md: -------------------------------------------------------------------------------- 1 | # `Scope` 2 | 3 | ::: agents.tracing.scope 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/setup.md: -------------------------------------------------------------------------------- 1 | # `Setup` 2 | 3 | ::: agents.tracing.setup 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/span_data.md: -------------------------------------------------------------------------------- 1 | # `Span data` 2 | 3 | ::: agents.tracing.span_data 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/spans.md: -------------------------------------------------------------------------------- 1 | # `Spans` 2 | 3 | ::: agents.tracing.spans 4 | 5 | options: 6 | members: 7 | - Span 8 | - NoOpSpan 9 | - SpanImpl 10 | -------------------------------------------------------------------------------- /docs/ref/tracing/traces.md: -------------------------------------------------------------------------------- 1 | # `Traces` 2 | 3 | ::: agents.tracing.traces 4 | -------------------------------------------------------------------------------- /docs/ref/tracing/util.md: -------------------------------------------------------------------------------- 1 | # `Util` 2 | 3 | ::: agents.tracing.util 4 | -------------------------------------------------------------------------------- /docs/ref/usage.md: -------------------------------------------------------------------------------- 1 | # `Usage` 2 | 3 | ::: agents.usage 4 | -------------------------------------------------------------------------------- /docs/ref/voice/events.md: -------------------------------------------------------------------------------- 1 | # `Events` 2 | 3 | ::: agents.voice.events 4 | -------------------------------------------------------------------------------- /docs/ref/voice/exceptions.md: -------------------------------------------------------------------------------- 1 | # `Exceptions` 2 | 3 | ::: agents.voice.exceptions 4 | -------------------------------------------------------------------------------- /docs/ref/voice/input.md: -------------------------------------------------------------------------------- 1 | # `Input` 2 | 3 | ::: agents.voice.input 4 | 
-------------------------------------------------------------------------------- /docs/ref/voice/model.md: -------------------------------------------------------------------------------- 1 | # `Model` 2 | 3 | ::: agents.voice.model 4 | -------------------------------------------------------------------------------- /docs/ref/voice/models/openai_provider.md: -------------------------------------------------------------------------------- 1 | # `OpenAIVoiceModelProvider` 2 | 3 | ::: agents.voice.models.openai_model_provider 4 | -------------------------------------------------------------------------------- /docs/ref/voice/models/openai_stt.md: -------------------------------------------------------------------------------- 1 | # `OpenAI STT` 2 | 3 | ::: agents.voice.models.openai_stt 4 | -------------------------------------------------------------------------------- /docs/ref/voice/models/openai_tts.md: -------------------------------------------------------------------------------- 1 | # `OpenAI TTS` 2 | 3 | ::: agents.voice.models.openai_tts 4 | -------------------------------------------------------------------------------- /docs/ref/voice/pipeline.md: -------------------------------------------------------------------------------- 1 | # `Pipeline` 2 | 3 | ::: agents.voice.pipeline 4 | -------------------------------------------------------------------------------- /docs/ref/voice/pipeline_config.md: -------------------------------------------------------------------------------- 1 | # `Pipeline Config` 2 | 3 | ::: agents.voice.pipeline_config 4 | -------------------------------------------------------------------------------- /docs/ref/voice/result.md: -------------------------------------------------------------------------------- 1 | # `Result` 2 | 3 | ::: agents.voice.result 4 | -------------------------------------------------------------------------------- /docs/ref/voice/utils.md: -------------------------------------------------------------------------------- 1 | # `Utils` 2 | 3 | ::: agents.voice.utils 4 | -------------------------------------------------------------------------------- /docs/ref/voice/workflow.md: -------------------------------------------------------------------------------- 1 | # `Workflow` 2 | 3 | ::: agents.voice.workflow 4 | -------------------------------------------------------------------------------- /docs/visualization.md: -------------------------------------------------------------------------------- 1 | # Agent Visualization 2 | 3 | Agent visualization allows you to generate a structured graphical representation of agents and their relationships using **Graphviz**. This is useful for understanding how agents, tools, and handoffs interact within an application. 4 | 5 | ## Installation 6 | 7 | Install the optional `viz` dependency group: 8 | 9 | ```bash 10 | pip install "openai-agents[viz]" 11 | ``` 12 | 13 | ## Generating a Graph 14 | 15 | You can generate an agent visualization using the `draw_graph` function. This function creates a directed graph where: 16 | 17 | - **Agents** are represented as yellow boxes. 18 | - **Tools** are represented as green ellipses. 19 | - **Handoffs** are directed edges from one agent to another. 20 | 21 | ### Example Usage 22 | 23 | ```python 24 | from agents import Agent, function_tool 25 | from agents.extensions.visualization import draw_graph 26 | 27 | @function_tool 28 | def get_weather(city: str) -> str: 29 | return f"The weather in {city} is sunny." 
30 | 31 | spanish_agent = Agent( 32 | name="Spanish agent", 33 | instructions="You only speak Spanish.", 34 | ) 35 | 36 | english_agent = Agent( 37 | name="English agent", 38 | instructions="You only speak English", 39 | ) 40 | 41 | triage_agent = Agent( 42 | name="Triage agent", 43 | instructions="Handoff to the appropriate agent based on the language of the request.", 44 | handoffs=[spanish_agent, english_agent], 45 | tools=[get_weather], 46 | ) 47 | 48 | draw_graph(triage_agent) 49 | ``` 50 | 51 | ![Agent Graph](./assets/images/graph.png) 52 | 53 | This generates a graph that visually represents the structure of the **triage agent** and its connections to sub-agents and tools. 54 | 55 | 56 | ## Understanding the Visualization 57 | 58 | The generated graph includes: 59 | 60 | - A **start node** (`__start__`) indicating the entry point. 61 | - Agents represented as **rectangles** with yellow fill. 62 | - Tools represented as **ellipses** with green fill. 63 | - Directed edges indicating interactions: 64 | - **Solid arrows** for agent-to-agent handoffs. 65 | - **Dotted arrows** for tool invocations. 66 | - An **end node** (`__end__`) indicating where execution terminates. 67 | 68 | ## Customizing the Graph 69 | 70 | ### Showing the Graph 71 | By default, `draw_graph` displays the graph inline. To show the graph in a separate window, write the following: 72 | 73 | ```python 74 | draw_graph(triage_agent).view() 75 | ``` 76 | 77 | ### Saving the Graph 78 | By default, `draw_graph` displays the graph inline. To save it as a file, specify a filename: 79 | 80 | ```python 81 | draw_graph(triage_agent, filename="agent_graph") 82 | ``` 83 | 84 | This will generate `agent_graph.png` in the working directory. 85 | -------------------------------------------------------------------------------- /docs/voice/tracing.md: -------------------------------------------------------------------------------- 1 | # Tracing 2 | 3 | Just like [agents are traced](../tracing.md), voice pipelines are also automatically traced. 4 | 5 | You can read the tracing doc above for basic tracing information, but you can additionally configure tracing of a pipeline via [`VoicePipelineConfig`][agents.voice.pipeline_config.VoicePipelineConfig]. 6 | 7 | Key tracing-related fields are: 8 | 9 | - [`tracing_disabled`][agents.voice.pipeline_config.VoicePipelineConfig.tracing_disabled]: controls whether tracing is disabled. By default, tracing is enabled. 10 | - [`trace_include_sensitive_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_data]: controls whether traces include potentially sensitive data, like audio transcripts. This is specifically for the voice pipeline, and not for anything that goes on inside your Workflow. 11 | - [`trace_include_sensitive_audio_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_audio_data]: controls whether traces include audio data. 12 | - [`workflow_name`][agents.voice.pipeline_config.VoicePipelineConfig.workflow_name]: The name of the trace workflow. 13 | - [`group_id`][agents.voice.pipeline_config.VoicePipelineConfig.group_id]: The `group_id` of the trace, which lets you link multiple traces. 14 | - [`trace_metadata`][agents.voice.pipeline_config.VoicePipelineConfig.trace_metadata]: Additional metadata to include with the trace.
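As a quick illustration, here is a minimal sketch of how these fields might be passed when constructing a pipeline. It assumes the `SingleAgentVoiceWorkflow` helper and an already-defined agent; the workflow name, group ID, and metadata values are placeholders:

```python
from agents import Agent
from agents.voice import SingleAgentVoiceWorkflow, VoicePipeline, VoicePipelineConfig

agent = Agent(name="Assistant", instructions="You are a helpful voice assistant.")

# Name the trace workflow, link related runs with a shared group_id, and
# keep transcripts and raw audio out of the exported trace data.
config = VoicePipelineConfig(
    workflow_name="Voice support flow",
    group_id="conversation-1234",
    trace_include_sensitive_data=False,
    trace_include_sensitive_audio_data=False,
    trace_metadata={"channel": "demo"},
)

pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent), config=config)
```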
15 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- 1 | # Make the examples directory into a package to avoid top-level module name collisions. 2 | # This is needed so that mypy treats files like examples/customer_service/main.py and 3 | # examples/researcher_app/main.py as distinct modules rather than both named "main". 4 | -------------------------------------------------------------------------------- /examples/agent_patterns/agents_as_tools.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, ItemHelpers, MessageOutputItem, Runner, trace 4 | 5 | """ 6 | This example shows the agents-as-tools pattern. The frontline agent receives a user message and 7 | then picks which agents to call, as tools. In this case, it picks from a set of translation 8 | agents. 9 | """ 10 | 11 | spanish_agent = Agent( 12 | name="spanish_agent", 13 | instructions="You translate the user's message to Spanish", 14 | handoff_description="An english to spanish translator", 15 | ) 16 | 17 | french_agent = Agent( 18 | name="french_agent", 19 | instructions="You translate the user's message to French", 20 | handoff_description="An english to french translator", 21 | ) 22 | 23 | italian_agent = Agent( 24 | name="italian_agent", 25 | instructions="You translate the user's message to Italian", 26 | handoff_description="An english to italian translator", 27 | ) 28 | 29 | orchestrator_agent = Agent( 30 | name="orchestrator_agent", 31 | instructions=( 32 | "You are a translation agent. You use the tools given to you to translate." 33 | "If asked for multiple translations, you call the relevant tools in order." 34 | "You never translate on your own, you always use the provided tools." 35 | ), 36 | tools=[ 37 | spanish_agent.as_tool( 38 | tool_name="translate_to_spanish", 39 | tool_description="Translate the user's message to Spanish", 40 | ), 41 | french_agent.as_tool( 42 | tool_name="translate_to_french", 43 | tool_description="Translate the user's message to French", 44 | ), 45 | italian_agent.as_tool( 46 | tool_name="translate_to_italian", 47 | tool_description="Translate the user's message to Italian", 48 | ), 49 | ], 50 | ) 51 | 52 | synthesizer_agent = Agent( 53 | name="synthesizer_agent", 54 | instructions="You inspect translations, correct them if needed, and produce a final concatenated response.", 55 | ) 56 | 57 | 58 | async def main(): 59 | msg = input("Hi! What would you like translated, and to which languages? 
") 60 | 61 | # Run the entire orchestration in a single trace 62 | with trace("Orchestrator evaluator"): 63 | orchestrator_result = await Runner.run(orchestrator_agent, msg) 64 | 65 | for item in orchestrator_result.new_items: 66 | if isinstance(item, MessageOutputItem): 67 | text = ItemHelpers.text_message_output(item) 68 | if text: 69 | print(f" - Translation step: {text}") 70 | 71 | synthesizer_result = await Runner.run( 72 | synthesizer_agent, orchestrator_result.to_input_list() 73 | ) 74 | 75 | print(f"\n\nFinal response:\n{synthesizer_result.final_output}") 76 | 77 | 78 | if __name__ == "__main__": 79 | asyncio.run(main()) 80 | -------------------------------------------------------------------------------- /examples/agent_patterns/deterministic.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from pydantic import BaseModel 4 | 5 | from agents import Agent, Runner, trace 6 | 7 | """ 8 | This example demonstrates a deterministic flow, where each step is performed by an agent. 9 | 1. The first agent generates a story outline 10 | 2. We feed the outline into the second agent 11 | 3. The second agent checks if the outline is good quality and if it is a scifi story 12 | 4. If the outline is not good quality or not a scifi story, we stop here 13 | 5. If the outline is good quality and a scifi story, we feed the outline into the third agent 14 | 6. The third agent writes the story 15 | """ 16 | 17 | story_outline_agent = Agent( 18 | name="story_outline_agent", 19 | instructions="Generate a very short story outline based on the user's input.", 20 | ) 21 | 22 | 23 | class OutlineCheckerOutput(BaseModel): 24 | good_quality: bool 25 | is_scifi: bool 26 | 27 | 28 | outline_checker_agent = Agent( 29 | name="outline_checker_agent", 30 | instructions="Read the given story outline, and judge the quality. Also, determine if it is a scifi story.", 31 | output_type=OutlineCheckerOutput, 32 | ) 33 | 34 | story_agent = Agent( 35 | name="story_agent", 36 | instructions="Write a short story based on the given outline.", 37 | output_type=str, 38 | ) 39 | 40 | 41 | async def main(): 42 | input_prompt = input("What kind of story do you want? ") 43 | 44 | # Ensure the entire workflow is a single trace 45 | with trace("Deterministic story flow"): 46 | # 1. Generate an outline 47 | outline_result = await Runner.run( 48 | story_outline_agent, 49 | input_prompt, 50 | ) 51 | print("Outline generated") 52 | 53 | # 2. Check the outline 54 | outline_checker_result = await Runner.run( 55 | outline_checker_agent, 56 | outline_result.final_output, 57 | ) 58 | 59 | # 3. Add a gate to stop if the outline is not good quality or not a scifi story 60 | assert isinstance(outline_checker_result.final_output, OutlineCheckerOutput) 61 | if not outline_checker_result.final_output.good_quality: 62 | print("Outline is not good quality, so we stop here.") 63 | exit(0) 64 | 65 | if not outline_checker_result.final_output.is_scifi: 66 | print("Outline is not a scifi story, so we stop here.") 67 | exit(0) 68 | 69 | print("Outline is good quality and a scifi story, so we continue to write the story.") 70 | 71 | # 4. 
Write the story 72 | story_result = await Runner.run( 73 | story_agent, 74 | outline_result.final_output, 75 | ) 76 | print(f"Story: {story_result.final_output}") 77 | 78 | 79 | if __name__ == "__main__": 80 | asyncio.run(main()) 81 | -------------------------------------------------------------------------------- /examples/agent_patterns/llm_as_a_judge.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | from dataclasses import dataclass 5 | from typing import Literal 6 | 7 | from agents import Agent, ItemHelpers, Runner, TResponseInputItem, trace 8 | 9 | """ 10 | This example shows the LLM as a judge pattern. The first agent generates an outline for a story. 11 | The second agent judges the outline and provides feedback. We loop until the judge is satisfied 12 | with the outline. 13 | """ 14 | 15 | story_outline_generator = Agent( 16 | name="story_outline_generator", 17 | instructions=( 18 | "You generate a very short story outline based on the user's input." 19 | "If there is any feedback provided, use it to improve the outline." 20 | ), 21 | ) 22 | 23 | 24 | @dataclass 25 | class EvaluationFeedback: 26 | feedback: str 27 | score: Literal["pass", "needs_improvement", "fail"] 28 | 29 | 30 | evaluator = Agent[None]( 31 | name="evaluator", 32 | instructions=( 33 | "You evaluate a story outline and decide if it's good enough." 34 | "If it's not good enough, you provide feedback on what needs to be improved." 35 | "Never give it a pass on the first try." 36 | ), 37 | output_type=EvaluationFeedback, 38 | ) 39 | 40 | 41 | async def main() -> None: 42 | msg = input("What kind of story would you like to hear? ") 43 | input_items: list[TResponseInputItem] = [{"content": msg, "role": "user"}] 44 | 45 | latest_outline: str | None = None 46 | 47 | # We'll run the entire workflow in a single trace 48 | with trace("LLM as a judge"): 49 | while True: 50 | story_outline_result = await Runner.run( 51 | story_outline_generator, 52 | input_items, 53 | ) 54 | 55 | input_items = story_outline_result.to_input_list() 56 | latest_outline = ItemHelpers.text_message_outputs(story_outline_result.new_items) 57 | print("Story outline generated") 58 | 59 | evaluator_result = await Runner.run(evaluator, input_items) 60 | result: EvaluationFeedback = evaluator_result.final_output 61 | 62 | print(f"Evaluator score: {result.score}") 63 | 64 | if result.score == "pass": 65 | print("Story outline is good enough, exiting.") 66 | break 67 | 68 | print("Re-running with feedback") 69 | 70 | input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"}) 71 | 72 | print(f"Final story outline: {latest_outline}") 73 | 74 | 75 | if __name__ == "__main__": 76 | asyncio.run(main()) 77 | -------------------------------------------------------------------------------- /examples/agent_patterns/output_guardrails.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import json 5 | 6 | from pydantic import BaseModel, Field 7 | 8 | from agents import ( 9 | Agent, 10 | GuardrailFunctionOutput, 11 | OutputGuardrailTripwireTriggered, 12 | RunContextWrapper, 13 | Runner, 14 | output_guardrail, 15 | ) 16 | 17 | """ 18 | This example shows how to use output guardrails. 19 | 20 | Output guardrails are checks that run on the final output of an agent. 
21 | They can be used to do things like: 22 | - Check if the output contains sensitive data 23 | - Check if the output is a valid response to the user's message 24 | 25 | In this example, we'll use a (contrived) example where we check if the agent's response contains 26 | a phone number. 27 | """ 28 | 29 | 30 | # The agent's output type 31 | class MessageOutput(BaseModel): 32 | reasoning: str = Field(description="Thoughts on how to respond to the user's message") 33 | response: str = Field(description="The response to the user's message") 34 | user_name: str | None = Field(description="The name of the user who sent the message, if known") 35 | 36 | 37 | @output_guardrail 38 | async def sensitive_data_check( 39 | context: RunContextWrapper, agent: Agent, output: MessageOutput 40 | ) -> GuardrailFunctionOutput: 41 | phone_number_in_response = "650" in output.response 42 | phone_number_in_reasoning = "650" in output.reasoning 43 | 44 | return GuardrailFunctionOutput( 45 | output_info={ 46 | "phone_number_in_response": phone_number_in_response, 47 | "phone_number_in_reasoning": phone_number_in_reasoning, 48 | }, 49 | tripwire_triggered=phone_number_in_response or phone_number_in_reasoning, 50 | ) 51 | 52 | 53 | agent = Agent( 54 | name="Assistant", 55 | instructions="You are a helpful assistant.", 56 | output_type=MessageOutput, 57 | output_guardrails=[sensitive_data_check], 58 | ) 59 | 60 | 61 | async def main(): 62 | # This should be ok 63 | await Runner.run(agent, "What's the capital of California?") 64 | print("First message passed") 65 | 66 | # This should trip the guardrail 67 | try: 68 | result = await Runner.run( 69 | agent, "My phone number is 650-123-4567. Where do you think I live?" 70 | ) 71 | print( 72 | f"Guardrail didn't trip - this is unexpected. Output: {json.dumps(result.final_output.model_dump(), indent=2)}" 73 | ) 74 | 75 | except OutputGuardrailTripwireTriggered as e: 76 | print(f"Guardrail tripped. Info: {e.guardrail_result.output.output_info}") 77 | 78 | 79 | if __name__ == "__main__": 80 | asyncio.run(main()) 81 | -------------------------------------------------------------------------------- /examples/agent_patterns/parallelization.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, ItemHelpers, Runner, trace 4 | 5 | """ 6 | This example shows the parallelization pattern. We run the agent three times in parallel, and pick 7 | the best result. 8 | """ 9 | 10 | spanish_agent = Agent( 11 | name="spanish_agent", 12 | instructions="You translate the user's message to Spanish", 13 | ) 14 | 15 | translation_picker = Agent( 16 | name="translation_picker", 17 | instructions="You pick the best Spanish translation from the given options.", 18 | ) 19 | 20 | 21 | async def main(): 22 | msg = input("Hi! 
Enter a message, and we'll translate it to Spanish.\n\n") 23 | 24 | # Ensure the entire workflow is a single trace 25 | with trace("Parallel translation"): 26 | res_1, res_2, res_3 = await asyncio.gather( 27 | Runner.run( 28 | spanish_agent, 29 | msg, 30 | ), 31 | Runner.run( 32 | spanish_agent, 33 | msg, 34 | ), 35 | Runner.run( 36 | spanish_agent, 37 | msg, 38 | ), 39 | ) 40 | 41 | outputs = [ 42 | ItemHelpers.text_message_outputs(res_1.new_items), 43 | ItemHelpers.text_message_outputs(res_2.new_items), 44 | ItemHelpers.text_message_outputs(res_3.new_items), 45 | ] 46 | 47 | translations = "\n\n".join(outputs) 48 | print(f"\n\nTranslations:\n\n{translations}") 49 | 50 | best_translation = await Runner.run( 51 | translation_picker, 52 | f"Input: {msg}\n\nTranslations:\n{translations}", 53 | ) 54 | 55 | print("\n\n-----") 56 | 57 | print(f"Best translation: {best_translation.final_output}") 58 | 59 | 60 | if __name__ == "__main__": 61 | asyncio.run(main()) 62 | -------------------------------------------------------------------------------- /examples/agent_patterns/routing.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uuid 3 | 4 | from openai.types.responses import ResponseContentPartDoneEvent, ResponseTextDeltaEvent 5 | 6 | from agents import Agent, RawResponsesStreamEvent, Runner, TResponseInputItem, trace 7 | 8 | """ 9 | This example shows the handoffs/routing pattern. The triage agent receives the first message, and 10 | then hands off to the appropriate agent based on the language of the request. Responses are 11 | streamed to the user. 12 | """ 13 | 14 | french_agent = Agent( 15 | name="french_agent", 16 | instructions="You only speak French", 17 | ) 18 | 19 | spanish_agent = Agent( 20 | name="spanish_agent", 21 | instructions="You only speak Spanish", 22 | ) 23 | 24 | english_agent = Agent( 25 | name="english_agent", 26 | instructions="You only speak English", 27 | ) 28 | 29 | triage_agent = Agent( 30 | name="triage_agent", 31 | instructions="Handoff to the appropriate agent based on the language of the request.", 32 | handoffs=[french_agent, spanish_agent, english_agent], 33 | ) 34 | 35 | 36 | async def main(): 37 | # We'll create an ID for this conversation, so we can link each trace 38 | conversation_id = str(uuid.uuid4().hex[:16]) 39 | 40 | msg = input("Hi! We speak French, Spanish and English. How can I help? ") 41 | agent = triage_agent 42 | inputs: list[TResponseInputItem] = [{"content": msg, "role": "user"}] 43 | 44 | while True: 45 | # Each conversation turn is a single trace. 
Normally, each input from the user would be an 46 | # API request to your app, and you can wrap the request in a trace() 47 | with trace("Routing example", group_id=conversation_id): 48 | result = Runner.run_streamed( 49 | agent, 50 | input=inputs, 51 | ) 52 | async for event in result.stream_events(): 53 | if not isinstance(event, RawResponsesStreamEvent): 54 | continue 55 | data = event.data 56 | if isinstance(data, ResponseTextDeltaEvent): 57 | print(data.delta, end="", flush=True) 58 | elif isinstance(data, ResponseContentPartDoneEvent): 59 | print("\n") 60 | 61 | inputs = result.to_input_list() 62 | print("\n") 63 | 64 | user_msg = input("Enter a message: ") 65 | inputs.append({"content": user_msg, "role": "user"}) 66 | agent = result.current_agent 67 | 68 | 69 | if __name__ == "__main__": 70 | asyncio.run(main()) 71 | -------------------------------------------------------------------------------- /examples/basic/dynamic_system_prompt.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import random 3 | from typing import Literal 4 | 5 | from agents import Agent, RunContextWrapper, Runner 6 | 7 | 8 | class CustomContext: 9 | def __init__(self, style: Literal["haiku", "pirate", "robot"]): 10 | self.style = style 11 | 12 | 13 | def custom_instructions( 14 | run_context: RunContextWrapper[CustomContext], agent: Agent[CustomContext] 15 | ) -> str: 16 | context = run_context.context 17 | if context.style == "haiku": 18 | return "Only respond in haikus." 19 | elif context.style == "pirate": 20 | return "Respond as a pirate." 21 | else: 22 | return "Respond as a robot and say 'beep boop' a lot." 23 | 24 | 25 | agent = Agent( 26 | name="Chat agent", 27 | instructions=custom_instructions, 28 | ) 29 | 30 | 31 | async def main(): 32 | choice: Literal["haiku", "pirate", "robot"] = random.choice(["haiku", "pirate", "robot"]) 33 | context = CustomContext(style=choice) 34 | print(f"Using style: {choice}\n") 35 | 36 | user_message = "Tell me a joke." 37 | print(f"User: {user_message}") 38 | result = await Runner.run(agent, user_message, context=context) 39 | 40 | print(f"Assistant: {result.final_output}") 41 | 42 | 43 | if __name__ == "__main__": 44 | asyncio.run(main()) 45 | 46 | """ 47 | $ python examples/basic/dynamic_system_prompt.py 48 | 49 | Using style: haiku 50 | 51 | User: Tell me a joke. 52 | Assistant: Why don't eggs tell jokes? 53 | They might crack each other's shells, 54 | leaving yolk on face. 55 | 56 | $ python examples/basic/dynamic_system_prompt.py 57 | Using style: robot 58 | 59 | User: Tell me a joke. 60 | Assistant: Beep boop! Why was the robot so bad at soccer? Beep boop... because it kept kicking up a debug! Beep boop! 61 | 62 | $ python examples/basic/dynamic_system_prompt.py 63 | Using style: pirate 64 | 65 | User: Tell me a joke. 66 | Assistant: Why did the pirate go to school? 67 | 68 | To improve his arrr-ticulation! Har har har! 
🏴‍☠️ 69 | """ 70 | -------------------------------------------------------------------------------- /examples/basic/hello_world.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, Runner 4 | 5 | 6 | async def main(): 7 | agent = Agent( 8 | name="Assistant", 9 | instructions="You only respond in haikus.", 10 | ) 11 | 12 | result = await Runner.run(agent, "Tell me about recursion in programming.") 13 | print(result.final_output) 14 | # Function calls itself, 15 | # Looping in smaller pieces, 16 | # Endless by design. 17 | 18 | 19 | if __name__ == "__main__": 20 | asyncio.run(main()) 21 | -------------------------------------------------------------------------------- /examples/basic/hello_world_jupyter.py: -------------------------------------------------------------------------------- 1 | from agents import Agent, Runner 2 | 3 | agent = Agent(name="Assistant", instructions="You are a helpful assistant") 4 | 5 | # Intended for Jupyter notebooks where there's an existing event loop 6 | result = await Runner.run(agent, "Write a haiku about recursion in programming.") # type: ignore[top-level-await] # noqa: F704 7 | print(result.final_output) 8 | 9 | # Code within code loops, 10 | # Infinite mirrors reflect— 11 | # Logic folds on self. 12 | -------------------------------------------------------------------------------- /examples/basic/local_image.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import base64 3 | import os 4 | 5 | from agents import Agent, Runner 6 | 7 | FILEPATH = os.path.join(os.path.dirname(__file__), "media/image_bison.jpg") 8 | 9 | 10 | def image_to_base64(image_path): 11 | with open(image_path, "rb") as image_file: 12 | encoded_string = base64.b64encode(image_file.read()).decode("utf-8") 13 | return encoded_string 14 | 15 | 16 | async def main(): 17 | # Print base64-encoded image 18 | b64_image = image_to_base64(FILEPATH) 19 | 20 | agent = Agent( 21 | name="Assistant", 22 | instructions="You are a helpful assistant.", 23 | ) 24 | 25 | result = await Runner.run( 26 | agent, 27 | [ 28 | { 29 | "role": "user", 30 | "content": [ 31 | { 32 | "type": "input_image", 33 | "detail": "auto", 34 | "image_url": f"data:image/jpeg;base64,{b64_image}", 35 | } 36 | ], 37 | }, 38 | { 39 | "role": "user", 40 | "content": "What do you see in this image?", 41 | }, 42 | ], 43 | ) 44 | print(result.final_output) 45 | 46 | 47 | if __name__ == "__main__": 48 | asyncio.run(main()) 49 | -------------------------------------------------------------------------------- /examples/basic/media/image_bison.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/basic/media/image_bison.jpg -------------------------------------------------------------------------------- /examples/basic/non_strict_output_type.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from dataclasses import dataclass 4 | from typing import Any 5 | 6 | from agents import Agent, AgentOutputSchema, AgentOutputSchemaBase, Runner 7 | 8 | """This example demonstrates how to use an output type that is not in strict mode. Strict mode 9 | allows us to guarantee valid JSON output, but some schemas are not strict-compatible. 
10 | 11 | In this example, we define an output type that is not strict-compatible, and then we run the 12 | agent with strict_json_schema=False. 13 | 14 | We also demonstrate a custom output type. 15 | 16 | To understand which schemas are strict-compatible, see: 17 | https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas 18 | """ 19 | 20 | 21 | @dataclass 22 | class OutputType: 23 | jokes: dict[int, str] 24 | """A list of jokes, indexed by joke number.""" 25 | 26 | 27 | class CustomOutputSchema(AgentOutputSchemaBase): 28 | """A demonstration of a custom output schema.""" 29 | 30 | def is_plain_text(self) -> bool: 31 | return False 32 | 33 | def name(self) -> str: 34 | return "CustomOutputSchema" 35 | 36 | def json_schema(self) -> dict[str, Any]: 37 | return { 38 | "type": "object", 39 | "properties": {"jokes": {"type": "object", "properties": {"joke": {"type": "string"}}}}, 40 | } 41 | 42 | def is_strict_json_schema(self) -> bool: 43 | return False 44 | 45 | def validate_json(self, json_str: str) -> Any: 46 | json_obj = json.loads(json_str) 47 | # Just for demonstration, we'll return a list. 48 | return list(json_obj["jokes"].values()) 49 | 50 | 51 | async def main(): 52 | agent = Agent( 53 | name="Assistant", 54 | instructions="You are a helpful assistant.", 55 | output_type=OutputType, 56 | ) 57 | 58 | input = "Tell me 3 short jokes." 59 | 60 | # First, let's try with a strict output type. This should raise an exception. 61 | try: 62 | result = await Runner.run(agent, input) 63 | raise AssertionError("Should have raised an exception") 64 | except Exception as e: 65 | print(f"Error (expected): {e}") 66 | 67 | # Now let's try again with a non-strict output type. This should work. 68 | # In some cases, it will raise an error - the schema isn't strict, so the model may 69 | # produce an invalid JSON object. 70 | agent.output_type = AgentOutputSchema(OutputType, strict_json_schema=False) 71 | result = await Runner.run(agent, input) 72 | print(result.final_output) 73 | 74 | # Finally, let's try a custom output type. 75 | agent.output_type = CustomOutputSchema() 76 | result = await Runner.run(agent, input) 77 | print(result.final_output) 78 | 79 | 80 | if __name__ == "__main__": 81 | asyncio.run(main()) 82 | -------------------------------------------------------------------------------- /examples/basic/previous_response_id.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, Runner 4 | 5 | """This demonstrates usage of the `previous_response_id` parameter to continue a conversation. 6 | The second run passes the previous response ID to the model, which allows it to continue the 7 | conversation without re-sending the previous messages. 8 | 9 | Notes: 10 | 1. This only applies to the OpenAI Responses API. Other models will ignore this parameter. 11 | 2. Responses are only stored for 30 days as of this writing, so in production you should 12 | store the response ID along with an expiration date; if the response is no longer valid, 13 | you'll need to re-send the previous conversation history. 14 | """ 15 | 16 | 17 | async def main(): 18 | agent = Agent( 19 | name="Assistant", 20 | instructions="You are a helpful assistant. 
be VERY concise.", 21 | ) 22 | 23 | result = await Runner.run(agent, "What is the largest country in South America?") 24 | print(result.final_output) 25 | # Brazil 26 | 27 | result = await Runner.run( 28 | agent, 29 | "What is the capital of that country?", 30 | previous_response_id=result.last_response_id, 31 | ) 32 | print(result.final_output) 33 | # Brasilia 34 | 35 | 36 | async def main_stream(): 37 | agent = Agent( 38 | name="Assistant", 39 | instructions="You are a helpful assistant. be VERY concise.", 40 | ) 41 | 42 | result = Runner.run_streamed(agent, "What is the largest country in South America?") 43 | 44 | async for event in result.stream_events(): 45 | if event.type == "raw_response_event" and event.data.type == "response.output_text.delta": 46 | print(event.data.delta, end="", flush=True) 47 | 48 | print() 49 | 50 | result = Runner.run_streamed( 51 | agent, 52 | "What is the capital of that country?", 53 | previous_response_id=result.last_response_id, 54 | ) 55 | 56 | async for event in result.stream_events(): 57 | if event.type == "raw_response_event" and event.data.type == "response.output_text.delta": 58 | print(event.data.delta, end="", flush=True) 59 | 60 | 61 | if __name__ == "__main__": 62 | is_stream = input("Run in stream mode? (y/n): ") 63 | if is_stream == "y": 64 | asyncio.run(main_stream()) 65 | else: 66 | asyncio.run(main()) 67 | -------------------------------------------------------------------------------- /examples/basic/remote_image.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, Runner 4 | 5 | URL = "https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg" 6 | 7 | 8 | async def main(): 9 | agent = Agent( 10 | name="Assistant", 11 | instructions="You are a helpful assistant.", 12 | ) 13 | 14 | result = await Runner.run( 15 | agent, 16 | [ 17 | { 18 | "role": "user", 19 | "content": [{"type": "input_image", "detail": "auto", "image_url": URL}], 20 | }, 21 | { 22 | "role": "user", 23 | "content": "What do you see in this image?", 24 | }, 25 | ], 26 | ) 27 | print(result.final_output) 28 | 29 | 30 | if __name__ == "__main__": 31 | asyncio.run(main()) 32 | -------------------------------------------------------------------------------- /examples/basic/stream_items.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import random 3 | 4 | from agents import Agent, ItemHelpers, Runner, function_tool 5 | 6 | 7 | @function_tool 8 | def how_many_jokes() -> int: 9 | return random.randint(1, 10) 10 | 11 | 12 | async def main(): 13 | agent = Agent( 14 | name="Joker", 15 | instructions="First call the `how_many_jokes` tool, then tell that many jokes.", 16 | tools=[how_many_jokes], 17 | ) 18 | 19 | result = Runner.run_streamed( 20 | agent, 21 | input="Hello", 22 | ) 23 | print("=== Run starting ===") 24 | async for event in result.stream_events(): 25 | # We'll ignore the raw responses event deltas 26 | if event.type == "raw_response_event": 27 | continue 28 | elif event.type == "agent_updated_stream_event": 29 | print(f"Agent updated: {event.new_agent.name}") 30 | continue 31 | elif event.type == "run_item_stream_event": 32 | if event.item.type == "tool_call_item": 33 | print("-- Tool was called") 34 | elif event.item.type == "tool_call_output_item": 35 | print(f"-- Tool output: {event.item.output}") 36 | elif event.item.type == "message_output_item": 37 | print(f"-- Message output:\n 
{ItemHelpers.text_message_output(event.item)}") 38 | else: 39 | pass # Ignore other event types 40 | 41 | print("=== Run complete ===") 42 | 43 | 44 | if __name__ == "__main__": 45 | asyncio.run(main()) 46 | 47 | # === Run starting === 48 | # Agent updated: Joker 49 | # -- Tool was called 50 | # -- Tool output: 4 51 | # -- Message output: 52 | # Sure, here are four jokes for you: 53 | 54 | # 1. **Why don't skeletons fight each other?** 55 | # They don't have the guts! 56 | 57 | # 2. **What do you call fake spaghetti?** 58 | # An impasta! 59 | 60 | # 3. **Why did the scarecrow win an award?** 61 | # Because he was outstanding in his field! 62 | 63 | # 4. **Why did the bicycle fall over?** 64 | # Because it was two-tired! 65 | # === Run complete === 66 | -------------------------------------------------------------------------------- /examples/basic/stream_text.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from openai.types.responses import ResponseTextDeltaEvent 4 | 5 | from agents import Agent, Runner 6 | 7 | 8 | async def main(): 9 | agent = Agent( 10 | name="Joker", 11 | instructions="You are a helpful assistant.", 12 | ) 13 | 14 | result = Runner.run_streamed(agent, input="Please tell me 5 jokes.") 15 | async for event in result.stream_events(): 16 | if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent): 17 | print(event.data.delta, end="", flush=True) 18 | 19 | 20 | if __name__ == "__main__": 21 | asyncio.run(main()) 22 | -------------------------------------------------------------------------------- /examples/basic/tools.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from pydantic import BaseModel 4 | 5 | from agents import Agent, Runner, function_tool 6 | 7 | 8 | class Weather(BaseModel): 9 | city: str 10 | temperature_range: str 11 | conditions: str 12 | 13 | 14 | @function_tool 15 | def get_weather(city: str) -> Weather: 16 | print("[debug] get_weather called") 17 | return Weather(city=city, temperature_range="14-20C", conditions="Sunny with wind.") 18 | 19 | 20 | agent = Agent( 21 | name="Hello world", 22 | instructions="You are a helpful agent.", 23 | tools=[get_weather], 24 | ) 25 | 26 | 27 | async def main(): 28 | result = await Runner.run(agent, input="What's the weather in Tokyo?") 29 | print(result.final_output) 30 | # The weather in Tokyo is sunny. 31 | 32 | 33 | if __name__ == "__main__": 34 | asyncio.run(main()) 35 | -------------------------------------------------------------------------------- /examples/financial_research_agent/README.md: -------------------------------------------------------------------------------- 1 | # Financial Research Agent Example 2 | 3 | This example shows how you might compose a richer financial research agent using the Agents SDK. The pattern is similar to the `research_bot` example, but with more specialized sub‑agents and a verification step. 4 | 5 | The flow is: 6 | 7 | 1. **Planning**: A planner agent turns the end user’s request into a list of search terms relevant to financial analysis – recent news, earnings calls, corporate filings, industry commentary, etc. 8 | 2. **Search**: A search agent uses the built‑in `WebSearchTool` to retrieve terse summaries for each search term. (You could also add `FileSearchTool` if you have indexed PDFs or 10‑Ks.) 9 | 3. **Sub‑analysts**: Additional agents (e.g. 
a fundamentals analyst and a risk analyst) are exposed as tools so the writer can call them inline and incorporate their outputs. 10 | 4. **Writing**: A senior writer agent brings together the search snippets and any sub‑analyst summaries into a long‑form markdown report plus a short executive summary. 11 | 5. **Verification**: A final verifier agent audits the report for obvious inconsistencies or missing sourcing. 12 | 13 | You can run the example with: 14 | 15 | ```bash 16 | python -m examples.financial_research_agent.main 17 | ``` 18 | 19 | and enter a query like: 20 | 21 | ``` 22 | Write up an analysis of Apple Inc.'s most recent quarter. 23 | ``` 24 | 25 | ### Starter prompt 26 | 27 | The writer agent is seeded with instructions similar to: 28 | 29 | ``` 30 | You are a senior financial analyst. You will be provided with the original query 31 | and a set of raw search summaries. Your job is to synthesize these into a 32 | long‑form markdown report (at least several paragraphs) with a short executive 33 | summary. You also have access to tools like `fundamentals_analysis` and 34 | `risk_analysis` to get short specialist write‑ups if you want to incorporate them. 35 | Add a few follow‑up questions for further research. 36 | ``` 37 | 38 | You can tweak these prompts and sub‑agents to suit your own data sources and preferred report structure. 39 | -------------------------------------------------------------------------------- /examples/financial_research_agent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/financial_research_agent/__init__.py -------------------------------------------------------------------------------- /examples/financial_research_agent/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/financial_research_agent/agents/__init__.py -------------------------------------------------------------------------------- /examples/financial_research_agent/agents/financials_agent.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from agents import Agent 4 | 5 | # A sub‑agent focused on analyzing a company's fundamentals. 6 | FINANCIALS_PROMPT = ( 7 | "You are a financial analyst focused on company fundamentals such as revenue, " 8 | "profit, margins and growth trajectory. Given a collection of web (and optional file) " 9 | "search results about a company, write a concise analysis of its recent financial " 10 | "performance. Pull out key metrics or quotes. Keep it under 2 paragraphs." 11 | ) 12 | 13 | 14 | class AnalysisSummary(BaseModel): 15 | summary: str 16 | """Short text summary for this aspect of the analysis.""" 17 | 18 | 19 | financials_agent = Agent( 20 | name="FundamentalsAnalystAgent", 21 | instructions=FINANCIALS_PROMPT, 22 | output_type=AnalysisSummary, 23 | ) 24 | -------------------------------------------------------------------------------- /examples/financial_research_agent/agents/planner_agent.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from agents import Agent 4 | 5 | # Generate a plan of searches to ground the financial analysis. 
6 | # For a given financial question or company, we want to search for 7 | # recent news, official filings, analyst commentary, and other 8 | # relevant background. 9 | PROMPT = ( 10 | "You are a financial research planner. Given a request for financial analysis, " 11 | "produce a set of web searches to gather the context needed. Aim for recent " 12 | "headlines, earnings calls or 10‑K snippets, analyst commentary, and industry background. " 13 | "Output between 5 and 15 search terms to query for." 14 | ) 15 | 16 | 17 | class FinancialSearchItem(BaseModel): 18 | reason: str 19 | """Your reasoning for why this search is relevant.""" 20 | 21 | query: str 22 | """The search term to feed into a web (or file) search.""" 23 | 24 | 25 | class FinancialSearchPlan(BaseModel): 26 | searches: list[FinancialSearchItem] 27 | """A list of searches to perform.""" 28 | 29 | 30 | planner_agent = Agent( 31 | name="FinancialPlannerAgent", 32 | instructions=PROMPT, 33 | model="o3-mini", 34 | output_type=FinancialSearchPlan, 35 | ) 36 | -------------------------------------------------------------------------------- /examples/financial_research_agent/agents/risk_agent.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from agents import Agent 4 | 5 | # A sub‑agent specializing in identifying risk factors or concerns. 6 | RISK_PROMPT = ( 7 | "You are a risk analyst looking for potential red flags in a company's outlook. " 8 | "Given background research, produce a short analysis of risks such as competitive threats, " 9 | "regulatory issues, supply chain problems, or slowing growth. Keep it under 2 paragraphs." 10 | ) 11 | 12 | 13 | class AnalysisSummary(BaseModel): 14 | summary: str 15 | """Short text summary for this aspect of the analysis.""" 16 | 17 | 18 | risk_agent = Agent( 19 | name="RiskAnalystAgent", 20 | instructions=RISK_PROMPT, 21 | output_type=AnalysisSummary, 22 | ) 23 | -------------------------------------------------------------------------------- /examples/financial_research_agent/agents/search_agent.py: -------------------------------------------------------------------------------- 1 | from agents import Agent, WebSearchTool 2 | from agents.model_settings import ModelSettings 3 | 4 | # Given a search term, use web search to pull back a brief summary. 5 | # Summaries should be concise but capture the main financial points. 6 | INSTRUCTIONS = ( 7 | "You are a research assistant specializing in financial topics. " 8 | "Given a search term, use web search to retrieve up‑to‑date context and " 9 | "produce a short summary of at most 300 words. Focus on key numbers, events, " 10 | "or quotes that will be useful to a financial analyst." 11 | ) 12 | 13 | search_agent = Agent( 14 | name="FinancialSearchAgent", 15 | instructions=INSTRUCTIONS, 16 | tools=[WebSearchTool()], 17 | model_settings=ModelSettings(tool_choice="required"), 18 | ) 19 | -------------------------------------------------------------------------------- /examples/financial_research_agent/agents/verifier_agent.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from agents import Agent 4 | 5 | # Agent to sanity‑check a synthesized report for consistency and recall. 6 | # This can be used to flag potential gaps or obvious mistakes. 7 | VERIFIER_PROMPT = ( 8 | "You are a meticulous auditor. You have been handed a financial analysis report. 
" 9 | "Your job is to verify the report is internally consistent, clearly sourced, and makes " 10 | "no unsupported claims. Point out any issues or uncertainties." 11 | ) 12 | 13 | 14 | class VerificationResult(BaseModel): 15 | verified: bool 16 | """Whether the report seems coherent and plausible.""" 17 | 18 | issues: str 19 | """If not verified, describe the main issues or concerns.""" 20 | 21 | 22 | verifier_agent = Agent( 23 | name="VerificationAgent", 24 | instructions=VERIFIER_PROMPT, 25 | model="gpt-4o", 26 | output_type=VerificationResult, 27 | ) 28 | -------------------------------------------------------------------------------- /examples/financial_research_agent/agents/writer_agent.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from agents import Agent 4 | 5 | # Writer agent brings together the raw search results and optionally calls out 6 | # to sub‑analyst tools for specialized commentary, then returns a cohesive markdown report. 7 | WRITER_PROMPT = ( 8 | "You are a senior financial analyst. You will be provided with the original query and " 9 | "a set of raw search summaries. Your task is to synthesize these into a long‑form markdown " 10 | "report (at least several paragraphs) including a short executive summary and follow‑up " 11 | "questions. If needed, you can call the available analysis tools (e.g. fundamentals_analysis, " 12 | "risk_analysis) to get short specialist write‑ups to incorporate." 13 | ) 14 | 15 | 16 | class FinancialReportData(BaseModel): 17 | short_summary: str 18 | """A short 2‑3 sentence executive summary.""" 19 | 20 | markdown_report: str 21 | """The full markdown report.""" 22 | 23 | follow_up_questions: list[str] 24 | """Suggested follow‑up questions for further research.""" 25 | 26 | 27 | # Note: We will attach handoffs to specialist analyst agents at runtime in the manager. 28 | # This shows how an agent can use handoffs to delegate to specialized subagents. 29 | writer_agent = Agent( 30 | name="FinancialWriterAgent", 31 | instructions=WRITER_PROMPT, 32 | model="gpt-4.5-preview-2025-02-27", 33 | output_type=FinancialReportData, 34 | ) 35 | -------------------------------------------------------------------------------- /examples/financial_research_agent/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from .manager import FinancialResearchManager 4 | 5 | 6 | # Entrypoint for the financial bot example. 7 | # Run this as `python -m examples.financial_research_agent.main` and enter a 8 | # financial research query, for example: 9 | # "Write up an analysis of Apple Inc.'s most recent quarter." 10 | async def main() -> None: 11 | query = input("Enter a financial research query: ") 12 | mgr = FinancialResearchManager() 13 | await mgr.run(query) 14 | 15 | 16 | if __name__ == "__main__": 17 | asyncio.run(main()) 18 | -------------------------------------------------------------------------------- /examples/financial_research_agent/printer.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from rich.console import Console, Group 4 | from rich.live import Live 5 | from rich.spinner import Spinner 6 | 7 | 8 | class Printer: 9 | """ 10 | Simple wrapper to stream status updates. Used by the financial bot 11 | manager as it orchestrates planning, search and writing. 
12 | """ 13 | 14 | def __init__(self, console: Console) -> None: 15 | self.live = Live(console=console) 16 | self.items: dict[str, tuple[str, bool]] = {} 17 | self.hide_done_ids: set[str] = set() 18 | self.live.start() 19 | 20 | def end(self) -> None: 21 | self.live.stop() 22 | 23 | def hide_done_checkmark(self, item_id: str) -> None: 24 | self.hide_done_ids.add(item_id) 25 | 26 | def update_item( 27 | self, item_id: str, content: str, is_done: bool = False, hide_checkmark: bool = False 28 | ) -> None: 29 | self.items[item_id] = (content, is_done) 30 | if hide_checkmark: 31 | self.hide_done_ids.add(item_id) 32 | self.flush() 33 | 34 | def mark_item_done(self, item_id: str) -> None: 35 | self.items[item_id] = (self.items[item_id][0], True) 36 | self.flush() 37 | 38 | def flush(self) -> None: 39 | renderables: list[Any] = [] 40 | for item_id, (content, is_done) in self.items.items(): 41 | if is_done: 42 | prefix = "✅ " if item_id not in self.hide_done_ids else "" 43 | renderables.append(prefix + content) 44 | else: 45 | renderables.append(Spinner("dots", text=content)) 46 | self.live.update(Group(*renderables)) 47 | -------------------------------------------------------------------------------- /examples/hosted_mcp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/hosted_mcp/__init__.py -------------------------------------------------------------------------------- /examples/hosted_mcp/approvals.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import asyncio 3 | 4 | from agents import ( 5 | Agent, 6 | HostedMCPTool, 7 | MCPToolApprovalFunctionResult, 8 | MCPToolApprovalRequest, 9 | Runner, 10 | ) 11 | 12 | """This example demonstrates how to use the hosted MCP support in the OpenAI Responses API, with 13 | approval callbacks.""" 14 | 15 | 16 | def approval_callback(request: MCPToolApprovalRequest) -> MCPToolApprovalFunctionResult: 17 | answer = input(f"Approve running the tool `{request.data.name}`? 
(y/n) ") 18 | result: MCPToolApprovalFunctionResult = {"approve": answer == "y"} 19 | if not result["approve"]: 20 | result["reason"] = "User denied" 21 | return result 22 | 23 | 24 | async def main(verbose: bool, stream: bool): 25 | agent = Agent( 26 | name="Assistant", 27 | tools=[ 28 | HostedMCPTool( 29 | tool_config={ 30 | "type": "mcp", 31 | "server_label": "gitmcp", 32 | "server_url": "https://gitmcp.io/openai/codex", 33 | "require_approval": "always", 34 | }, 35 | on_approval_request=approval_callback, 36 | ) 37 | ], 38 | ) 39 | 40 | if stream: 41 | result = Runner.run_streamed(agent, "Which language is this repo written in?") 42 | async for event in result.stream_events(): 43 | if event.type == "run_item_stream_event": 44 | print(f"Got event of type {event.item.__class__.__name__}") 45 | print(f"Done streaming; final result: {result.final_output}") 46 | else: 47 | res = await Runner.run(agent, "Which language is this repo written in?") 48 | print(res.final_output) 49 | 50 | if verbose: 51 | for item in result.new_items: 52 | print(item) 53 | 54 | 55 | if __name__ == "__main__": 56 | parser = argparse.ArgumentParser() 57 | parser.add_argument("--verbose", action="store_true", default=False) 58 | parser.add_argument("--stream", action="store_true", default=False) 59 | args = parser.parse_args() 60 | 61 | asyncio.run(main(args.verbose, args.stream)) 62 | -------------------------------------------------------------------------------- /examples/hosted_mcp/simple.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import asyncio 3 | 4 | from agents import Agent, HostedMCPTool, Runner 5 | 6 | """This example demonstrates how to use the hosted MCP support in the OpenAI Responses API, with 7 | approvals not required for any tools. You should only use this for trusted MCP servers.""" 8 | 9 | 10 | async def main(verbose: bool, stream: bool): 11 | agent = Agent( 12 | name="Assistant", 13 | tools=[ 14 | HostedMCPTool( 15 | tool_config={ 16 | "type": "mcp", 17 | "server_label": "gitmcp", 18 | "server_url": "https://gitmcp.io/openai/codex", 19 | "require_approval": "never", 20 | } 21 | ) 22 | ], 23 | ) 24 | 25 | if stream: 26 | result = Runner.run_streamed(agent, "Which language is this repo written in?") 27 | async for event in result.stream_events(): 28 | if event.type == "run_item_stream_event": 29 | print(f"Got event of type {event.item.__class__.__name__}") 30 | print(f"Done streaming; final result: {result.final_output}") 31 | else: 32 | res = await Runner.run(agent, "Which language is this repo written in?") 33 | print(res.final_output) 34 | # The repository is primarily written in multiple languages, including Rust and TypeScript... 35 | 36 | if verbose: 37 | for item in result.new_items: 38 | print(item) 39 | 40 | 41 | if __name__ == "__main__": 42 | parser = argparse.ArgumentParser() 43 | parser.add_argument("--verbose", action="store_true", default=False) 44 | parser.add_argument("--stream", action="store_true", default=False) 45 | args = parser.parse_args() 46 | 47 | asyncio.run(main(args.verbose, args.stream)) 48 | -------------------------------------------------------------------------------- /examples/mcp/filesystem_example/README.md: -------------------------------------------------------------------------------- 1 | # MCP Filesystem Example 2 | 3 | This example uses the [filesystem MCP server](https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem), running locally via `npx`. 
4 | 5 | Run it via: 6 | 7 | ``` 8 | uv run python examples/mcp/filesystem_example/main.py 9 | ``` 10 | 11 | ## Details 12 | 13 | The example uses the `MCPServerStdio` class from `agents.mcp`, with the command: 14 | 15 | ```bash 16 | npx -y "@modelcontextprotocol/server-filesystem" 17 | ``` 18 | 19 | It's only given access to the `sample_files` directory adjacent to the example, which contains some sample data. 20 | 21 | Under the hood: 22 | 23 | 1. The server is spun up in a subprocess, and exposes a bunch of tools like `list_directory()`, `read_file()`, etc. 24 | 2. We add the server instance to the Agent via `mcp_agents`. 25 | 3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`. 26 | 4. If the LLM chooses to use an MCP tool, we call the MCP server to run the tool via `server.run_tool()`. 27 | -------------------------------------------------------------------------------- /examples/mcp/filesystem_example/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import shutil 4 | 5 | from agents import Agent, Runner, gen_trace_id, trace 6 | from agents.mcp import MCPServer, MCPServerStdio 7 | 8 | 9 | async def run(mcp_server: MCPServer): 10 | agent = Agent( 11 | name="Assistant", 12 | instructions="Use the tools to read the filesystem and answer questions based on those files.", 13 | mcp_servers=[mcp_server], 14 | ) 15 | 16 | # List the files it can read 17 | message = "Read the files and list them." 18 | print(f"Running: {message}") 19 | result = await Runner.run(starting_agent=agent, input=message) 20 | print(result.final_output) 21 | 22 | # Ask about books 23 | message = "What is my #1 favorite book?" 24 | print(f"\n\nRunning: {message}") 25 | result = await Runner.run(starting_agent=agent, input=message) 26 | print(result.final_output) 27 | 28 | # Ask a question that reads then reasons. 29 | message = "Look at my favorite songs. Suggest one new song that I might like." 30 | print(f"\n\nRunning: {message}") 31 | result = await Runner.run(starting_agent=agent, input=message) 32 | print(result.final_output) 33 | 34 | 35 | async def main(): 36 | current_dir = os.path.dirname(os.path.abspath(__file__)) 37 | samples_dir = os.path.join(current_dir, "sample_files") 38 | 39 | async with MCPServerStdio( 40 | name="Filesystem Server, via npx", 41 | params={ 42 | "command": "npx", 43 | "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir], 44 | }, 45 | ) as server: 46 | trace_id = gen_trace_id() 47 | with trace(workflow_name="MCP Filesystem Example", trace_id=trace_id): 48 | print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n") 49 | await run(server) 50 | 51 | 52 | if __name__ == "__main__": 53 | # Let's make sure the user has npx installed 54 | if not shutil.which("npx"): 55 | raise RuntimeError("npx is not installed. Please install it with `npm install -g npx`.") 56 | 57 | asyncio.run(main()) 58 | -------------------------------------------------------------------------------- /examples/mcp/filesystem_example/sample_files/favorite_books.txt: -------------------------------------------------------------------------------- 1 | 1. To Kill a Mockingbird – Harper Lee 2 | 2. Pride and Prejudice – Jane Austen 3 | 3. 1984 – George Orwell 4 | 4. The Hobbit – J.R.R. Tolkien 5 | 5. Harry Potter and the Sorcerer’s Stone – J.K. Rowling 6 | 6. The Great Gatsby – F. Scott Fitzgerald 7 | 7. Charlotte’s Web – E.B. White 8 | 8. 
Anne of Green Gables – Lucy Maud Montgomery 9 | 9. The Alchemist – Paulo Coelho 10 | 10. Little Women – Louisa May Alcott 11 | 11. The Catcher in the Rye – J.D. Salinger 12 | 12. Animal Farm – George Orwell 13 | 13. The Chronicles of Narnia: The Lion, the Witch, and the Wardrobe – C.S. Lewis 14 | 14. The Book Thief – Markus Zusak 15 | 15. A Wrinkle in Time – Madeleine L’Engle 16 | 16. The Secret Garden – Frances Hodgson Burnett 17 | 17. Moby-Dick – Herman Melville 18 | 18. Fahrenheit 451 – Ray Bradbury 19 | 19. Jane Eyre – Charlotte Brontë 20 | 20. The Little Prince – Antoine de Saint-Exupéry -------------------------------------------------------------------------------- /examples/mcp/filesystem_example/sample_files/favorite_cities.txt: -------------------------------------------------------------------------------- 1 | - In the summer, I love visiting London. 2 | - In the winter, Tokyo is great. 3 | - In the spring, San Francisco. 4 | - In the fall, New York is the best. -------------------------------------------------------------------------------- /examples/mcp/filesystem_example/sample_files/favorite_songs.txt: -------------------------------------------------------------------------------- 1 | 1. "Here Comes the Sun" – The Beatles 2 | 2. "Imagine" – John Lennon 3 | 3. "Bohemian Rhapsody" – Queen 4 | 4. "Shake It Off" – Taylor Swift 5 | 5. "Billie Jean" – Michael Jackson 6 | 6. "Uptown Funk" – Mark Ronson ft. Bruno Mars 7 | 7. "Don’t Stop Believin’" – Journey 8 | 8. "Dancing Queen" – ABBA 9 | 9. "Happy" – Pharrell Williams 10 | 10. "Wonderwall" – Oasis 11 | -------------------------------------------------------------------------------- /examples/mcp/git_example/README.md: -------------------------------------------------------------------------------- 1 | # MCP Git Example 2 | 3 | This example uses the [git MCP server](https://github.com/modelcontextprotocol/servers/tree/main/src/git), running locally via `uvx`. 4 | 5 | Run it via: 6 | 7 | ``` 8 | uv run python examples/mcp/git_example/main.py 9 | ``` 10 | 11 | ## Details 12 | 13 | The example uses the `MCPServerStdio` class from `agents.mcp`, with the command: 14 | 15 | ```bash 16 | uvx mcp-server-git 17 | ``` 18 | 19 | Prior to running the agent, the user is prompted to provide a local directory path to their git repo. Using that, the Agent can invoke Git MCP tools like `git_log` to inspect the git commit log. 20 | 21 | Under the hood: 22 | 23 | 1. The server is spun up in a subprocess, and exposes a bunch of tools like `git_log()` 24 | 2. We add the server instance to the Agent via `mcp_agents`. 25 | 3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`. The result is cached. 26 | 4. If the LLM chooses to use an MCP tool, we call the MCP server to run the tool via `server.run_tool()`. 27 | -------------------------------------------------------------------------------- /examples/mcp/git_example/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import shutil 3 | 4 | from agents import Agent, Runner, trace 5 | from agents.mcp import MCPServer, MCPServerStdio 6 | 7 | 8 | async def run(mcp_server: MCPServer, directory_path: str): 9 | agent = Agent( 10 | name="Assistant", 11 | instructions=f"Answer questions about the git repository at {directory_path}, use that for repo_path", 12 | mcp_servers=[mcp_server], 13 | ) 14 | 15 | message = "Who's the most frequent contributor?" 
16 | print("\n" + "-" * 40) 17 | print(f"Running: {message}") 18 | result = await Runner.run(starting_agent=agent, input=message) 19 | print(result.final_output) 20 | 21 | message = "Summarize the last change in the repository." 22 | print("\n" + "-" * 40) 23 | print(f"Running: {message}") 24 | result = await Runner.run(starting_agent=agent, input=message) 25 | print(result.final_output) 26 | 27 | 28 | async def main(): 29 | # Ask the user for the directory path 30 | directory_path = input("Please enter the path to the git repository: ") 31 | 32 | async with MCPServerStdio( 33 | cache_tools_list=True, # Cache the tools list, for demonstration 34 | params={"command": "uvx", "args": ["mcp-server-git"]}, 35 | ) as server: 36 | with trace(workflow_name="MCP Git Example"): 37 | await run(server, directory_path) 38 | 39 | 40 | if __name__ == "__main__": 41 | if not shutil.which("uvx"): 42 | raise RuntimeError("uvx is not installed. Please install it with `pip install uvx`.") 43 | 44 | asyncio.run(main()) 45 | -------------------------------------------------------------------------------- /examples/mcp/sse_example/README.md: -------------------------------------------------------------------------------- 1 | # MCP SSE Example 2 | 3 | This example uses a local SSE server in [server.py](server.py). 4 | 5 | Run the example via: 6 | 7 | ``` 8 | uv run python examples/mcp/sse_example/main.py 9 | ``` 10 | 11 | ## Details 12 | 13 | The example uses the `MCPServerSse` class from `agents.mcp`. The server runs in a sub-process at `https://localhost:8000/sse`. 14 | -------------------------------------------------------------------------------- /examples/mcp/sse_example/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import shutil 4 | import subprocess 5 | import time 6 | from typing import Any 7 | 8 | from agents import Agent, Runner, gen_trace_id, trace 9 | from agents.mcp import MCPServer, MCPServerSse 10 | from agents.model_settings import ModelSettings 11 | 12 | 13 | async def run(mcp_server: MCPServer): 14 | agent = Agent( 15 | name="Assistant", 16 | instructions="Use the tools to answer the questions.", 17 | mcp_servers=[mcp_server], 18 | model_settings=ModelSettings(tool_choice="required"), 19 | ) 20 | 21 | # Use the `add` tool to add two numbers 22 | message = "Add these numbers: 7 and 22." 23 | print(f"Running: {message}") 24 | result = await Runner.run(starting_agent=agent, input=message) 25 | print(result.final_output) 26 | 27 | # Run the `get_weather` tool 28 | message = "What's the weather in Tokyo?" 29 | print(f"\n\nRunning: {message}") 30 | result = await Runner.run(starting_agent=agent, input=message) 31 | print(result.final_output) 32 | 33 | # Run the `get_secret_word` tool 34 | message = "What's the secret word?" 35 | print(f"\n\nRunning: {message}") 36 | result = await Runner.run(starting_agent=agent, input=message) 37 | print(result.final_output) 38 | 39 | 40 | async def main(): 41 | async with MCPServerSse( 42 | name="SSE Python Server", 43 | params={ 44 | "url": "http://localhost:8000/sse", 45 | }, 46 | ) as server: 47 | trace_id = gen_trace_id() 48 | with trace(workflow_name="SSE Example", trace_id=trace_id): 49 | print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n") 50 | await run(server) 51 | 52 | 53 | if __name__ == "__main__": 54 | # Let's make sure the user has uv installed 55 | if not shutil.which("uv"): 56 | raise RuntimeError( 57 | "uv is not installed. 
Please install it: https://docs.astral.sh/uv/getting-started/installation/" 58 | ) 59 | 60 | # We'll run the SSE server in a subprocess. Usually this would be a remote server, but for this 61 | # demo, we'll run it locally at http://localhost:8000/sse 62 | process: subprocess.Popen[Any] | None = None 63 | try: 64 | this_dir = os.path.dirname(os.path.abspath(__file__)) 65 | server_file = os.path.join(this_dir, "server.py") 66 | 67 | print("Starting SSE server at http://localhost:8000/sse ...") 68 | 69 | # Run `uv run server.py` to start the SSE server 70 | process = subprocess.Popen(["uv", "run", server_file]) 71 | # Give it 3 seconds to start 72 | time.sleep(3) 73 | 74 | print("SSE server started. Running example...\n\n") 75 | except Exception as e: 76 | print(f"Error starting SSE server: {e}") 77 | exit(1) 78 | 79 | try: 80 | asyncio.run(main()) 81 | finally: 82 | if process: 83 | process.terminate() 84 | -------------------------------------------------------------------------------- /examples/mcp/sse_example/server.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import requests 4 | from mcp.server.fastmcp import FastMCP 5 | 6 | # Create server 7 | mcp = FastMCP("Echo Server") 8 | 9 | 10 | @mcp.tool() 11 | def add(a: int, b: int) -> int: 12 | """Add two numbers""" 13 | print(f"[debug-server] add({a}, {b})") 14 | return a + b 15 | 16 | 17 | @mcp.tool() 18 | def get_secret_word() -> str: 19 | print("[debug-server] get_secret_word()") 20 | return random.choice(["apple", "banana", "cherry"]) 21 | 22 | 23 | @mcp.tool() 24 | def get_current_weather(city: str) -> str: 25 | print(f"[debug-server] get_current_weather({city})") 26 | 27 | endpoint = "https://wttr.in" 28 | response = requests.get(f"{endpoint}/{city}") 29 | return response.text 30 | 31 | 32 | if __name__ == "__main__": 33 | mcp.run(transport="sse") 34 | -------------------------------------------------------------------------------- /examples/mcp/streamablehttp_example/README.md: -------------------------------------------------------------------------------- 1 | # MCP Streamable HTTP Example 2 | 3 | This example uses a local Streamable HTTP server in [server.py](server.py). 4 | 5 | Run the example via: 6 | 7 | ``` 8 | uv run python examples/mcp/streamablehttp_example/main.py 9 | ``` 10 | 11 | ## Details 12 | 13 | The example uses the `MCPServerStreamableHttp` class from `agents.mcp`. The server runs in a sub-process at `https://localhost:8000/mcp`. 
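For reference, a minimal client sketch modeled on the SSE example above (the `params` shape is assumed to mirror `MCPServerStreamableHttpParams`, with the local server reachable at `http://localhost:8000/mcp`):

```python
import asyncio

from agents import Agent, Runner
from agents.mcp import MCPServerStreamableHttp


async def main():
    # Connect to the locally running Streamable HTTP server
    async with MCPServerStreamableHttp(
        name="Streamable HTTP Python Server",
        params={"url": "http://localhost:8000/mcp"},
    ) as server:
        agent = Agent(
            name="Assistant",
            instructions="Use the tools to answer the questions.",
            mcp_servers=[server],
        )
        result = await Runner.run(starting_agent=agent, input="Add these numbers: 7 and 22.")
        print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())
```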
14 | -------------------------------------------------------------------------------- /examples/mcp/streamablehttp_example/server.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import requests 4 | from mcp.server.fastmcp import FastMCP 5 | 6 | # Create server 7 | mcp = FastMCP("Echo Server") 8 | 9 | 10 | @mcp.tool() 11 | def add(a: int, b: int) -> int: 12 | """Add two numbers""" 13 | print(f"[debug-server] add({a}, {b})") 14 | return a + b 15 | 16 | 17 | @mcp.tool() 18 | def get_secret_word() -> str: 19 | print("[debug-server] get_secret_word()") 20 | return random.choice(["apple", "banana", "cherry"]) 21 | 22 | 23 | @mcp.tool() 24 | def get_current_weather(city: str) -> str: 25 | print(f"[debug-server] get_current_weather({city})") 26 | 27 | endpoint = "https://wttr.in" 28 | response = requests.get(f"{endpoint}/{city}") 29 | return response.text 30 | 31 | 32 | if __name__ == "__main__": 33 | mcp.run(transport="streamable-http") 34 | -------------------------------------------------------------------------------- /examples/model_providers/README.md: -------------------------------------------------------------------------------- 1 | # Custom LLM providers 2 | 3 | The examples in this directory demonstrate how you might use a non-OpenAI LLM provider. To run them, first set a base URL, API key and model. 4 | 5 | ```bash 6 | export EXAMPLE_BASE_URL="..." 7 | export EXAMPLE_API_KEY="..." 8 | export EXAMPLE_MODEL_NAME="..." 9 | ``` 10 | 11 | Then run the examples, e.g.: 12 | 13 | ``` 14 | python examples/model_providers/custom_example_provider.py 15 | 16 | Loops within themselves, 17 | Function calls its own being, 18 | Depth without ending. 19 | ``` 20 | -------------------------------------------------------------------------------- /examples/model_providers/custom_example_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | 4 | from openai import AsyncOpenAI 5 | 6 | from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool, set_tracing_disabled 7 | 8 | BASE_URL = os.getenv("EXAMPLE_BASE_URL") or "" 9 | API_KEY = os.getenv("EXAMPLE_API_KEY") or "" 10 | MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "" 11 | 12 | if not BASE_URL or not API_KEY or not MODEL_NAME: 13 | raise ValueError( 14 | "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code." 15 | ) 16 | 17 | """This example uses a custom provider for a specific agent. Steps: 18 | 1. Create a custom OpenAI client. 19 | 2. Create a `Model` that uses the custom client. 20 | 3. Set the `model` on the Agent. 21 | 22 | Note that in this example, we disable tracing under the assumption that you don't have an API key 23 | from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var 24 | or call set_tracing_export_api_key() to set a tracing specific key. 25 | """ 26 | client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY) 27 | set_tracing_disabled(disabled=True) 28 | 29 | # An alternate approach that would also work: 30 | # PROVIDER = OpenAIProvider(openai_client=client) 31 | # agent = Agent(..., model="some-custom-model") 32 | # Runner.run(agent, ..., run_config=RunConfig(model_provider=PROVIDER)) 33 | 34 | 35 | @function_tool 36 | def get_weather(city: str): 37 | print(f"[debug] getting weather for {city}") 38 | return f"The weather in {city} is sunny."
39 | 40 | 41 | async def main(): 42 | # This agent will use the custom LLM provider 43 | agent = Agent( 44 | name="Assistant", 45 | instructions="You only respond in haikus.", 46 | model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client), 47 | tools=[get_weather], 48 | ) 49 | 50 | result = await Runner.run(agent, "What's the weather in Tokyo?") 51 | print(result.final_output) 52 | 53 | 54 | if __name__ == "__main__": 55 | asyncio.run(main()) 56 | -------------------------------------------------------------------------------- /examples/model_providers/custom_example_global.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | 4 | from openai import AsyncOpenAI 5 | 6 | from agents import ( 7 | Agent, 8 | Runner, 9 | function_tool, 10 | set_default_openai_api, 11 | set_default_openai_client, 12 | set_tracing_disabled, 13 | ) 14 | 15 | BASE_URL = os.getenv("EXAMPLE_BASE_URL") or "" 16 | API_KEY = os.getenv("EXAMPLE_API_KEY") or "" 17 | MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "" 18 | 19 | if not BASE_URL or not API_KEY or not MODEL_NAME: 20 | raise ValueError( 21 | "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code." 22 | ) 23 | 24 | 25 | """This example uses a custom provider for all requests by default. We do three things: 26 | 1. Create a custom client. 27 | 2. Set it as the default OpenAI client, and don't use it for tracing. 28 | 3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API. 29 | 30 | Note that in this example, we disable tracing under the assumption that you don't have an API key 31 | from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var 32 | or call set_tracing_export_api_key() to set a tracing specific key. 33 | """ 34 | 35 | client = AsyncOpenAI( 36 | base_url=BASE_URL, 37 | api_key=API_KEY, 38 | ) 39 | set_default_openai_client(client=client, use_for_tracing=False) 40 | set_default_openai_api("chat_completions") 41 | set_tracing_disabled(disabled=True) 42 | 43 | 44 | @function_tool 45 | def get_weather(city: str): 46 | print(f"[debug] getting weather for {city}") 47 | return f"The weather in {city} is sunny." 48 | 49 | 50 | async def main(): 51 | agent = Agent( 52 | name="Assistant", 53 | instructions="You only respond in haikus.", 54 | model=MODEL_NAME, 55 | tools=[get_weather], 56 | ) 57 | 58 | result = await Runner.run(agent, "What's the weather in Tokyo?") 59 | print(result.final_output) 60 | 61 | 62 | if __name__ == "__main__": 63 | asyncio.run(main()) 64 | -------------------------------------------------------------------------------- /examples/model_providers/custom_example_provider.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import os 5 | 6 | from openai import AsyncOpenAI 7 | 8 | from agents import ( 9 | Agent, 10 | Model, 11 | ModelProvider, 12 | OpenAIChatCompletionsModel, 13 | RunConfig, 14 | Runner, 15 | function_tool, 16 | set_tracing_disabled, 17 | ) 18 | 19 | BASE_URL = os.getenv("EXAMPLE_BASE_URL") or "" 20 | API_KEY = os.getenv("EXAMPLE_API_KEY") or "" 21 | MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "" 22 | 23 | if not BASE_URL or not API_KEY or not MODEL_NAME: 24 | raise ValueError( 25 | "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code." 
26 | ) 27 | 28 | 29 | """This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for 30 | others. Steps: 31 | 1. Create a custom OpenAI client. 32 | 2. Create a ModelProvider that uses the custom client. 33 | 3. Use the ModelProvider in calls to Runner.run(), only when we want to use the custom LLM provider. 34 | 35 | Note that in this example, we disable tracing under the assumption that you don't have an API key 36 | from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var 37 | or call set_tracing_export_api_key() to set a tracing specific key. 38 | """ 39 | client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY) 40 | set_tracing_disabled(disabled=True) 41 | 42 | 43 | class CustomModelProvider(ModelProvider): 44 | def get_model(self, model_name: str | None) -> Model: 45 | return OpenAIChatCompletionsModel(model=model_name or MODEL_NAME, openai_client=client) 46 | 47 | 48 | CUSTOM_MODEL_PROVIDER = CustomModelProvider() 49 | 50 | 51 | @function_tool 52 | def get_weather(city: str): 53 | print(f"[debug] getting weather for {city}") 54 | return f"The weather in {city} is sunny." 55 | 56 | 57 | async def main(): 58 | agent = Agent(name="Assistant", instructions="You only respond in haikus.", tools=[get_weather]) 59 | 60 | # This will use the custom model provider 61 | result = await Runner.run( 62 | agent, 63 | "What's the weather in Tokyo?", 64 | run_config=RunConfig(model_provider=CUSTOM_MODEL_PROVIDER), 65 | ) 66 | print(result.final_output) 67 | 68 | # If you uncomment this, it will use OpenAI directly, not the custom provider 69 | # result = await Runner.run( 70 | # agent, 71 | # "What's the weather in Tokyo?", 72 | # ) 73 | # print(result.final_output) 74 | 75 | 76 | if __name__ == "__main__": 77 | asyncio.run(main()) 78 | -------------------------------------------------------------------------------- /examples/model_providers/litellm_auto.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | 5 | from agents import Agent, Runner, function_tool, set_tracing_disabled 6 | 7 | """This example uses the built-in support for LiteLLM. To use this, ensure you have the 8 | ANTHROPIC_API_KEY environment variable set. 9 | """ 10 | 11 | set_tracing_disabled(disabled=True) 12 | 13 | 14 | @function_tool 15 | def get_weather(city: str): 16 | print(f"[debug] getting weather for {city}") 17 | return f"The weather in {city} is sunny." 18 | 19 | 20 | async def main(): 21 | agent = Agent( 22 | name="Assistant", 23 | instructions="You only respond in haikus.", 24 | # We prefix with litellm/ to tell the Runner to use the LitellmModel 25 | model="litellm/anthropic/claude-3-5-sonnet-20240620", 26 | tools=[get_weather], 27 | ) 28 | 29 | result = await Runner.run(agent, "What's the weather in Tokyo?") 30 | print(result.final_output) 31 | 32 | 33 | if __name__ == "__main__": 34 | import os 35 | 36 | if os.getenv("ANTHROPIC_API_KEY") is None: 37 | raise ValueError( 38 | "ANTHROPIC_API_KEY is not set. Please set it the environment variable and try again." 
39 | ) 40 | 41 | asyncio.run(main()) 42 | -------------------------------------------------------------------------------- /examples/model_providers/litellm_provider.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | 5 | from agents import Agent, Runner, function_tool, set_tracing_disabled 6 | from agents.extensions.models.litellm_model import LitellmModel 7 | 8 | """This example uses the LitellmModel directly, to hit any model provider. 9 | You can run it like this: 10 | uv run examples/model_providers/litellm_provider.py --model anthropic/claude-3-5-sonnet-20240620 11 | or 12 | uv run examples/model_providers/litellm_provider.py --model gemini/gemini-2.0-flash 13 | 14 | Find more providers here: https://docs.litellm.ai/docs/providers 15 | """ 16 | 17 | set_tracing_disabled(disabled=True) 18 | 19 | 20 | @function_tool 21 | def get_weather(city: str): 22 | print(f"[debug] getting weather for {city}") 23 | return f"The weather in {city} is sunny." 24 | 25 | 26 | async def main(model: str, api_key: str): 27 | agent = Agent( 28 | name="Assistant", 29 | instructions="You only respond in haikus.", 30 | model=LitellmModel(model=model, api_key=api_key), 31 | tools=[get_weather], 32 | ) 33 | 34 | result = await Runner.run(agent, "What's the weather in Tokyo?") 35 | print(result.final_output) 36 | 37 | 38 | if __name__ == "__main__": 39 | # First try to get model/api key from args 40 | import argparse 41 | 42 | parser = argparse.ArgumentParser() 43 | parser.add_argument("--model", type=str, required=False) 44 | parser.add_argument("--api-key", type=str, required=False) 45 | args = parser.parse_args() 46 | 47 | model = args.model 48 | if not model: 49 | model = input("Enter a model name for Litellm: ") 50 | 51 | api_key = args.api_key 52 | if not api_key: 53 | api_key = input("Enter an API key for Litellm: ") 54 | 55 | asyncio.run(main(model, api_key)) 56 | -------------------------------------------------------------------------------- /examples/research_bot/README.md: -------------------------------------------------------------------------------- 1 | # Research bot 2 | 3 | This is a simple example of a multi-agent research bot. To run it: 4 | 5 | ```bash 6 | python -m examples.research_bot.main 7 | ``` 8 | 9 | ## Architecture 10 | 11 | The flow is: 12 | 13 | 1. User enters their research topic 14 | 2. `planner_agent` comes up with a plan to search the web for information. The plan is a list of search queries, with a search term and a reason for each query. 15 | 3. For each search item, we run a `search_agent`, which uses the Web Search tool to search for that term and summarize the results. These all run in parallel. 16 | 4. Finally, the `writer_agent` receives the search summaries, and creates a written report. 17 | 18 | ## Suggested improvements 19 | 20 | If you're building your own research bot, some ideas to add to this are: 21 | 22 | 1. Retrieval: Add support for fetching relevant information from a vector store. You could use the File Search tool for this. 23 | 2. Image and file upload: Allow users to attach PDFs or other files, as baseline context for the research. 24 | 3. More planning and thinking: Models often produce better results given more time to think. Improve the planning process to come up with a better plan, and add an evaluation step so that the model can choose to improve its results, search for more stuff, etc. 25 | 4. 
Code execution: Allow running code, which is useful for data analysis. 26 | -------------------------------------------------------------------------------- /examples/research_bot/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /examples/research_bot/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/research_bot/agents/__init__.py -------------------------------------------------------------------------------- /examples/research_bot/agents/planner_agent.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from agents import Agent 4 | 5 | PROMPT = ( 6 | "You are a helpful research assistant. Given a query, come up with a set of web searches " 7 | "to perform to best answer the query. Output between 5 and 20 terms to query for." 8 | ) 9 | 10 | 11 | class WebSearchItem(BaseModel): 12 | reason: str 13 | "Your reasoning for why this search is important to the query." 14 | 15 | query: str 16 | "The search term to use for the web search." 17 | 18 | 19 | class WebSearchPlan(BaseModel): 20 | searches: list[WebSearchItem] 21 | """A list of web searches to perform to best answer the query.""" 22 | 23 | 24 | planner_agent = Agent( 25 | name="PlannerAgent", 26 | instructions=PROMPT, 27 | model="gpt-4o", 28 | output_type=WebSearchPlan, 29 | ) 30 | -------------------------------------------------------------------------------- /examples/research_bot/agents/search_agent.py: -------------------------------------------------------------------------------- 1 | from agents import Agent, WebSearchTool 2 | from agents.model_settings import ModelSettings 3 | 4 | INSTRUCTIONS = ( 5 | "You are a research assistant. Given a search term, you search the web for that term and " 6 | "produce a concise summary of the results. The summary must be 2-3 paragraphs and less than 300 " 7 | "words. Capture the main points. Write succinctly, no need to have complete sentences or good " 8 | "grammar. This will be consumed by someone synthesizing a report, so its vital you capture the " 9 | "essence and ignore any fluff. Do not include any additional commentary other than the summary " 10 | "itself." 11 | ) 12 | 13 | search_agent = Agent( 14 | name="Search agent", 15 | instructions=INSTRUCTIONS, 16 | tools=[WebSearchTool()], 17 | model_settings=ModelSettings(tool_choice="required"), 18 | ) 19 | -------------------------------------------------------------------------------- /examples/research_bot/agents/writer_agent.py: -------------------------------------------------------------------------------- 1 | # Agent used to synthesize a final report from the individual summaries. 2 | from pydantic import BaseModel 3 | 4 | from agents import Agent 5 | 6 | PROMPT = ( 7 | "You are a senior researcher tasked with writing a cohesive report for a research query. " 8 | "You will be provided with the original query, and some initial research done by a research " 9 | "assistant.\n" 10 | "You should first come up with an outline for the report that describes the structure and " 11 | "flow of the report. Then, generate the report and return that as your final output.\n" 12 | "The final output should be in markdown format, and it should be lengthy and detailed. 
Aim " 13 | "for 5-10 pages of content, at least 1000 words." 14 | ) 15 | 16 | 17 | class ReportData(BaseModel): 18 | short_summary: str 19 | """A short 2-3 sentence summary of the findings.""" 20 | 21 | markdown_report: str 22 | """The final report""" 23 | 24 | follow_up_questions: list[str] 25 | """Suggested topics to research further""" 26 | 27 | 28 | writer_agent = Agent( 29 | name="WriterAgent", 30 | instructions=PROMPT, 31 | model="o3-mini", 32 | output_type=ReportData, 33 | ) 34 | -------------------------------------------------------------------------------- /examples/research_bot/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from .manager import ResearchManager 4 | 5 | 6 | async def main() -> None: 7 | query = input("What would you like to research? ") 8 | await ResearchManager().run(query) 9 | 10 | 11 | if __name__ == "__main__": 12 | asyncio.run(main()) 13 | -------------------------------------------------------------------------------- /examples/research_bot/printer.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from rich.console import Console, Group 4 | from rich.live import Live 5 | from rich.spinner import Spinner 6 | 7 | 8 | class Printer: 9 | def __init__(self, console: Console): 10 | self.live = Live(console=console) 11 | self.items: dict[str, tuple[str, bool]] = {} 12 | self.hide_done_ids: set[str] = set() 13 | self.live.start() 14 | 15 | def end(self) -> None: 16 | self.live.stop() 17 | 18 | def hide_done_checkmark(self, item_id: str) -> None: 19 | self.hide_done_ids.add(item_id) 20 | 21 | def update_item( 22 | self, item_id: str, content: str, is_done: bool = False, hide_checkmark: bool = False 23 | ) -> None: 24 | self.items[item_id] = (content, is_done) 25 | if hide_checkmark: 26 | self.hide_done_ids.add(item_id) 27 | self.flush() 28 | 29 | def mark_item_done(self, item_id: str) -> None: 30 | self.items[item_id] = (self.items[item_id][0], True) 31 | self.flush() 32 | 33 | def flush(self) -> None: 34 | renderables: list[Any] = [] 35 | for item_id, (content, is_done) in self.items.items(): 36 | if is_done: 37 | prefix = "✅ " if item_id not in self.hide_done_ids else "" 38 | renderables.append(prefix + content) 39 | else: 40 | renderables.append(Spinner("dots", text=content)) 41 | self.live.update(Group(*renderables)) 42 | -------------------------------------------------------------------------------- /examples/tools/code_interpreter.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, CodeInterpreterTool, Runner, trace 4 | 5 | 6 | async def main(): 7 | agent = Agent( 8 | name="Code interpreter", 9 | instructions="You love doing math.", 10 | tools=[ 11 | CodeInterpreterTool( 12 | tool_config={"type": "code_interpreter", "container": {"type": "auto"}}, 13 | ) 14 | ], 15 | ) 16 | 17 | with trace("Code interpreter example"): 18 | print("Solving math problem...") 19 | result = Runner.run_streamed(agent, "What is the square root of273 * 312821 plus 1782?") 20 | async for event in result.stream_events(): 21 | if ( 22 | event.type == "run_item_stream_event" 23 | and event.item.type == "tool_call_item" 24 | and event.item.raw_item.type == "code_interpreter_call" 25 | ): 26 | print(f"Code interpreter code:\n```\n{event.item.raw_item.code}\n```\n") 27 | elif event.type == "run_item_stream_event": 28 | print(f"Other event: {event.item.type}") 29 | 30 | 
print(f"Final output: {result.final_output}") 31 | 32 | 33 | if __name__ == "__main__": 34 | asyncio.run(main()) 35 | -------------------------------------------------------------------------------- /examples/tools/file_search.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, FileSearchTool, Runner, trace 4 | 5 | 6 | async def main(): 7 | agent = Agent( 8 | name="File searcher", 9 | instructions="You are a helpful agent.", 10 | tools=[ 11 | FileSearchTool( 12 | max_num_results=3, 13 | vector_store_ids=["vs_67bf88953f748191be42b462090e53e7"], 14 | include_search_results=True, 15 | ) 16 | ], 17 | ) 18 | 19 | with trace("File search example"): 20 | result = await Runner.run( 21 | agent, "Be concise, and tell me 1 sentence about Arrakis I might not know." 22 | ) 23 | print(result.final_output) 24 | """ 25 | Arrakis, the desert planet in Frank Herbert's "Dune," was inspired by the scarcity of water 26 | as a metaphor for oil and other finite resources. 27 | """ 28 | 29 | print("\n".join([str(out) for out in result.new_items])) 30 | """ 31 | {"id":"...", "queries":["Arrakis"], "results":[...]} 32 | """ 33 | 34 | 35 | if __name__ == "__main__": 36 | asyncio.run(main()) 37 | -------------------------------------------------------------------------------- /examples/tools/image_generator.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import base64 3 | import os 4 | import subprocess 5 | import sys 6 | import tempfile 7 | 8 | from agents import Agent, ImageGenerationTool, Runner, trace 9 | 10 | 11 | def open_file(path: str) -> None: 12 | if sys.platform.startswith("darwin"): 13 | subprocess.run(["open", path], check=False) # macOS 14 | elif os.name == "nt": # Windows 15 | os.astartfile(path) # type: ignore 16 | elif os.name == "posix": 17 | subprocess.run(["xdg-open", path], check=False) # Linux/Unix 18 | else: 19 | print(f"Don't know how to open files on this platform: {sys.platform}") 20 | 21 | 22 | async def main(): 23 | agent = Agent( 24 | name="Image generator", 25 | instructions="You are a helpful agent.", 26 | tools=[ 27 | ImageGenerationTool( 28 | tool_config={"type": "image_generation", "quality": "low"}, 29 | ) 30 | ], 31 | ) 32 | 33 | with trace("Image generation example"): 34 | print("Generating image, this may take a while...") 35 | result = await Runner.run( 36 | agent, "Create an image of a frog eating a pizza, comic book style." 
37 | ) 38 | print(result.final_output) 39 | for item in result.new_items: 40 | if ( 41 | item.type == "tool_call_item" 42 | and item.raw_item.type == "image_generation_call" 43 | and (img_result := item.raw_item.result) 44 | ): 45 | with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: 46 | tmp.write(base64.b64decode(img_result)) 47 | temp_path = tmp.name 48 | 49 | # Open the image 50 | open_file(temp_path) 51 | 52 | 53 | if __name__ == "__main__": 54 | asyncio.run(main()) 55 | -------------------------------------------------------------------------------- /examples/tools/web_search.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agents import Agent, Runner, WebSearchTool, trace 4 | 5 | 6 | async def main(): 7 | agent = Agent( 8 | name="Web searcher", 9 | instructions="You are a helpful agent.", 10 | tools=[WebSearchTool(user_location={"type": "approximate", "city": "New York"})], 11 | ) 12 | 13 | with trace("Web search example"): 14 | result = await Runner.run( 15 | agent, 16 | "search the web for 'local sports news' and give me 1 interesting update in a sentence.", 17 | ) 18 | print(result.final_output) 19 | # The New York Giants are reportedly pursuing quarterback Aaron Rodgers after his ... 20 | 21 | 22 | if __name__ == "__main__": 23 | asyncio.run(main()) 24 | -------------------------------------------------------------------------------- /examples/voice/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/voice/__init__.py -------------------------------------------------------------------------------- /examples/voice/static/README.md: -------------------------------------------------------------------------------- 1 | # Static voice demo 2 | 3 | This demo operates by capturing a recording, then running a voice pipeline on it. 4 | 5 | Run via: 6 | 7 | ``` 8 | python -m examples.voice.static.main 9 | ``` 10 | 11 | ## How it works 12 | 13 | 1. We create a `VoicePipeline`, setup with a custom workflow. The workflow runs an Agent, but it also has some custom responses if you say the secret word. 14 | 2. When you speak, audio is forwarded to the voice pipeline. When you stop speaking, the agent runs. 15 | 3. The pipeline is run with the audio, which causes it to: 16 | 1. Transcribe the audio 17 | 2. Feed the transcription to the workflow, which runs the agent. 18 | 3. Stream the output of the agent to a text-to-speech model. 19 | 4. Play the audio. 20 | 21 | Some suggested examples to try: 22 | 23 | - Tell me a joke (_the assistant tells you a joke_) 24 | - What's the weather in Tokyo? (_will call the `get_weather` tool and then speak_) 25 | - Hola, como estas? (_will handoff to the spanish agent_) 26 | - Tell me about dogs. 
(_will respond with the hardcoded "you guessed the secret word" message_) 27 | -------------------------------------------------------------------------------- /examples/voice/static/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/voice/static/__init__.py -------------------------------------------------------------------------------- /examples/voice/static/util.py: -------------------------------------------------------------------------------- 1 | import curses 2 | import time 3 | 4 | import numpy as np 5 | import numpy.typing as npt 6 | import sounddevice as sd 7 | 8 | 9 | def _record_audio(screen: curses.window) -> npt.NDArray[np.float32]: 10 | screen.nodelay(True) # Non-blocking input 11 | screen.clear() 12 | screen.addstr( 13 | "Press to start recording. Press again to stop recording.\n" 14 | ) 15 | screen.refresh() 16 | 17 | recording = False 18 | audio_buffer: list[npt.NDArray[np.float32]] = [] 19 | 20 | def _audio_callback(indata, frames, time_info, status): 21 | if status: 22 | screen.addstr(f"Status: {status}\n") 23 | screen.refresh() 24 | if recording: 25 | audio_buffer.append(indata.copy()) 26 | 27 | # Open the audio stream with the callback. 28 | with sd.InputStream(samplerate=24000, channels=1, dtype=np.float32, callback=_audio_callback): 29 | while True: 30 | key = screen.getch() 31 | if key == ord(" "): 32 | recording = not recording 33 | if recording: 34 | screen.addstr("Recording started...\n") 35 | else: 36 | screen.addstr("Recording stopped.\n") 37 | break 38 | screen.refresh() 39 | time.sleep(0.01) 40 | 41 | # Combine recorded audio chunks. 42 | if audio_buffer: 43 | audio_data = np.concatenate(audio_buffer, axis=0) 44 | else: 45 | audio_data = np.empty((0,), dtype=np.float32) 46 | 47 | return audio_data 48 | 49 | 50 | def record_audio(): 51 | # Using curses to record audio in a way that: 52 | # - doesn't require accessibility permissions on macos 53 | # - doesn't block the terminal 54 | audio_data = curses.wrapper(_record_audio) 55 | return audio_data 56 | 57 | 58 | class AudioPlayer: 59 | def __enter__(self): 60 | self.stream = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16) 61 | self.stream.start() 62 | return self 63 | 64 | def __exit__(self, exc_type, exc_value, traceback): 65 | self.stream.stop() # wait for the stream to finish 66 | self.stream.close() 67 | 68 | def add_audio(self, audio_data: npt.NDArray[np.int16]): 69 | self.stream.write(audio_data) 70 | -------------------------------------------------------------------------------- /examples/voice/streamed/README.md: -------------------------------------------------------------------------------- 1 | # Streamed voice demo 2 | 3 | This is an interactive demo, where you can talk to an Agent conversationally. It uses the voice pipeline's built in turn detection feature, so if you stop speaking the Agent responds. 4 | 5 | Run via: 6 | 7 | ``` 8 | python -m examples.voice.streamed.main 9 | ``` 10 | 11 | ## How it works 12 | 13 | 1. We create a `VoicePipeline`, setup with a `SingleAgentVoiceWorkflow`. This is a workflow that starts at an Assistant agent, has tools and handoffs. 14 | 2. Audio input is captured from the terminal. 15 | 3. The pipeline is run with the recorded audio, which causes it to: 16 | 1. Transcribe the audio 17 | 2. Feed the transcription to the workflow, which runs the agent. 18 | 3. 
Stream the output of the agent to a text-to-speech model. 19 | 4. Play the audio. 20 | 21 | Some suggested examples to try: 22 | 23 | - Tell me a joke (_the assistant tells you a joke_) 24 | - What's the weather in Tokyo? (_will call the `get_weather` tool and then speak_) 25 | - Hola, como estas? (_will handoff to the spanish agent_) 26 | -------------------------------------------------------------------------------- /examples/voice/streamed/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/examples/voice/streamed/__init__.py -------------------------------------------------------------------------------- /src/agents/_config.py: -------------------------------------------------------------------------------- 1 | from openai import AsyncOpenAI 2 | from typing_extensions import Literal 3 | 4 | from .models import _openai_shared 5 | from .tracing import set_tracing_export_api_key 6 | 7 | 8 | def set_default_openai_key(key: str, use_for_tracing: bool) -> None: 9 | _openai_shared.set_default_openai_key(key) 10 | 11 | if use_for_tracing: 12 | set_tracing_export_api_key(key) 13 | 14 | 15 | def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None: 16 | _openai_shared.set_default_openai_client(client) 17 | 18 | if use_for_tracing: 19 | set_tracing_export_api_key(client.api_key) 20 | 21 | 22 | def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None: 23 | if api == "chat_completions": 24 | _openai_shared.set_use_responses_by_default(False) 25 | else: 26 | _openai_shared.set_use_responses_by_default(True) 27 | -------------------------------------------------------------------------------- /src/agents/_debug.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def _debug_flag_enabled(flag: str) -> bool: 5 | flag_value = os.getenv(flag) 6 | return flag_value is not None and (flag_value == "1" or flag_value.lower() == "true") 7 | 8 | 9 | DONT_LOG_MODEL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_MODEL_DATA") 10 | """By default we don't log LLM inputs/outputs, to prevent exposing sensitive information. Set this 11 | flag to enable logging them. 12 | """ 13 | 14 | DONT_LOG_TOOL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_TOOL_DATA") 15 | """By default we don't log tool call inputs/outputs, to prevent exposing sensitive information. Set 16 | this flag to enable logging them. 17 | """ 18 | -------------------------------------------------------------------------------- /src/agents/extensions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/src/agents/extensions/__init__.py -------------------------------------------------------------------------------- /src/agents/extensions/handoff_filters.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from ..handoffs import HandoffInputData 4 | from ..items import ( 5 | HandoffCallItem, 6 | HandoffOutputItem, 7 | RunItem, 8 | ToolCallItem, 9 | ToolCallOutputItem, 10 | TResponseInputItem, 11 | ) 12 | 13 | """Contains common handoff input filters, for convenience. 
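For example, `remove_all_tools` below is typically passed as a handoff input filter, roughly like this (here `billing_agent` is a stand-in for any agent you hand off to):

    from agents import handoff
    from agents.extensions import handoff_filters

    handoff_obj = handoff(agent=billing_agent, input_filter=handoff_filters.remove_all_tools)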
""" 14 | 15 | 16 | def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInputData: 17 | """Filters out all tool items: file search, web search and function calls+output.""" 18 | 19 | history = handoff_input_data.input_history 20 | new_items = handoff_input_data.new_items 21 | 22 | filtered_history = ( 23 | _remove_tool_types_from_input(history) if isinstance(history, tuple) else history 24 | ) 25 | filtered_pre_handoff_items = _remove_tools_from_items(handoff_input_data.pre_handoff_items) 26 | filtered_new_items = _remove_tools_from_items(new_items) 27 | 28 | return HandoffInputData( 29 | input_history=filtered_history, 30 | pre_handoff_items=filtered_pre_handoff_items, 31 | new_items=filtered_new_items, 32 | ) 33 | 34 | 35 | def _remove_tools_from_items(items: tuple[RunItem, ...]) -> tuple[RunItem, ...]: 36 | filtered_items = [] 37 | for item in items: 38 | if ( 39 | isinstance(item, HandoffCallItem) 40 | or isinstance(item, HandoffOutputItem) 41 | or isinstance(item, ToolCallItem) 42 | or isinstance(item, ToolCallOutputItem) 43 | ): 44 | continue 45 | filtered_items.append(item) 46 | return tuple(filtered_items) 47 | 48 | 49 | def _remove_tool_types_from_input( 50 | items: tuple[TResponseInputItem, ...], 51 | ) -> tuple[TResponseInputItem, ...]: 52 | tool_types = [ 53 | "function_call", 54 | "function_call_output", 55 | "computer_call", 56 | "computer_call_output", 57 | "file_search_call", 58 | "web_search_call", 59 | ] 60 | 61 | filtered_items: list[TResponseInputItem] = [] 62 | for item in items: 63 | itype = item.get("type") 64 | if itype in tool_types: 65 | continue 66 | filtered_items.append(item) 67 | return tuple(filtered_items) 68 | -------------------------------------------------------------------------------- /src/agents/extensions/handoff_prompt.py: -------------------------------------------------------------------------------- 1 | # A recommended prompt prefix for agents that use handoffs. We recommend including this or 2 | # similar instructions in any agents that use handoffs. 3 | RECOMMENDED_PROMPT_PREFIX = ( 4 | "# System context\n" 5 | "You are part of a multi-agent system called the Agents SDK, designed to make agent " 6 | "coordination and execution easy. Agents uses two primary abstraction: **Agents** and " 7 | "**Handoffs**. An agent encompasses instructions and tools and can hand off a " 8 | "conversation to another agent when appropriate. " 9 | "Handoffs are achieved by calling a handoff function, generally named " 10 | "`transfer_to_`. Transfers between agents are handled seamlessly in the background;" 11 | " do not mention or draw attention to these transfers in your conversation with the user.\n" 12 | ) 13 | 14 | 15 | def prompt_with_handoff_instructions(prompt: str) -> str: 16 | """ 17 | Add recommended instructions to the prompt for agents that use handoffs. 
18 | """ 19 | return f"{RECOMMENDED_PROMPT_PREFIX}\n\n{prompt}" 20 | -------------------------------------------------------------------------------- /src/agents/extensions/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/src/agents/extensions/models/__init__.py -------------------------------------------------------------------------------- /src/agents/extensions/models/litellm_provider.py: -------------------------------------------------------------------------------- 1 | from ...models.interface import Model, ModelProvider 2 | from .litellm_model import LitellmModel 3 | 4 | DEFAULT_MODEL: str = "gpt-4.1" 5 | 6 | 7 | class LitellmProvider(ModelProvider): 8 | """A ModelProvider that uses LiteLLM to route to any model provider. You can use it via: 9 | ```python 10 | Runner.run(agent, input, run_config=RunConfig(model_provider=LitellmProvider())) 11 | ``` 12 | See supported models here: [litellm models](https://docs.litellm.ai/docs/providers). 13 | 14 | NOTE: API keys must be set via environment variables. If you're using models that require 15 | additional configuration (e.g. Azure API base or version), those must also be set via the 16 | environment variables that LiteLLM expects. If you have more advanced needs, we recommend 17 | copy-pasting this class and making any modifications you need. 18 | """ 19 | 20 | def get_model(self, model_name: str | None) -> Model: 21 | return LitellmModel(model_name or DEFAULT_MODEL) 22 | -------------------------------------------------------------------------------- /src/agents/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | logger = logging.getLogger("openai.agents") 4 | -------------------------------------------------------------------------------- /src/agents/mcp/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | from .server import ( 3 | MCPServer, 4 | MCPServerSse, 5 | MCPServerSseParams, 6 | MCPServerStdio, 7 | MCPServerStdioParams, 8 | MCPServerStreamableHttp, 9 | MCPServerStreamableHttpParams, 10 | ) 11 | except ImportError: 12 | pass 13 | 14 | from .util import MCPUtil 15 | 16 | __all__ = [ 17 | "MCPServer", 18 | "MCPServerSse", 19 | "MCPServerSseParams", 20 | "MCPServerStdio", 21 | "MCPServerStdioParams", 22 | "MCPServerStreamableHttp", 23 | "MCPServerStreamableHttpParams", 24 | "MCPUtil", 25 | ] 26 | -------------------------------------------------------------------------------- /src/agents/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/src/agents/models/__init__.py -------------------------------------------------------------------------------- /src/agents/models/_openai_shared.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from openai import AsyncOpenAI 4 | 5 | _default_openai_key: str | None = None 6 | _default_openai_client: AsyncOpenAI | None = None 7 | _use_responses_by_default: bool = True 8 | 9 | 10 | def set_default_openai_key(key: str) -> None: 11 | global _default_openai_key 12 | _default_openai_key = key 13 | 14 | 15 | def get_default_openai_key() -> str | None: 16 | return _default_openai_key 17 | 18 | 19 | def 
set_default_openai_client(client: AsyncOpenAI) -> None: 20 | global _default_openai_client 21 | _default_openai_client = client 22 | 23 | 24 | def get_default_openai_client() -> AsyncOpenAI | None: 25 | return _default_openai_client 26 | 27 | 28 | def set_use_responses_by_default(use_responses: bool) -> None: 29 | global _use_responses_by_default 30 | _use_responses_by_default = use_responses 31 | 32 | 33 | def get_use_responses_by_default() -> bool: 34 | return _use_responses_by_default 35 | -------------------------------------------------------------------------------- /src/agents/models/chatcmpl_helpers.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from openai import AsyncOpenAI 4 | 5 | from ..model_settings import ModelSettings 6 | from ..version import __version__ 7 | 8 | _USER_AGENT = f"Agents/Python {__version__}" 9 | HEADERS = {"User-Agent": _USER_AGENT} 10 | 11 | 12 | class ChatCmplHelpers: 13 | @classmethod 14 | def is_openai(cls, client: AsyncOpenAI): 15 | return str(client.base_url).startswith("https://api.openai.com") 16 | 17 | @classmethod 18 | def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None: 19 | # Match the behavior of Responses where store is True when not given 20 | default_store = True if cls.is_openai(client) else None 21 | return model_settings.store if model_settings.store is not None else default_store 22 | 23 | @classmethod 24 | def get_stream_options_param( 25 | cls, client: AsyncOpenAI, model_settings: ModelSettings, stream: bool 26 | ) -> dict[str, bool] | None: 27 | if not stream: 28 | return None 29 | 30 | default_include_usage = True if cls.is_openai(client) else None 31 | include_usage = ( 32 | model_settings.include_usage 33 | if model_settings.include_usage is not None 34 | else default_include_usage 35 | ) 36 | stream_options = {"include_usage": include_usage} if include_usage is not None else None 37 | return stream_options 38 | -------------------------------------------------------------------------------- /src/agents/models/fake_id.py: -------------------------------------------------------------------------------- 1 | FAKE_RESPONSES_ID = "__fake_id__" 2 | """This is a placeholder ID used to fill in the `id` field in Responses API related objects. It's 3 | useful when you're creating Responses objects from non-Responses APIs, e.g. the OpenAI Chat 4 | Completions API or other LLM providers. 5 | """ 6 | -------------------------------------------------------------------------------- /src/agents/py.typed: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/agents/run_context.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from typing import Any, Generic 3 | 4 | from typing_extensions import TypeVar 5 | 6 | from .usage import Usage 7 | 8 | TContext = TypeVar("TContext", default=Any) 9 | 10 | 11 | @dataclass 12 | class RunContextWrapper(Generic[TContext]): 13 | """This wraps the context object that you passed to `Runner.run()`. It also contains 14 | information about the usage of the agent run so far. 15 | 16 | NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code 17 | you implement, like tool functions, callbacks, hooks, etc. 
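A rough sketch of how a context object reaches a tool, assuming `dataclass`, `function_tool`, `Runner`, and an `agent` configured with this tool are in scope, and that the tool's first parameter is annotated as a `RunContextWrapper` (run inside an async function):

        @dataclass
        class UserInfo:
            name: str

        @function_tool
        def greet(wrapper: RunContextWrapper[UserInfo]) -> str:
            return f"Hello, {wrapper.context.name}!"

        result = await Runner.run(agent, "Greet me.", context=UserInfo(name="Ada"))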
18 | """ 19 | 20 | context: TContext 21 | """The context object (or None), passed by you to `Runner.run()`""" 22 | 23 | usage: Usage = field(default_factory=Usage) 24 | """The usage of the agent run so far. For streamed responses, the usage will be stale until the 25 | last chunk of the stream is processed. 26 | """ 27 | -------------------------------------------------------------------------------- /src/agents/stream_events.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass 4 | from typing import Any, Literal, Union 5 | 6 | from typing_extensions import TypeAlias 7 | 8 | from .agent import Agent 9 | from .items import RunItem, TResponseStreamEvent 10 | 11 | 12 | @dataclass 13 | class RawResponsesStreamEvent: 14 | """Streaming event from the LLM. These are 'raw' events, i.e. they are directly passed through 15 | from the LLM. 16 | """ 17 | 18 | data: TResponseStreamEvent 19 | """The raw responses streaming event from the LLM.""" 20 | 21 | type: Literal["raw_response_event"] = "raw_response_event" 22 | """The type of the event.""" 23 | 24 | 25 | @dataclass 26 | class RunItemStreamEvent: 27 | """Streaming events that wrap a `RunItem`. As the agent processes the LLM response, it will 28 | generate these events for new messages, tool calls, tool outputs, handoffs, etc. 29 | """ 30 | 31 | name: Literal[ 32 | "message_output_created", 33 | "handoff_requested", 34 | # This is misspelled, but we can't change it because that would be a breaking change 35 | "handoff_occured", 36 | "tool_called", 37 | "tool_output", 38 | "reasoning_item_created", 39 | "mcp_approval_requested", 40 | "mcp_list_tools", 41 | ] 42 | """The name of the event.""" 43 | 44 | item: RunItem 45 | """The item that was created.""" 46 | 47 | type: Literal["run_item_stream_event"] = "run_item_stream_event" 48 | 49 | 50 | @dataclass 51 | class AgentUpdatedStreamEvent: 52 | """Event that notifies that there is a new agent running.""" 53 | 54 | new_agent: Agent[Any] 55 | """The new agent.""" 56 | 57 | type: Literal["agent_updated_stream_event"] = "agent_updated_stream_event" 58 | 59 | 60 | StreamEvent: TypeAlias = Union[RawResponsesStreamEvent, RunItemStreamEvent, AgentUpdatedStreamEvent] 61 | """A streaming event from an agent.""" 62 | -------------------------------------------------------------------------------- /src/agents/tracing/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | logger = logging.getLogger("openai.agents.tracing") 4 | -------------------------------------------------------------------------------- /src/agents/tracing/processor_interface.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import TYPE_CHECKING, Any 3 | 4 | if TYPE_CHECKING: 5 | from .spans import Span 6 | from .traces import Trace 7 | 8 | 9 | class TracingProcessor(abc.ABC): 10 | """Interface for processing spans.""" 11 | 12 | @abc.abstractmethod 13 | def on_trace_start(self, trace: "Trace") -> None: 14 | """Called when a trace is started. 15 | 16 | Args: 17 | trace: The trace that started. 18 | """ 19 | pass 20 | 21 | @abc.abstractmethod 22 | def on_trace_end(self, trace: "Trace") -> None: 23 | """Called when a trace is finished. 24 | 25 | Args: 26 | trace: The trace that started. 
27 | """ 28 | pass 29 | 30 | @abc.abstractmethod 31 | def on_span_start(self, span: "Span[Any]") -> None: 32 | """Called when a span is started. 33 | 34 | Args: 35 | span: The span that started. 36 | """ 37 | pass 38 | 39 | @abc.abstractmethod 40 | def on_span_end(self, span: "Span[Any]") -> None: 41 | """Called when a span is finished. Should not block or raise exceptions. 42 | 43 | Args: 44 | span: The span that finished. 45 | """ 46 | pass 47 | 48 | @abc.abstractmethod 49 | def shutdown(self) -> None: 50 | """Called when the application stops.""" 51 | pass 52 | 53 | @abc.abstractmethod 54 | def force_flush(self) -> None: 55 | """Forces an immediate flush of all queued spans/traces.""" 56 | pass 57 | 58 | 59 | class TracingExporter(abc.ABC): 60 | """Exports traces and spans. For example, could log them or send them to a backend.""" 61 | 62 | @abc.abstractmethod 63 | def export(self, items: list["Trace | Span[Any]"]) -> None: 64 | """Exports a list of traces and spans. 65 | 66 | Args: 67 | items: The items to export. 68 | """ 69 | pass 70 | -------------------------------------------------------------------------------- /src/agents/tracing/scope.py: -------------------------------------------------------------------------------- 1 | # Holds the current active span 2 | import contextvars 3 | from typing import TYPE_CHECKING, Any 4 | 5 | from ..logger import logger 6 | 7 | if TYPE_CHECKING: 8 | from .spans import Span 9 | from .traces import Trace 10 | 11 | _current_span: contextvars.ContextVar["Span[Any] | None"] = contextvars.ContextVar( 12 | "current_span", default=None 13 | ) 14 | 15 | _current_trace: contextvars.ContextVar["Trace | None"] = contextvars.ContextVar( 16 | "current_trace", default=None 17 | ) 18 | 19 | 20 | class Scope: 21 | """ 22 | Manages the current span and trace in the context. 
23 | """ 24 | 25 | @classmethod 26 | def get_current_span(cls) -> "Span[Any] | None": 27 | return _current_span.get() 28 | 29 | @classmethod 30 | def set_current_span(cls, span: "Span[Any] | None") -> "contextvars.Token[Span[Any] | None]": 31 | return _current_span.set(span) 32 | 33 | @classmethod 34 | def reset_current_span(cls, token: "contextvars.Token[Span[Any] | None]") -> None: 35 | _current_span.reset(token) 36 | 37 | @classmethod 38 | def get_current_trace(cls) -> "Trace | None": 39 | return _current_trace.get() 40 | 41 | @classmethod 42 | def set_current_trace(cls, trace: "Trace | None") -> "contextvars.Token[Trace | None]": 43 | logger.debug(f"Setting current trace: {trace.trace_id if trace else None}") 44 | return _current_trace.set(trace) 45 | 46 | @classmethod 47 | def reset_current_trace(cls, token: "contextvars.Token[Trace | None]") -> None: 48 | logger.debug("Resetting current trace") 49 | _current_trace.reset(token) 50 | -------------------------------------------------------------------------------- /src/agents/tracing/util.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from datetime import datetime, timezone 3 | 4 | 5 | def time_iso() -> str: 6 | """Returns the current time in ISO 8601 format.""" 7 | return datetime.now(timezone.utc).isoformat() 8 | 9 | 10 | def gen_trace_id() -> str: 11 | """Generates a new trace ID.""" 12 | return f"trace_{uuid.uuid4().hex}" 13 | 14 | 15 | def gen_span_id() -> str: 16 | """Generates a new span ID.""" 17 | return f"span_{uuid.uuid4().hex[:24]}" 18 | 19 | 20 | def gen_group_id() -> str: 21 | """Generates a new group ID.""" 22 | return f"group_{uuid.uuid4().hex[:24]}" 23 | -------------------------------------------------------------------------------- /src/agents/usage.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | 3 | from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails 4 | 5 | 6 | @dataclass 7 | class Usage: 8 | requests: int = 0 9 | """Total requests made to the LLM API.""" 10 | 11 | input_tokens: int = 0 12 | """Total input tokens sent, across all requests.""" 13 | 14 | input_tokens_details: InputTokensDetails = field( 15 | default_factory=lambda: InputTokensDetails(cached_tokens=0) 16 | ) 17 | """Details about the input tokens, matching responses API usage details.""" 18 | output_tokens: int = 0 19 | """Total output tokens received, across all requests.""" 20 | 21 | output_tokens_details: OutputTokensDetails = field( 22 | default_factory=lambda: OutputTokensDetails(reasoning_tokens=0) 23 | ) 24 | """Details about the output tokens, matching responses API usage details.""" 25 | 26 | total_tokens: int = 0 27 | """Total tokens sent and received, across all requests.""" 28 | 29 | def add(self, other: "Usage") -> None: 30 | self.requests += other.requests if other.requests else 0 31 | self.input_tokens += other.input_tokens if other.input_tokens else 0 32 | self.output_tokens += other.output_tokens if other.output_tokens else 0 33 | self.total_tokens += other.total_tokens if other.total_tokens else 0 34 | self.input_tokens_details = InputTokensDetails( 35 | cached_tokens=self.input_tokens_details.cached_tokens 36 | + other.input_tokens_details.cached_tokens 37 | ) 38 | 39 | self.output_tokens_details = OutputTokensDetails( 40 | reasoning_tokens=self.output_tokens_details.reasoning_tokens 41 | + other.output_tokens_details.reasoning_tokens 42 | ) 43 | 
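To make the aggregation semantics of `Usage.add` concrete, here is a minimal, self-contained sketch (illustrative only, not part of the SDK): it shows that the top-level counters as well as the cached/reasoning token details are summed.

```python
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

from agents.usage import Usage

# Two hypothetical per-request usage records.
first = Usage(
    requests=1,
    input_tokens=10,
    input_tokens_details=InputTokensDetails(cached_tokens=2),
    output_tokens=5,
    output_tokens_details=OutputTokensDetails(reasoning_tokens=1),
    total_tokens=15,
)
second = Usage(
    requests=1,
    input_tokens=4,
    input_tokens_details=InputTokensDetails(cached_tokens=1),
    output_tokens=6,
    output_tokens_details=OutputTokensDetails(reasoning_tokens=3),
    total_tokens=10,
)

# add() accumulates in place: counters and the nested detail fields are summed.
first.add(second)
assert first.requests == 2 and first.total_tokens == 25
assert first.input_tokens_details.cached_tokens == 3
assert first.output_tokens_details.reasoning_tokens == 4
```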
-------------------------------------------------------------------------------- /src/agents/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/src/agents/util/__init__.py -------------------------------------------------------------------------------- /src/agents/util/_coro.py: -------------------------------------------------------------------------------- 1 | async def noop_coroutine() -> None: 2 | pass 3 | -------------------------------------------------------------------------------- /src/agents/util/_error_tracing.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from ..logger import logger 4 | from ..tracing import Span, SpanError, get_current_span 5 | 6 | 7 | def attach_error_to_span(span: Span[Any], error: SpanError) -> None: 8 | span.set_error(error) 9 | 10 | 11 | def attach_error_to_current_span(error: SpanError) -> None: 12 | span = get_current_span() 13 | if span: 14 | attach_error_to_span(span, error) 15 | else: 16 | logger.warning(f"No span to add error {error} to") 17 | -------------------------------------------------------------------------------- /src/agents/util/_json.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Literal 4 | 5 | from pydantic import TypeAdapter, ValidationError 6 | from typing_extensions import TypeVar 7 | 8 | from ..exceptions import ModelBehaviorError 9 | from ..tracing import SpanError 10 | from ._error_tracing import attach_error_to_current_span 11 | 12 | T = TypeVar("T") 13 | 14 | 15 | def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T: 16 | partial_setting: bool | Literal["off", "on", "trailing-strings"] = ( 17 | "trailing-strings" if partial else False 18 | ) 19 | try: 20 | validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting) 21 | return validated 22 | except ValidationError as e: 23 | attach_error_to_current_span( 24 | SpanError( 25 | message="Invalid JSON provided", 26 | data={}, 27 | ) 28 | ) 29 | raise ModelBehaviorError( 30 | f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}" 31 | ) from e 32 | -------------------------------------------------------------------------------- /src/agents/util/_transforms.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | def transform_string_function_style(name: str) -> str: 5 | # Replace spaces with underscores 6 | name = name.replace(" ", "_") 7 | 8 | # Replace non-alphanumeric characters with underscores 9 | name = re.sub(r"[^a-zA-Z0-9]", "_", name) 10 | 11 | return name.lower() 12 | -------------------------------------------------------------------------------- /src/agents/util/_types.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Awaitable 2 | from typing import Union 3 | 4 | from typing_extensions import TypeVar 5 | 6 | T = TypeVar("T") 7 | MaybeAwaitable = Union[Awaitable[T], T] 8 | -------------------------------------------------------------------------------- /src/agents/version.py: -------------------------------------------------------------------------------- 1 | import importlib.metadata 2 | 3 | try: 4 | __version__ = importlib.metadata.version("openai-agents") 5 
| except importlib.metadata.PackageNotFoundError: 6 | # Fallback if running from source without being installed 7 | __version__ = "0.0.0" 8 | -------------------------------------------------------------------------------- /src/agents/voice/__init__.py: -------------------------------------------------------------------------------- 1 | from .events import VoiceStreamEvent, VoiceStreamEventAudio, VoiceStreamEventLifecycle 2 | from .exceptions import STTWebsocketConnectionError 3 | from .input import AudioInput, StreamedAudioInput 4 | from .model import ( 5 | StreamedTranscriptionSession, 6 | STTModel, 7 | STTModelSettings, 8 | TTSModel, 9 | TTSModelSettings, 10 | TTSVoice, 11 | VoiceModelProvider, 12 | ) 13 | from .models.openai_model_provider import OpenAIVoiceModelProvider 14 | from .models.openai_stt import OpenAISTTModel, OpenAISTTTranscriptionSession 15 | from .models.openai_tts import OpenAITTSModel 16 | from .pipeline import VoicePipeline 17 | from .pipeline_config import VoicePipelineConfig 18 | from .result import StreamedAudioResult 19 | from .utils import get_sentence_based_splitter 20 | from .workflow import ( 21 | SingleAgentVoiceWorkflow, 22 | SingleAgentWorkflowCallbacks, 23 | VoiceWorkflowBase, 24 | VoiceWorkflowHelper, 25 | ) 26 | 27 | __all__ = [ 28 | "AudioInput", 29 | "StreamedAudioInput", 30 | "STTModel", 31 | "STTModelSettings", 32 | "TTSModel", 33 | "TTSModelSettings", 34 | "TTSVoice", 35 | "VoiceModelProvider", 36 | "StreamedAudioResult", 37 | "SingleAgentVoiceWorkflow", 38 | "OpenAIVoiceModelProvider", 39 | "OpenAISTTModel", 40 | "OpenAITTSModel", 41 | "VoiceStreamEventAudio", 42 | "VoiceStreamEventLifecycle", 43 | "VoiceStreamEvent", 44 | "VoicePipeline", 45 | "VoicePipelineConfig", 46 | "get_sentence_based_splitter", 47 | "VoiceWorkflowHelper", 48 | "VoiceWorkflowBase", 49 | "SingleAgentWorkflowCallbacks", 50 | "StreamedTranscriptionSession", 51 | "OpenAISTTTranscriptionSession", 52 | "STTWebsocketConnectionError", 53 | ] 54 | -------------------------------------------------------------------------------- /src/agents/voice/events.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass 4 | from typing import Literal, Union 5 | 6 | from typing_extensions import TypeAlias 7 | 8 | from .imports import np, npt 9 | 10 | 11 | @dataclass 12 | class VoiceStreamEventAudio: 13 | """Streaming event from the VoicePipeline""" 14 | 15 | data: npt.NDArray[np.int16 | np.float32] | None 16 | """The audio data.""" 17 | 18 | type: Literal["voice_stream_event_audio"] = "voice_stream_event_audio" 19 | """The type of event.""" 20 | 21 | 22 | @dataclass 23 | class VoiceStreamEventLifecycle: 24 | """Streaming event from the VoicePipeline""" 25 | 26 | event: Literal["turn_started", "turn_ended", "session_ended"] 27 | """The event that occurred.""" 28 | 29 | type: Literal["voice_stream_event_lifecycle"] = "voice_stream_event_lifecycle" 30 | """The type of event.""" 31 | 32 | 33 | @dataclass 34 | class VoiceStreamEventError: 35 | """Streaming event from the VoicePipeline""" 36 | 37 | error: Exception 38 | """The error that occurred.""" 39 | 40 | type: Literal["voice_stream_event_error"] = "voice_stream_event_error" 41 | """The type of event.""" 42 | 43 | 44 | VoiceStreamEvent: TypeAlias = Union[ 45 | VoiceStreamEventAudio, VoiceStreamEventLifecycle, VoiceStreamEventError 46 | ] 47 | """An event from the `VoicePipeline`, streamed via `StreamedAudioResult.stream()`.""" 48 | 
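A hedged sketch of how these event types might be consumed from a `StreamedAudioResult` — the `pipeline` (a configured `VoicePipeline`) and `audio_input` objects are assumed to already exist, and this is not SDK code; the dispatch mirrors the `VoiceStreamEvent` union above.

```python
# Illustrative consumer loop; assumes `pipeline` is a configured VoicePipeline
# and `audio_input` is an AudioInput.
async def play_result(pipeline, audio_input) -> None:
    result = await pipeline.run(audio_input)
    async for event in result.stream():
        if event.type == "voice_stream_event_audio":
            if event.data is not None:
                ...  # hand the numpy audio chunk to your playback device
        elif event.type == "voice_stream_event_lifecycle":
            # One of: turn_started, turn_ended, session_ended
            print(f"lifecycle: {event.event}")
        elif event.type == "voice_stream_event_error":
            raise event.error
```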
-------------------------------------------------------------------------------- /src/agents/voice/exceptions.py: -------------------------------------------------------------------------------- 1 | from ..exceptions import AgentsException 2 | 3 | 4 | class STTWebsocketConnectionError(AgentsException): 5 | """Exception raised when the STT websocket connection fails.""" 6 | 7 | def __init__(self, message: str): 8 | self.message = message 9 | -------------------------------------------------------------------------------- /src/agents/voice/imports.py: -------------------------------------------------------------------------------- 1 | try: 2 | import numpy as np 3 | import numpy.typing as npt 4 | import websockets 5 | except ImportError as _e: 6 | raise ImportError( 7 | "`numpy` + `websockets` are required to use voice. You can install them via the optional " 8 | "dependency group: `pip install 'openai-agents[voice]'`." 9 | ) from _e 10 | 11 | __all__ = ["np", "npt", "websockets"] 12 | -------------------------------------------------------------------------------- /src/agents/voice/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/src/agents/voice/models/__init__.py -------------------------------------------------------------------------------- /src/agents/voice/models/openai_tts.py: -------------------------------------------------------------------------------- 1 | from collections.abc import AsyncIterator 2 | from typing import Literal 3 | 4 | from openai import AsyncOpenAI 5 | 6 | from ..model import TTSModel, TTSModelSettings 7 | 8 | DEFAULT_VOICE: Literal["ash"] = "ash" 9 | 10 | 11 | class OpenAITTSModel(TTSModel): 12 | """A text-to-speech model for OpenAI.""" 13 | 14 | def __init__( 15 | self, 16 | model: str, 17 | openai_client: AsyncOpenAI, 18 | ): 19 | """Create a new OpenAI text-to-speech model. 20 | 21 | Args: 22 | model: The name of the model to use. 23 | openai_client: The OpenAI client to use. 24 | """ 25 | self.model = model 26 | self._client = openai_client 27 | 28 | @property 29 | def model_name(self) -> str: 30 | return self.model 31 | 32 | async def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]: 33 | """Run the text-to-speech model. 34 | 35 | Args: 36 | text: The text to convert to speech. 37 | settings: The settings to use for the text-to-speech model. 38 | 39 | Returns: 40 | An iterator of audio chunks. 
41 | """ 42 | response = self._client.audio.speech.with_streaming_response.create( 43 | model=self.model, 44 | voice=settings.voice or DEFAULT_VOICE, 45 | input=text, 46 | response_format="pcm", 47 | extra_body={ 48 | "instructions": settings.instructions, 49 | }, 50 | ) 51 | 52 | async with response as stream: 53 | async for chunk in stream.iter_bytes(chunk_size=1024): 54 | yield chunk 55 | -------------------------------------------------------------------------------- /src/agents/voice/pipeline_config.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass, field 4 | from typing import Any 5 | 6 | from ..tracing.util import gen_group_id 7 | from .model import STTModelSettings, TTSModelSettings, VoiceModelProvider 8 | from .models.openai_model_provider import OpenAIVoiceModelProvider 9 | 10 | 11 | @dataclass 12 | class VoicePipelineConfig: 13 | """Configuration for a `VoicePipeline`.""" 14 | 15 | model_provider: VoiceModelProvider = field(default_factory=OpenAIVoiceModelProvider) 16 | """The voice model provider to use for the pipeline. Defaults to OpenAI.""" 17 | 18 | tracing_disabled: bool = False 19 | """Whether to disable tracing of the pipeline. Defaults to `False`.""" 20 | 21 | trace_include_sensitive_data: bool = True 22 | """Whether to include sensitive data in traces. Defaults to `True`. This is specifically for the 23 | voice pipeline, and not for anything that goes on inside your Workflow.""" 24 | 25 | trace_include_sensitive_audio_data: bool = True 26 | """Whether to include audio data in traces. Defaults to `True`.""" 27 | 28 | workflow_name: str = "Voice Agent" 29 | """The name of the workflow to use for tracing. Defaults to `Voice Agent`.""" 30 | 31 | group_id: str = field(default_factory=gen_group_id) 32 | """ 33 | A grouping identifier to use for tracing, to link multiple traces from the same conversation 34 | or process. If not provided, we will create a random group ID. 35 | """ 36 | 37 | trace_metadata: dict[str, Any] | None = None 38 | """ 39 | An optional dictionary of additional metadata to include with the trace. 40 | """ 41 | 42 | stt_settings: STTModelSettings = field(default_factory=STTModelSettings) 43 | """The settings to use for the STT model.""" 44 | 45 | tts_settings: TTSModelSettings = field(default_factory=TTSModelSettings) 46 | """The settings to use for the TTS model.""" 47 | -------------------------------------------------------------------------------- /src/agents/voice/utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | from typing import Callable 3 | 4 | 5 | def get_sentence_based_splitter( 6 | min_sentence_length: int = 20, 7 | ) -> Callable[[str], tuple[str, str]]: 8 | """Returns a function that splits text into chunks based on sentence boundaries. 9 | 10 | Args: 11 | min_sentence_length: The minimum length of a sentence to be included in a chunk. 12 | 13 | Returns: 14 | A function that splits text into chunks based on sentence boundaries. 15 | """ 16 | 17 | def sentence_based_text_splitter(text_buffer: str) -> tuple[str, str]: 18 | """ 19 | A function to split the text into chunks. This is useful if you want to split the text into 20 | chunks before sending it to the TTS model rather than waiting for the whole text to be 21 | processed. 22 | 23 | Args: 24 | text_buffer: The text to split. 25 | 26 | Returns: 27 | A tuple of the text to process and the remaining text buffer. 
28 | """ 29 | sentences = re.split(r"(?<=[.!?])\s+", text_buffer.strip()) 30 | if len(sentences) >= 1: 31 | combined_sentences = " ".join(sentences[:-1]) 32 | if len(combined_sentences) >= min_sentence_length: 33 | remaining_text_buffer = sentences[-1] 34 | return combined_sentences, remaining_text_buffer 35 | return "", text_buffer 36 | 37 | return sentence_based_text_splitter 38 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | Before running any tests, make sure you have `uv` installed (and ideally run `make sync` after). 4 | 5 | ## Running tests 6 | 7 | ``` 8 | make tests 9 | ``` 10 | 11 | ## Snapshots 12 | 13 | We use [inline-snapshots](https://15r10nk.github.io/inline-snapshot/latest/) for some tests. If your code adds new snapshot tests or breaks existing ones, you can fix/create them. After fixing/creating snapshots, run `make tests` again to verify the tests pass. 14 | 15 | ### Fixing snapshots 16 | 17 | ``` 18 | make snapshots-fix 19 | ``` 20 | 21 | ### Creating snapshots 22 | 23 | ``` 24 | make snapshots-update 25 | ``` 26 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | from agents.models import _openai_shared 6 | from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel 7 | from agents.models.openai_responses import OpenAIResponsesModel 8 | from agents.tracing import set_trace_processors 9 | from agents.tracing.setup import GLOBAL_TRACE_PROVIDER 10 | 11 | from .testing_processor import SPAN_PROCESSOR_TESTING 12 | 13 | 14 | # This fixture will run once before any tests are executed 15 | @pytest.fixture(scope="session", autouse=True) 16 | def setup_span_processor(): 17 | set_trace_processors([SPAN_PROCESSOR_TESTING]) 18 | 19 | 20 | # This fixture will run before each test 21 | @pytest.fixture(autouse=True) 22 | def clear_span_processor(): 23 | SPAN_PROCESSOR_TESTING.force_flush() 24 | SPAN_PROCESSOR_TESTING.shutdown() 25 | SPAN_PROCESSOR_TESTING.clear() 26 | 27 | 28 | # This fixture will run before each test 29 | @pytest.fixture(autouse=True) 30 | def clear_openai_settings(): 31 | _openai_shared._default_openai_key = None 32 | _openai_shared._default_openai_client = None 33 | _openai_shared._use_responses_by_default = True 34 | 35 | 36 | # This fixture will run after all tests end 37 | @pytest.fixture(autouse=True, scope="session") 38 | def shutdown_trace_provider(): 39 | yield 40 | GLOBAL_TRACE_PROVIDER.shutdown() 41 | 42 | 43 | @pytest.fixture(autouse=True) 44 | def disable_real_model_clients(monkeypatch, request): 45 | # If the test is marked to allow the method call, don't override it. 
46 | if request.node.get_closest_marker("allow_call_model_methods"): 47 | return 48 | 49 | def failing_version(*args, **kwargs): 50 | pytest.fail("Real models should not be used in tests!") 51 | 52 | monkeypatch.setattr(OpenAIResponsesModel, "get_response", failing_version) 53 | monkeypatch.setattr(OpenAIResponsesModel, "stream_response", failing_version) 54 | monkeypatch.setattr(OpenAIChatCompletionsModel, "get_response", failing_version) 55 | monkeypatch.setattr(OpenAIChatCompletionsModel, "stream_response", failing_version) 56 | -------------------------------------------------------------------------------- /tests/fastapi/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/tests/fastapi/__init__.py -------------------------------------------------------------------------------- /tests/fastapi/streaming_app.py: -------------------------------------------------------------------------------- 1 | from collections.abc import AsyncIterator 2 | 3 | from fastapi import FastAPI 4 | from starlette.responses import StreamingResponse 5 | 6 | from agents import Agent, Runner, RunResultStreaming 7 | 8 | agent = Agent( 9 | name="Assistant", 10 | instructions="You are a helpful assistant.", 11 | ) 12 | 13 | 14 | app = FastAPI() 15 | 16 | 17 | @app.post("/stream") 18 | async def stream(): 19 | result = Runner.run_streamed(agent, input="Tell me a joke") 20 | stream_handler = StreamHandler(result) 21 | return StreamingResponse(stream_handler.stream_events(), media_type="application/x-ndjson") 22 | 23 | 24 | class StreamHandler: 25 | def __init__(self, result: RunResultStreaming): 26 | self.result = result 27 | 28 | async def stream_events(self) -> AsyncIterator[str]: 29 | async for event in self.result.stream_events(): 30 | yield f"{event.type}\n\n" 31 | -------------------------------------------------------------------------------- /tests/fastapi/test_streaming_context.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from httpx import ASGITransport, AsyncClient 3 | from inline_snapshot import snapshot 4 | 5 | from ..fake_model import FakeModel 6 | from ..test_responses import get_text_message 7 | from .streaming_app import agent, app 8 | 9 | 10 | @pytest.mark.asyncio 11 | async def test_streaming_context(): 12 | """This ensures that FastAPI streaming works. The context for this test is that the Runner 13 | method was called in one async context, and the streaming was ended in another context, 14 | leading to a tracing error because the context was closed in the wrong context. This test 15 | ensures that this actually works. 
16 | """ 17 | model = FakeModel() 18 | agent.model = model 19 | model.set_next_output([get_text_message("done")]) 20 | 21 | transport = ASGITransport(app) 22 | async with AsyncClient(transport=transport, base_url="http://test") as ac: 23 | async with ac.stream("POST", "/stream") as r: 24 | assert r.status_code == 200 25 | body = (await r.aread()).decode("utf-8") 26 | lines = [line for line in body.splitlines() if line] 27 | assert lines == snapshot( 28 | ["agent_updated_stream_event", "raw_response_event", "run_item_stream_event"] 29 | ) 30 | -------------------------------------------------------------------------------- /tests/mcp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/tests/mcp/__init__.py -------------------------------------------------------------------------------- /tests/mcp/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | 5 | # Skip MCP tests on Python 3.9 6 | def pytest_ignore_collect(collection_path, config): 7 | if sys.version_info[:2] == (3, 9): 8 | this_dir = os.path.dirname(__file__) 9 | 10 | if str(collection_path).startswith(this_dir): 11 | return True 12 | -------------------------------------------------------------------------------- /tests/mcp/helpers.py: -------------------------------------------------------------------------------- 1 | import json 2 | import shutil 3 | from typing import Any 4 | 5 | from mcp import Tool as MCPTool 6 | from mcp.types import CallToolResult, TextContent 7 | 8 | from agents.mcp import MCPServer 9 | 10 | tee = shutil.which("tee") or "" 11 | assert tee, "tee not found" 12 | 13 | 14 | # Added dummy stream classes for patching stdio_client to avoid real I/O during tests 15 | class DummyStream: 16 | async def send(self, msg): 17 | pass 18 | 19 | async def receive(self): 20 | raise Exception("Dummy receive not implemented") 21 | 22 | 23 | class DummyStreamsContextManager: 24 | async def __aenter__(self): 25 | return (DummyStream(), DummyStream()) 26 | 27 | async def __aexit__(self, exc_type, exc_val, exc_tb): 28 | pass 29 | 30 | 31 | class FakeMCPServer(MCPServer): 32 | def __init__(self, tools: list[MCPTool] | None = None): 33 | self.tools: list[MCPTool] = tools or [] 34 | self.tool_calls: list[str] = [] 35 | self.tool_results: list[str] = [] 36 | 37 | def add_tool(self, name: str, input_schema: dict[str, Any]): 38 | self.tools.append(MCPTool(name=name, inputSchema=input_schema)) 39 | 40 | async def connect(self): 41 | pass 42 | 43 | async def cleanup(self): 44 | pass 45 | 46 | async def list_tools(self): 47 | return self.tools 48 | 49 | async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult: 50 | self.tool_calls.append(tool_name) 51 | self.tool_results.append(f"result_{tool_name}_{json.dumps(arguments)}") 52 | return CallToolResult( 53 | content=[TextContent(text=self.tool_results[-1], type="text")], 54 | ) 55 | 56 | @property 57 | def name(self) -> str: 58 | return "fake_mcp_server" 59 | -------------------------------------------------------------------------------- /tests/mcp/test_caching.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import AsyncMock, patch 2 | 3 | import pytest 4 | from mcp.types import ListToolsResult, Tool as MCPTool 5 | 6 | from agents.mcp import MCPServerStdio 7 | 8 | from .helpers 
import DummyStreamsContextManager, tee 9 | 10 | 11 | @pytest.mark.asyncio 12 | @patch("mcp.client.stdio.stdio_client", return_value=DummyStreamsContextManager()) 13 | @patch("mcp.client.session.ClientSession.initialize", new_callable=AsyncMock, return_value=None) 14 | @patch("mcp.client.session.ClientSession.list_tools") 15 | async def test_server_caching_works( 16 | mock_list_tools: AsyncMock, mock_initialize: AsyncMock, mock_stdio_client 17 | ): 18 | """Test that if we turn caching on, the list of tools is cached and not fetched from the server 19 | on each call to `list_tools()`. 20 | """ 21 | server = MCPServerStdio( 22 | params={ 23 | "command": tee, 24 | }, 25 | cache_tools_list=True, 26 | ) 27 | 28 | tools = [ 29 | MCPTool(name="tool1", inputSchema={}), 30 | MCPTool(name="tool2", inputSchema={}), 31 | ] 32 | 33 | mock_list_tools.return_value = ListToolsResult(tools=tools) 34 | 35 | async with server: 36 | # Call list_tools() multiple times 37 | tools = await server.list_tools() 38 | assert tools == tools 39 | 40 | assert mock_list_tools.call_count == 1, "list_tools() should have been called once" 41 | 42 | # Call list_tools() again, should return the cached value 43 | tools = await server.list_tools() 44 | assert tools == tools 45 | 46 | assert mock_list_tools.call_count == 1, "list_tools() should not have been called again" 47 | 48 | # Invalidate the cache and call list_tools() again 49 | server.invalidate_tools_cache() 50 | tools = await server.list_tools() 51 | assert tools == tools 52 | 53 | assert mock_list_tools.call_count == 2, "list_tools() should be called again" 54 | 55 | # Without invalidating the cache, calling list_tools() again should return the cached value 56 | tools = await server.list_tools() 57 | assert tools == tools 58 | -------------------------------------------------------------------------------- /tests/mcp/test_connect_disconnect.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import AsyncMock, patch 2 | 3 | import pytest 4 | from mcp.types import ListToolsResult, Tool as MCPTool 5 | 6 | from agents.mcp import MCPServerStdio 7 | 8 | from .helpers import DummyStreamsContextManager, tee 9 | 10 | 11 | @pytest.mark.asyncio 12 | @patch("mcp.client.stdio.stdio_client", return_value=DummyStreamsContextManager()) 13 | @patch("mcp.client.session.ClientSession.initialize", new_callable=AsyncMock, return_value=None) 14 | @patch("mcp.client.session.ClientSession.list_tools") 15 | async def test_async_ctx_manager_works( 16 | mock_list_tools: AsyncMock, mock_initialize: AsyncMock, mock_stdio_client 17 | ): 18 | """Test that the async context manager works.""" 19 | server = MCPServerStdio( 20 | params={ 21 | "command": tee, 22 | }, 23 | cache_tools_list=True, 24 | ) 25 | 26 | tools = [ 27 | MCPTool(name="tool1", inputSchema={}), 28 | MCPTool(name="tool2", inputSchema={}), 29 | ] 30 | 31 | mock_list_tools.return_value = ListToolsResult(tools=tools) 32 | 33 | assert server.session is None, "Server should not be connected" 34 | 35 | async with server: 36 | assert server.session is not None, "Server should be connected" 37 | 38 | assert server.session is None, "Server should be disconnected" 39 | 40 | 41 | @pytest.mark.asyncio 42 | @patch("mcp.client.stdio.stdio_client", return_value=DummyStreamsContextManager()) 43 | @patch("mcp.client.session.ClientSession.initialize", new_callable=AsyncMock, return_value=None) 44 | @patch("mcp.client.session.ClientSession.list_tools") 45 | async def 
test_manual_connect_disconnect_works( 46 | mock_list_tools: AsyncMock, mock_initialize: AsyncMock, mock_stdio_client 47 | ): 48 | """Test that the async context manager works.""" 49 | server = MCPServerStdio( 50 | params={ 51 | "command": tee, 52 | }, 53 | cache_tools_list=True, 54 | ) 55 | 56 | tools = [ 57 | MCPTool(name="tool1", inputSchema={}), 58 | MCPTool(name="tool2", inputSchema={}), 59 | ] 60 | 61 | mock_list_tools.return_value = ListToolsResult(tools=tools) 62 | 63 | assert server.session is None, "Server should not be connected" 64 | 65 | await server.connect() 66 | assert server.session is not None, "Server should be connected" 67 | 68 | await server.cleanup() 69 | assert server.session is None, "Server should be disconnected" 70 | -------------------------------------------------------------------------------- /tests/mcp/test_server_errors.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from agents.exceptions import UserError 4 | from agents.mcp.server import _MCPServerWithClientSession 5 | 6 | 7 | class CrashingClientSessionServer(_MCPServerWithClientSession): 8 | def __init__(self): 9 | super().__init__(cache_tools_list=False, client_session_timeout_seconds=5) 10 | self.cleanup_called = False 11 | 12 | def create_streams(self): 13 | raise ValueError("Crash!") 14 | 15 | async def cleanup(self): 16 | self.cleanup_called = True 17 | await super().cleanup() 18 | 19 | @property 20 | def name(self) -> str: 21 | return "crashing_client_session_server" 22 | 23 | 24 | @pytest.mark.asyncio 25 | async def test_server_errors_cause_error_and_cleanup_called(): 26 | server = CrashingClientSessionServer() 27 | 28 | with pytest.raises(ValueError): 29 | await server.connect() 30 | 31 | assert server.cleanup_called 32 | 33 | 34 | @pytest.mark.asyncio 35 | async def test_not_calling_connect_causes_error(): 36 | server = CrashingClientSessionServer() 37 | 38 | with pytest.raises(UserError): 39 | await server.list_tools() 40 | 41 | with pytest.raises(UserError): 42 | await server.call_tool("foo", {}) 43 | -------------------------------------------------------------------------------- /tests/model_settings/test_serialization.py: -------------------------------------------------------------------------------- 1 | import json 2 | from dataclasses import fields 3 | 4 | from openai.types.shared import Reasoning 5 | 6 | from agents.model_settings import ModelSettings 7 | 8 | 9 | def verify_serialization(model_settings: ModelSettings) -> None: 10 | """Verify that ModelSettings can be serialized to a JSON string.""" 11 | json_dict = model_settings.to_json_dict() 12 | json_string = json.dumps(json_dict) 13 | assert json_string is not None 14 | 15 | 16 | def test_basic_serialization() -> None: 17 | """Tests whether ModelSettings can be serialized to a JSON string.""" 18 | 19 | # First, lets create a ModelSettings instance 20 | model_settings = ModelSettings( 21 | temperature=0.5, 22 | top_p=0.9, 23 | max_tokens=100, 24 | ) 25 | 26 | # Now, lets serialize the ModelSettings instance to a JSON string 27 | verify_serialization(model_settings) 28 | 29 | 30 | def test_all_fields_serialization() -> None: 31 | """Tests whether ModelSettings can be serialized to a JSON string.""" 32 | 33 | # First, lets create a ModelSettings instance 34 | model_settings = ModelSettings( 35 | temperature=0.5, 36 | top_p=0.9, 37 | frequency_penalty=0.0, 38 | presence_penalty=0.0, 39 | tool_choice="auto", 40 | parallel_tool_calls=True, 41 | truncation="auto", 42 | 
max_tokens=100, 43 | reasoning=Reasoning(), 44 | metadata={"foo": "bar"}, 45 | store=False, 46 | include_usage=False, 47 | extra_query={"foo": "bar"}, 48 | extra_body={"foo": "bar"}, 49 | extra_headers={"foo": "bar"}, 50 | ) 51 | 52 | # Verify that every single field is set to a non-None value 53 | for field in fields(model_settings): 54 | assert getattr(model_settings, field.name) is not None, ( 55 | f"You must set the {field.name} field" 56 | ) 57 | 58 | # Now, lets serialize the ModelSettings instance to a JSON string 59 | verify_serialization(model_settings) 60 | -------------------------------------------------------------------------------- /tests/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/tests/models/__init__.py -------------------------------------------------------------------------------- /tests/models/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | 5 | # Skip voice tests on Python 3.9 6 | def pytest_ignore_collect(collection_path, config): 7 | if sys.version_info[:2] == (3, 9): 8 | this_dir = os.path.dirname(__file__) 9 | 10 | if str(collection_path).startswith(this_dir): 11 | return True 12 | -------------------------------------------------------------------------------- /tests/models/test_litellm_extra_body.py: -------------------------------------------------------------------------------- 1 | import litellm 2 | import pytest 3 | from litellm.types.utils import Choices, Message, ModelResponse, Usage 4 | 5 | from agents.extensions.models.litellm_model import LitellmModel 6 | from agents.model_settings import ModelSettings 7 | from agents.models.interface import ModelTracing 8 | 9 | 10 | @pytest.mark.allow_call_model_methods 11 | @pytest.mark.asyncio 12 | async def test_extra_body_is_forwarded(monkeypatch): 13 | """ 14 | Forward `extra_body` entries into litellm.acompletion kwargs. 15 | 16 | This ensures that user-provided parameters (e.g. cached_content) 17 | arrive alongside default arguments. 
18 | """ 19 | captured: dict[str, object] = {} 20 | 21 | async def fake_acompletion(model, messages=None, **kwargs): 22 | captured.update(kwargs) 23 | msg = Message(role="assistant", content="ok") 24 | choice = Choices(index=0, message=msg) 25 | return ModelResponse(choices=[choice], usage=Usage(0, 0, 0)) 26 | 27 | monkeypatch.setattr(litellm, "acompletion", fake_acompletion) 28 | settings = ModelSettings( 29 | temperature=0.1, 30 | extra_body={"cached_content": "some_cache", "foo": 123} 31 | ) 32 | model = LitellmModel(model="test-model") 33 | 34 | await model.get_response( 35 | system_instructions=None, 36 | input=[], 37 | model_settings=settings, 38 | tools=[], 39 | output_schema=None, 40 | handoffs=[], 41 | tracing=ModelTracing.DISABLED, 42 | previous_response_id=None, 43 | ) 44 | 45 | assert {"cached_content": "some_cache", "foo": 123}.items() <= captured.items() 46 | -------------------------------------------------------------------------------- /tests/models/test_map.py: -------------------------------------------------------------------------------- 1 | from agents import Agent, OpenAIResponsesModel, RunConfig, Runner 2 | from agents.extensions.models.litellm_model import LitellmModel 3 | 4 | 5 | def test_no_prefix_is_openai(): 6 | agent = Agent(model="gpt-4o", instructions="", name="test") 7 | model = Runner._get_model(agent, RunConfig()) 8 | assert isinstance(model, OpenAIResponsesModel) 9 | 10 | 11 | def openai_prefix_is_openai(): 12 | agent = Agent(model="openai/gpt-4o", instructions="", name="test") 13 | model = Runner._get_model(agent, RunConfig()) 14 | assert isinstance(model, OpenAIResponsesModel) 15 | 16 | 17 | def test_litellm_prefix_is_litellm(): 18 | agent = Agent(model="litellm/foo/bar", instructions="", name="test") 19 | model = Runner._get_model(agent, RunConfig()) 20 | assert isinstance(model, LitellmModel) 21 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import openai 4 | import pytest 5 | 6 | from agents import set_default_openai_api, set_default_openai_client, set_default_openai_key 7 | from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel 8 | from agents.models.openai_provider import OpenAIProvider 9 | from agents.models.openai_responses import OpenAIResponsesModel 10 | 11 | 12 | def test_cc_no_default_key_errors(monkeypatch): 13 | monkeypatch.delenv("OPENAI_API_KEY", raising=False) 14 | with pytest.raises(openai.OpenAIError): 15 | OpenAIProvider(use_responses=False).get_model("gpt-4") 16 | 17 | 18 | def test_cc_set_default_openai_key(): 19 | set_default_openai_key("test_key") 20 | chat_model = OpenAIProvider(use_responses=False).get_model("gpt-4") 21 | assert chat_model._client.api_key == "test_key" # type: ignore 22 | 23 | 24 | def test_cc_set_default_openai_client(): 25 | client = openai.AsyncOpenAI(api_key="test_key") 26 | set_default_openai_client(client) 27 | chat_model = OpenAIProvider(use_responses=False).get_model("gpt-4") 28 | assert chat_model._client.api_key == "test_key" # type: ignore 29 | 30 | 31 | def test_resp_no_default_key_errors(monkeypatch): 32 | monkeypatch.delenv("OPENAI_API_KEY", raising=False) 33 | assert os.getenv("OPENAI_API_KEY") is None 34 | with pytest.raises(openai.OpenAIError): 35 | OpenAIProvider(use_responses=True).get_model("gpt-4") 36 | 37 | 38 | def test_resp_set_default_openai_key(): 39 | set_default_openai_key("test_key") 40 | resp_model = 
OpenAIProvider(use_responses=True).get_model("gpt-4") 41 | assert resp_model._client.api_key == "test_key" # type: ignore 42 | 43 | 44 | def test_resp_set_default_openai_client(): 45 | client = openai.AsyncOpenAI(api_key="test_key") 46 | set_default_openai_client(client) 47 | resp_model = OpenAIProvider(use_responses=True).get_model("gpt-4") 48 | assert resp_model._client.api_key == "test_key" # type: ignore 49 | 50 | 51 | def test_set_default_openai_api(): 52 | assert isinstance(OpenAIProvider().get_model("gpt-4"), OpenAIResponsesModel), ( 53 | "Default should be responses" 54 | ) 55 | 56 | set_default_openai_api("chat_completions") 57 | assert isinstance(OpenAIProvider().get_model("gpt-4"), OpenAIChatCompletionsModel), ( 58 | "Should be chat completions model" 59 | ) 60 | 61 | set_default_openai_api("responses") 62 | assert isinstance(OpenAIProvider().get_model("gpt-4"), OpenAIResponsesModel), ( 63 | "Should be responses model" 64 | ) 65 | -------------------------------------------------------------------------------- /tests/test_responses.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any 4 | 5 | from openai.types.responses import ( 6 | ResponseFunctionToolCall, 7 | ResponseOutputItem, 8 | ResponseOutputMessage, 9 | ResponseOutputText, 10 | ) 11 | 12 | from agents import ( 13 | Agent, 14 | FunctionTool, 15 | Handoff, 16 | TResponseInputItem, 17 | default_tool_error_function, 18 | function_tool, 19 | ) 20 | 21 | 22 | def get_text_input_item(content: str) -> TResponseInputItem: 23 | return { 24 | "content": content, 25 | "role": "user", 26 | } 27 | 28 | 29 | def get_text_message(content: str) -> ResponseOutputItem: 30 | return ResponseOutputMessage( 31 | id="1", 32 | type="message", 33 | role="assistant", 34 | content=[ResponseOutputText(text=content, type="output_text", annotations=[])], 35 | status="completed", 36 | ) 37 | 38 | 39 | def get_function_tool( 40 | name: str | None = None, return_value: str | None = None, hide_errors: bool = False 41 | ) -> FunctionTool: 42 | def _foo() -> str: 43 | return return_value or "result_ok" 44 | 45 | return function_tool( 46 | _foo, 47 | name_override=name, 48 | failure_error_function=None if hide_errors else default_tool_error_function, 49 | ) 50 | 51 | 52 | def get_function_tool_call(name: str, arguments: str | None = None) -> ResponseOutputItem: 53 | return ResponseFunctionToolCall( 54 | id="1", 55 | call_id="2", 56 | type="function_call", 57 | name=name, 58 | arguments=arguments or "", 59 | ) 60 | 61 | 62 | def get_handoff_tool_call( 63 | to_agent: Agent[Any], override_name: str | None = None, args: str | None = None 64 | ) -> ResponseOutputItem: 65 | name = override_name or Handoff.default_tool_name(to_agent) 66 | return get_function_tool_call(name, args) 67 | 68 | 69 | def get_final_output_message(args: str) -> ResponseOutputItem: 70 | return ResponseOutputMessage( 71 | id="1", 72 | type="message", 73 | role="assistant", 74 | content=[ResponseOutputText(text=args, type="output_text", annotations=[])], 75 | status="completed", 76 | ) 77 | -------------------------------------------------------------------------------- /tests/test_result_cast.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import pytest 4 | from pydantic import BaseModel 5 | 6 | from agents import Agent, RunContextWrapper, RunResult 7 | 8 | 9 | def create_run_result(final_output: Any) -> RunResult: 10 | 
return RunResult( 11 | input="test", 12 | new_items=[], 13 | raw_responses=[], 14 | final_output=final_output, 15 | input_guardrail_results=[], 16 | output_guardrail_results=[], 17 | _last_agent=Agent(name="test"), 18 | context_wrapper=RunContextWrapper(context=None), 19 | ) 20 | 21 | 22 | class Foo(BaseModel): 23 | bar: int 24 | 25 | 26 | def test_result_cast_typechecks(): 27 | """Correct casts should work fine.""" 28 | result = create_run_result(1) 29 | assert result.final_output_as(int) == 1 30 | 31 | result = create_run_result("test") 32 | assert result.final_output_as(str) == "test" 33 | 34 | result = create_run_result(Foo(bar=1)) 35 | assert result.final_output_as(Foo) == Foo(bar=1) 36 | 37 | 38 | def test_bad_cast_doesnt_raise(): 39 | """Bad casts shouldn't error unless we ask for it.""" 40 | result = create_run_result(1) 41 | result.final_output_as(str) 42 | 43 | result = create_run_result("test") 44 | result.final_output_as(Foo) 45 | 46 | 47 | def test_bad_cast_with_param_raises(): 48 | """Bad casts should raise a TypeError when we ask for it.""" 49 | result = create_run_result(1) 50 | with pytest.raises(TypeError): 51 | result.final_output_as(str, raise_if_incorrect_type=True) 52 | 53 | result = create_run_result("test") 54 | with pytest.raises(TypeError): 55 | result.final_output_as(Foo, raise_if_incorrect_type=True) 56 | 57 | result = create_run_result(Foo(bar=1)) 58 | with pytest.raises(TypeError): 59 | result.final_output_as(int, raise_if_incorrect_type=True) 60 | -------------------------------------------------------------------------------- /tests/test_run_error_details.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pytest 4 | 5 | from agents import Agent, MaxTurnsExceeded, RunErrorDetails, Runner 6 | 7 | from .fake_model import FakeModel 8 | from .test_responses import get_function_tool, get_function_tool_call, get_text_message 9 | 10 | 11 | @pytest.mark.asyncio 12 | async def test_run_error_includes_data(): 13 | model = FakeModel() 14 | agent = Agent(name="test", model=model, tools=[get_function_tool("foo", "res")]) 15 | model.add_multiple_turn_outputs([ 16 | [get_text_message("1"), get_function_tool_call("foo", json.dumps({"a": "b"}))], 17 | [get_text_message("done")], 18 | ]) 19 | with pytest.raises(MaxTurnsExceeded) as exc: 20 | await Runner.run(agent, input="hello", max_turns=1) 21 | data = exc.value.run_data 22 | assert isinstance(data, RunErrorDetails) 23 | assert data.last_agent == agent 24 | assert len(data.raw_responses) == 1 25 | assert len(data.new_items) > 0 26 | 27 | 28 | @pytest.mark.asyncio 29 | async def test_streamed_run_error_includes_data(): 30 | model = FakeModel() 31 | agent = Agent(name="test", model=model, tools=[get_function_tool("foo", "res")]) 32 | model.add_multiple_turn_outputs([ 33 | [get_text_message("1"), get_function_tool_call("foo", json.dumps({"a": "b"}))], 34 | [get_text_message("done")], 35 | ]) 36 | result = Runner.run_streamed(agent, input="hello", max_turns=1) 37 | with pytest.raises(MaxTurnsExceeded) as exc: 38 | async for _ in result.stream_events(): 39 | pass 40 | data = exc.value.run_data 41 | assert isinstance(data, RunErrorDetails) 42 | assert data.last_agent == agent 43 | assert len(data.raw_responses) == 1 44 | assert len(data.new_items) > 0 45 | -------------------------------------------------------------------------------- /tests/test_tool_converter.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 
from pydantic import BaseModel 3 | 4 | from agents import Agent, Handoff, function_tool, handoff 5 | from agents.exceptions import UserError 6 | from agents.models.chatcmpl_converter import Converter 7 | from agents.tool import FileSearchTool, WebSearchTool 8 | 9 | 10 | def some_function(a: str, b: list[int]) -> str: 11 | return "hello" 12 | 13 | 14 | def test_to_openai_with_function_tool(): 15 | some_function(a="foo", b=[1, 2, 3]) 16 | 17 | tool = function_tool(some_function) 18 | result = Converter.tool_to_openai(tool) 19 | 20 | assert result["type"] == "function" 21 | assert result["function"]["name"] == "some_function" 22 | params = result.get("function", {}).get("parameters") 23 | assert params is not None 24 | properties = params.get("properties", {}) 25 | assert isinstance(properties, dict) 26 | assert properties.keys() == {"a", "b"} 27 | 28 | 29 | class Foo(BaseModel): 30 | a: str 31 | b: list[int] 32 | 33 | 34 | def test_convert_handoff_tool(): 35 | agent = Agent(name="test_1", handoff_description="test_2") 36 | handoff_obj = handoff(agent=agent) 37 | result = Converter.convert_handoff_tool(handoff_obj) 38 | 39 | assert result["type"] == "function" 40 | assert result["function"]["name"] == Handoff.default_tool_name(agent) 41 | assert result["function"].get("description") == Handoff.default_tool_description(agent) 42 | params = result.get("function", {}).get("parameters") 43 | assert params is not None 44 | 45 | for key, value in handoff_obj.input_json_schema.items(): 46 | assert params[key] == value 47 | 48 | 49 | def test_tool_converter_hosted_tools_errors(): 50 | with pytest.raises(UserError): 51 | Converter.tool_to_openai(WebSearchTool()) 52 | 53 | with pytest.raises(UserError): 54 | Converter.tool_to_openai(FileSearchTool(vector_store_ids=["abc"], max_num_results=1)) 55 | -------------------------------------------------------------------------------- /tests/test_usage.py: -------------------------------------------------------------------------------- 1 | from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails 2 | 3 | from agents.usage import Usage 4 | 5 | 6 | def test_usage_add_aggregates_all_fields(): 7 | u1 = Usage( 8 | requests=1, 9 | input_tokens=10, 10 | input_tokens_details=InputTokensDetails(cached_tokens=3), 11 | output_tokens=20, 12 | output_tokens_details=OutputTokensDetails(reasoning_tokens=5), 13 | total_tokens=30, 14 | ) 15 | u2 = Usage( 16 | requests=2, 17 | input_tokens=7, 18 | input_tokens_details=InputTokensDetails(cached_tokens=4), 19 | output_tokens=8, 20 | output_tokens_details=OutputTokensDetails(reasoning_tokens=6), 21 | total_tokens=15, 22 | ) 23 | 24 | u1.add(u2) 25 | 26 | assert u1.requests == 3 27 | assert u1.input_tokens == 17 28 | assert u1.output_tokens == 28 29 | assert u1.total_tokens == 45 30 | assert u1.input_tokens_details.cached_tokens == 7 31 | assert u1.output_tokens_details.reasoning_tokens == 11 32 | 33 | 34 | def test_usage_add_aggregates_with_none_values(): 35 | u1 = Usage() 36 | u2 = Usage( 37 | requests=2, 38 | input_tokens=7, 39 | input_tokens_details=InputTokensDetails(cached_tokens=4), 40 | output_tokens=8, 41 | output_tokens_details=OutputTokensDetails(reasoning_tokens=6), 42 | total_tokens=15, 43 | ) 44 | 45 | u1.add(u2) 46 | 47 | assert u1.requests == 2 48 | assert u1.input_tokens == 7 49 | assert u1.output_tokens == 8 50 | assert u1.total_tokens == 15 51 | assert u1.input_tokens_details.cached_tokens == 4 52 | assert u1.output_tokens_details.reasoning_tokens == 6 53 | 
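Beyond the unit-level aggregation tested above, the accumulated `Usage` is what `RunContextWrapper.usage` exposes during and after a run. A hedged sketch of reading it from a run result (the `agent` and API credentials are assumed to be set up elsewhere):

```python
# Hedged sketch: inspecting aggregated token usage after a run.
from agents import Runner


async def report_usage(agent) -> None:
    result = await Runner.run(agent, input="Summarize today's agenda")
    usage = result.context_wrapper.usage
    print(
        f"{usage.requests} request(s), "
        f"{usage.input_tokens} in / {usage.output_tokens} out, "
        f"{usage.total_tokens} total tokens"
    )
```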
-------------------------------------------------------------------------------- /tests/tracing/test_processor_api_key.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from agents.tracing.processors import BackendSpanExporter 4 | 5 | 6 | @pytest.mark.asyncio 7 | async def test_processor_api_key(monkeypatch): 8 | # If the API key is not set, it should be None 9 | monkeypatch.delenv("OPENAI_API_KEY", None) 10 | processor = BackendSpanExporter() 11 | assert processor.api_key is None 12 | 13 | # If we set it afterwards, it should be the new value 14 | processor.set_api_key("test_api_key") 15 | assert processor.api_key == "test_api_key" 16 | 17 | 18 | @pytest.mark.asyncio 19 | async def test_processor_api_key_from_env(monkeypatch): 20 | # If the API key is not set at creation time but set before access time, it should be the new 21 | # value 22 | monkeypatch.delenv("OPENAI_API_KEY", None) 23 | processor = BackendSpanExporter() 24 | 25 | # If we set it afterwards, it should be the new value 26 | monkeypatch.setenv("OPENAI_API_KEY", "foo_bar_123") 27 | assert processor.api_key == "foo_bar_123" 28 | -------------------------------------------------------------------------------- /tests/voice/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openai/openai-agents-python/cfe9099f3f30607c92e3e7fd62d59990c0642e70/tests/voice/__init__.py -------------------------------------------------------------------------------- /tests/voice/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | 5 | # Skip voice tests on Python 3.9 6 | def pytest_ignore_collect(collection_path, config): 7 | if sys.version_info[:2] == (3, 9): 8 | this_dir = os.path.dirname(__file__) 9 | 10 | if str(collection_path).startswith(this_dir): 11 | return True 12 | -------------------------------------------------------------------------------- /tests/voice/helpers.py: -------------------------------------------------------------------------------- 1 | try: 2 | from agents.voice import StreamedAudioResult 3 | except ImportError: 4 | pass 5 | 6 | 7 | async def extract_events(result: StreamedAudioResult) -> tuple[list[str], list[bytes]]: 8 | """Collapse pipeline stream events to simple labels for ordering assertions.""" 9 | flattened: list[str] = [] 10 | audio_chunks: list[bytes] = [] 11 | 12 | async for ev in result.stream(): 13 | if ev.type == "voice_stream_event_audio": 14 | if ev.data is not None: 15 | audio_chunks.append(ev.data.tobytes()) 16 | flattened.append("audio") 17 | elif ev.type == "voice_stream_event_lifecycle": 18 | flattened.append(ev.event) 19 | elif ev.type == "voice_stream_event_error": 20 | flattened.append("error") 21 | return flattened, audio_chunks 22 | --------------------------------------------------------------------------------
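For completeness, a hedged sketch of how `extract_events` might be used in a voice pipeline test. The `fake_streamed_audio_result` fixture is hypothetical (it is not defined in the repository); the lifecycle names come from `voice/events.py` above.

```python
# Hedged sketch of a test built on extract_events; the result fixture is assumed/faked.
import pytest

from .helpers import extract_events


@pytest.mark.asyncio
async def test_event_ordering(fake_streamed_audio_result):  # hypothetical fixture
    flattened, audio_chunks = await extract_events(fake_streamed_audio_result)
    assert flattened[0] == "turn_started"
    assert flattened[-1] == "session_ended"
    assert all(isinstance(chunk, bytes) for chunk in audio_chunks)
```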