├── .changeset
├── README.md
└── config.json
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── feature_request.md
│ ├── model_provider.md
│ └── question.md
├── PULL_REQUEST_TEMPLATE
│ └── pull_request_template.md
└── workflows
│ ├── docs.yml
│ ├── issues.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .husky
└── pre-commit
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .vscode
├── extensions.json
└── launch.json
├── AGENTS.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── SECURITY.md
├── docs
├── .gitignore
├── README.md
├── astro.config.mjs
├── package-lock.json
├── package.json
├── public
│ └── favicon.svg
├── src
│ ├── assets
│ │ ├── dark-logo.svg
│ │ ├── dark-wordmark.svg
│ │ ├── houston.webp
│ │ ├── light-logo.svg
│ │ └── light-wordmark.svg
│ ├── components
│ │ ├── Hero.astro
│ │ ├── Logo.astro
│ │ └── Title.astro
│ ├── content.config.ts
│ ├── content
│ │ └── docs
│ │ │ ├── extensions
│ │ │ ├── ai-sdk.mdx
│ │ │ └── twilio.mdx
│ │ │ ├── guides
│ │ │ ├── agents.mdx
│ │ │ ├── config.mdx
│ │ │ ├── context.mdx
│ │ │ ├── guardrails.mdx
│ │ │ ├── handoffs.mdx
│ │ │ ├── human-in-the-loop.mdx
│ │ │ ├── models.mdx
│ │ │ ├── multi-agent.md
│ │ │ ├── quickstart.mdx
│ │ │ ├── results.mdx
│ │ │ ├── running-agents.mdx
│ │ │ ├── streaming.mdx
│ │ │ ├── tools.mdx
│ │ │ ├── tracing.mdx
│ │ │ ├── troubleshooting.mdx
│ │ │ ├── voice-agents.mdx
│ │ │ └── voice-agents
│ │ │ │ ├── build.mdx
│ │ │ │ ├── quickstart.mdx
│ │ │ │ └── transport.mdx
│ │ │ ├── index.mdx
│ │ │ └── ja
│ │ │ ├── extensions
│ │ │ ├── ai-sdk.mdx
│ │ │ └── twilio.mdx
│ │ │ ├── guides
│ │ │ ├── agents.mdx
│ │ │ ├── config.mdx
│ │ │ ├── context.mdx
│ │ │ ├── guardrails.mdx
│ │ │ ├── handoffs.mdx
│ │ │ ├── human-in-the-loop.mdx
│ │ │ ├── models.mdx
│ │ │ ├── multi-agent.md
│ │ │ ├── quickstart.mdx
│ │ │ ├── results.mdx
│ │ │ ├── running-agents.md
│ │ │ ├── running-agents.mdx
│ │ │ ├── streaming.mdx
│ │ │ ├── tools.mdx
│ │ │ ├── tracing.mdx
│ │ │ ├── troubleshooting.mdx
│ │ │ ├── voice-agents.mdx
│ │ │ └── voice-agents
│ │ │ │ ├── build.mdx
│ │ │ │ ├── quickstart.mdx
│ │ │ │ └── transport.mdx
│ │ │ └── index.mdx
│ ├── scripts
│ │ └── translate.ts
│ └── styles
│ │ └── global.css
├── tailwind.config.mjs
└── tsconfig.json
├── eslint.config.js
├── examples
├── agent-patterns
│ ├── .gitignore
│ ├── README.md
│ ├── agents-as-tools.ts
│ ├── deterministic.ts
│ ├── forcing-tool-use.ts
│ ├── human-in-the-loop-stream.ts
│ ├── human-in-the-loop.ts
│ ├── input-guardrails.ts
│ ├── llm-as-a-judge.ts
│ ├── output-guardrails.ts
│ ├── package.json
│ ├── parallelization.ts
│ ├── routing.ts
│ ├── streamed.ts
│ ├── streaming-guardrails.ts
│ └── tsconfig.json
├── ai-sdk
│ ├── README.md
│ ├── ai-sdk-model.ts
│ ├── package.json
│ ├── stream.ts
│ └── tsconfig.json
├── basic
│ ├── README.md
│ ├── agent-lifecycle-example.ts
│ ├── chat.ts
│ ├── dynamic-system-prompt.ts
│ ├── hello-world.ts
│ ├── index.ts
│ ├── json-schema-output-type.ts
│ ├── lifecycle-example.ts
│ ├── local-image.ts
│ ├── media
│ │ └── image_bison.jpg
│ ├── package.json
│ ├── previous-response-id.ts
│ ├── remote-image.ts
│ ├── stream-items.ts
│ ├── stream-text.ts
│ ├── tool-use-behavior.ts
│ ├── tools.ts
│ └── tsconfig.json
├── customer-service
│ ├── README.md
│ ├── index.ts
│ ├── package.json
│ └── tsconfig.json
├── docs
│ ├── README.md
│ ├── agents
│ │ ├── agentCloning.ts
│ │ ├── agentForcingToolUse.ts
│ │ ├── agentWithAodOutputType.ts
│ │ ├── agentWithContext.ts
│ │ ├── agentWithDynamicInstructions.ts
│ │ ├── agentWithHandoffs.ts
│ │ ├── agentWithLifecycleHooks.ts
│ │ ├── agentWithTools.ts
│ │ └── simpleAgent.ts
│ ├── config
│ │ ├── getLogger.ts
│ │ ├── setDefaultOpenAIClient.ts
│ │ ├── setDefaultOpenAIKey.ts
│ │ ├── setOpenAIAPI.ts
│ │ ├── setTracingDisabled.ts
│ │ └── setTracingExportApiKey.ts
│ ├── context
│ │ └── localContext.ts
│ ├── custom-trace.ts
│ ├── extensions
│ │ ├── ai-sdk-setup.ts
│ │ └── twilio-basic.ts
│ ├── guardrails
│ │ ├── guardrails-input.ts
│ │ └── guardrails-output.ts
│ ├── handoffs
│ │ ├── basicUsage.ts
│ │ ├── customizeHandoff.ts
│ │ ├── handoffInput.ts
│ │ ├── inputFilter.ts
│ │ └── recommendedPrompt.ts
│ ├── hello-world-with-runner.ts
│ ├── hello-world.ts
│ ├── human-in-the-loop
│ │ ├── index.ts
│ │ └── toolApprovalDefinition.ts
│ ├── models
│ │ ├── agentWithModel.ts
│ │ ├── customProviders.ts
│ │ ├── modelSettings.ts
│ │ ├── openaiProvider.ts
│ │ └── runnerWithModel.ts
│ ├── package.json
│ ├── quickstart
│ │ └── index.ts
│ ├── readme
│ │ ├── readme-functions.ts
│ │ ├── readme-handoffs.ts
│ │ ├── readme-hello-world.ts
│ │ └── readme-voice-agent.ts
│ ├── results
│ │ ├── handoffFinalOutputTypes.ts
│ │ └── historyLoop.ts
│ ├── running-agents
│ │ ├── chatLoop.ts
│ │ ├── exceptions1.ts
│ │ └── exceptions2.ts
│ ├── streaming
│ │ ├── basicStreaming.ts
│ │ ├── handleAllEvents.ts
│ │ ├── nodeTextStream.ts
│ │ └── streamedHITL.ts
│ ├── tools
│ │ ├── agentsAsTools.ts
│ │ ├── functionTools.ts
│ │ ├── hostedTools.ts
│ │ └── nonStrictSchemaTools.ts
│ ├── toppage
│ │ ├── textAgent.ts
│ │ └── voiceAgent.ts
│ ├── tsconfig.json
│ └── voice-agents
│ │ ├── agent.ts
│ │ ├── audioInterrupted.ts
│ │ ├── configureSession.ts
│ │ ├── createAgent.ts
│ │ ├── createSession.ts
│ │ ├── customWebRTCTransport.ts
│ │ ├── defineTool.ts
│ │ ├── delegationAgent.ts
│ │ ├── guardrailSettings.ts
│ │ ├── guardrails.ts
│ │ ├── handleAudio.ts
│ │ ├── helloWorld.ts
│ │ ├── historyUpdated.ts
│ │ ├── multiAgents.ts
│ │ ├── sendMessage.ts
│ │ ├── serverAgent.ts
│ │ ├── sessionHistory.ts
│ │ ├── sessionInterrupt.ts
│ │ ├── thinClient.ts
│ │ ├── toolApprovalEvent.ts
│ │ ├── toolHistory.ts
│ │ ├── transportEvents.ts
│ │ ├── turnDetection.ts
│ │ ├── updateHistory.ts
│ │ └── websocketSession.ts
├── financial-research-agent
│ ├── README.md
│ ├── agents.ts
│ ├── main.ts
│ ├── manager.ts
│ ├── package.json
│ └── tsconfig.json
├── handoffs
│ ├── README.md
│ ├── index.ts
│ ├── package.json
│ ├── tsconfig.json
│ └── types.ts
├── mcp
│ ├── README.md
│ ├── filesystem-example.ts
│ ├── package.json
│ ├── sample_files
│ │ ├── books.txt
│ │ └── favorite_songs.txt
│ └── tsconfig.json
├── model-providers
│ ├── README.md
│ ├── custom-example-agent.ts
│ ├── custom-example-global.ts
│ ├── custom-example-provider.ts
│ ├── package.json
│ └── tsconfig.json
├── realtime-demo
│ ├── .gitignore
│ ├── README.md
│ ├── index.html
│ ├── package.json
│ ├── public
│ │ └── vite.svg
│ ├── src
│ │ ├── main.ts
│ │ ├── style.css
│ │ └── utils.ts
│ ├── token.ts
│ ├── tsconfig.json
│ ├── vite-env.d.ts
│ └── vite.config.ts
├── realtime-next
│ ├── .gitignore
│ ├── README.md
│ ├── next.config.ts
│ ├── package.json
│ ├── postcss.config.mjs
│ ├── public
│ │ ├── file.svg
│ │ ├── globe.svg
│ │ ├── next.svg
│ │ ├── vercel.svg
│ │ └── window.svg
│ ├── src
│ │ ├── app
│ │ │ ├── favicon.ico
│ │ │ ├── globals.css
│ │ │ ├── layout.tsx
│ │ │ ├── page.tsx
│ │ │ ├── raw-client
│ │ │ │ └── page.tsx
│ │ │ ├── server
│ │ │ │ ├── backendAgent.tsx
│ │ │ │ └── token.tsx
│ │ │ └── websocket
│ │ │ │ └── page.tsx
│ │ └── components
│ │ │ ├── App.tsx
│ │ │ ├── History.tsx
│ │ │ ├── icons
│ │ │ ├── ClockIcon.tsx
│ │ │ └── FunctionsIcon.tsx
│ │ │ ├── messages
│ │ │ ├── FunctionCall.tsx
│ │ │ └── TextMessage.tsx
│ │ │ └── ui
│ │ │ ├── Button.tsx
│ │ │ └── utils.ts
│ ├── tsconfig.json
│ └── vercel.json
├── realtime-twilio
│ ├── README.md
│ ├── index.ts
│ ├── package.json
│ └── tsconfig.json
├── research-bot
│ ├── README.md
│ ├── agents.ts
│ ├── main.ts
│ ├── manager.ts
│ ├── package.json
│ └── tsconfig.json
└── tools
│ ├── README.md
│ ├── code-interpreter.ts
│ ├── computer-use.ts
│ ├── file-search.ts
│ ├── image-generation.ts
│ ├── package.json
│ ├── tsconfig.json
│ └── web-search.ts
├── helpers
└── tests
│ └── setup.ts
├── integration-tests
├── README.md
├── _helpers
│ └── setup.ts
├── bun.test.ts
├── bun
│ ├── .gitignore
│ ├── .npmrc
│ ├── index.ts
│ ├── package.json
│ └── tsconfig.json
├── cloudflare-workers
│ └── worker
│ │ ├── .npmrc
│ │ ├── .vscode
│ │ └── settings.json
│ │ ├── package.json
│ │ ├── src
│ │ └── index.ts
│ │ ├── tsconfig.json
│ │ ├── worker-configuration.d.ts
│ │ └── wrangler.jsonc
├── cloudflare.test.ts
├── deno.test.ts
├── deno
│ ├── .npmrc
│ ├── main.ts
│ └── package.json
├── node.test.ts
└── node
│ ├── .npmrc
│ ├── index.cjs
│ ├── index.mjs
│ └── package.json
├── package.json
├── packages
├── agents-core
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── package.json
│ ├── src
│ │ ├── agent.ts
│ │ ├── computer.ts
│ │ ├── config.ts
│ │ ├── errors.ts
│ │ ├── events.ts
│ │ ├── extensions
│ │ │ ├── handoffFilters.ts
│ │ │ ├── handoffPrompt.ts
│ │ │ └── index.ts
│ │ ├── guardrail.ts
│ │ ├── handoff.ts
│ │ ├── helpers
│ │ │ └── message.ts
│ │ ├── index.ts
│ │ ├── items.ts
│ │ ├── lifecycle.ts
│ │ ├── logger.ts
│ │ ├── mcp.ts
│ │ ├── metadata.ts
│ │ ├── model.ts
│ │ ├── providers.ts
│ │ ├── result.ts
│ │ ├── run.ts
│ │ ├── runContext.ts
│ │ ├── runImplementation.ts
│ │ ├── runState.ts
│ │ ├── shims
│ │ │ ├── interface.ts
│ │ │ ├── mcp-stdio
│ │ │ │ ├── browser.ts
│ │ │ │ └── node.ts
│ │ │ ├── shims-browser.ts
│ │ │ ├── shims-node.ts
│ │ │ ├── shims-workerd.ts
│ │ │ └── shims.ts
│ │ ├── tool.ts
│ │ ├── tracing
│ │ │ ├── context.ts
│ │ │ ├── createSpans.ts
│ │ │ ├── index.ts
│ │ │ ├── processor.ts
│ │ │ ├── provider.ts
│ │ │ ├── spans.ts
│ │ │ ├── traces.ts
│ │ │ └── utils.ts
│ │ ├── types
│ │ │ ├── aliases.ts
│ │ │ ├── helpers.ts
│ │ │ ├── index.ts
│ │ │ └── protocol.ts
│ │ ├── usage.ts
│ │ └── utils
│ │ │ ├── index.ts
│ │ │ ├── messages.ts
│ │ │ ├── safeExecute.ts
│ │ │ ├── serialize.ts
│ │ │ ├── smartString.ts
│ │ │ ├── tools.ts
│ │ │ └── typeGuards.ts
│ ├── test
│ │ ├── agent.test.ts
│ │ ├── errors.test.ts
│ │ ├── extensions
│ │ │ ├── handoffFilters.test.ts
│ │ │ └── handoffPrompt.test.ts
│ │ ├── guardrail.test.ts
│ │ ├── handoff.test.ts
│ │ ├── handoffs.test.ts
│ │ ├── helpers
│ │ │ └── message.test.ts
│ │ ├── index.test.ts
│ │ ├── items.test.ts
│ │ ├── mcp.test.ts
│ │ ├── mcpCache.test.ts
│ │ ├── metadata.test.ts
│ │ ├── model.test.ts
│ │ ├── providers.test.ts
│ │ ├── result.test.ts
│ │ ├── run.stream.test.ts
│ │ ├── run.test.ts
│ │ ├── run.utils.test.ts
│ │ ├── runContext.test.ts
│ │ ├── runImplementation.test.ts
│ │ ├── runState.test.ts
│ │ ├── shims
│ │ │ └── mcp-stdio
│ │ │ │ ├── browser.test.ts
│ │ │ │ └── node.test.ts
│ │ ├── stubs.ts
│ │ ├── tool.test.ts
│ │ ├── tracing.test.ts
│ │ ├── usage.test.ts
│ │ └── utils
│ │ │ ├── index.test.ts
│ │ │ ├── messages.test.ts
│ │ │ ├── safeExecute.test.ts
│ │ │ ├── serialize.test.ts
│ │ │ ├── smartString.test.ts
│ │ │ ├── tools.test.ts
│ │ │ └── typeGuards.test.ts
│ ├── tsconfig.json
│ └── tsconfig.test.json
├── agents-extensions
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── package.json
│ ├── src
│ │ ├── TwilioRealtimeTransport.ts
│ │ ├── aiSdk.ts
│ │ ├── index.ts
│ │ └── metadata.ts
│ ├── test
│ │ ├── TwilioRealtimeTransport.test.ts
│ │ ├── aiSdk.test.ts
│ │ └── index.test.ts
│ ├── tsconfig.json
│ └── tsconfig.test.json
├── agents-openai
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── package.json
│ ├── src
│ │ ├── defaults.ts
│ │ ├── index.ts
│ │ ├── logger.ts
│ │ ├── metadata.ts
│ │ ├── openaiChatCompletionsConverter.ts
│ │ ├── openaiChatCompletionsModel.ts
│ │ ├── openaiChatCompletionsStreaming.ts
│ │ ├── openaiProvider.ts
│ │ ├── openaiResponsesModel.ts
│ │ ├── openaiTracingExporter.ts
│ │ └── tools.ts
│ ├── test
│ │ ├── defaults.test.ts
│ │ ├── index.test.ts
│ │ ├── openaiChatCompletionsConverter.test.ts
│ │ ├── openaiChatCompletionsModel.test.ts
│ │ ├── openaiChatCompletionsStreaming.test.ts
│ │ ├── openaiProvider.test.ts
│ │ ├── openaiResponsesModel.helpers.test.ts
│ │ ├── openaiResponsesModel.test.ts
│ │ ├── openaiTracingExporter.test.ts
│ │ └── tools.test.ts
│ ├── tsconfig.json
│ └── tsconfig.test.json
├── agents-realtime
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── package.json
│ ├── src
│ │ ├── clientMessages.ts
│ │ ├── guardrail.ts
│ │ ├── index.ts
│ │ ├── items.ts
│ │ ├── logger.ts
│ │ ├── metadata.ts
│ │ ├── openaiRealtimeBase.ts
│ │ ├── openaiRealtimeEvents.ts
│ │ ├── openaiRealtimeWebRtc.ts
│ │ ├── openaiRealtimeWebsocket.ts
│ │ ├── realtimeAgent.ts
│ │ ├── realtimeSession.ts
│ │ ├── realtimeSessionEvents.ts
│ │ ├── shims
│ │ │ ├── shims-browser.ts
│ │ │ ├── shims-node.ts
│ │ │ └── shims.ts
│ │ ├── transportLayer.ts
│ │ ├── transportLayerEvents.ts
│ │ └── utils.ts
│ ├── test
│ │ ├── index.test.ts
│ │ ├── openaiRealtimeBase.test.ts
│ │ ├── openaiRealtimeEvents.test.ts
│ │ ├── openaiRealtimeWebRtc.environment.test.ts
│ │ ├── openaiRealtimeWebRtc.test.ts
│ │ ├── openaiRealtimeWebsocket.test.ts
│ │ ├── realtimeSession.test.ts
│ │ ├── stubs.ts
│ │ └── utils.test.ts
│ ├── tsconfig.json
│ ├── tsconfig.test.json
│ └── vite.config.js
└── agents
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── package.json
│ ├── src
│ ├── index.ts
│ ├── metadata.ts
│ ├── realtime
│ │ └── index.ts
│ └── utils
│ │ └── index.ts
│ ├── test
│ ├── index.test.ts
│ └── metadata.test.ts
│ ├── tsconfig.json
│ └── tsconfig.test.json
├── pnpm-lock.yaml
├── pnpm-workspace.yaml
├── scripts
├── dev.mts
└── embedMeta.ts
├── tsc-multi.json
├── tsconfig.examples.json
├── tsconfig.json
├── tsconfig.test.json
├── verdaccio-config.yml
├── vitest.config.ts
└── vitest.integration.config.ts
/.changeset/README.md:
--------------------------------------------------------------------------------
1 | # Changesets
2 |
3 | Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works
4 | with multi-package repos, or single-package repos to help you version and publish your code. You can
5 | find the full documentation for it [in our repository](https://github.com/changesets/changesets)
6 |
7 | We have a quick list of common questions to get you started engaging with this project in
8 | [our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md)
9 |
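For reference, each changeset added with `pnpm changeset` is a small markdown file in this folder that looks roughly like the following (the package name and bump type shown here are just an example):

```md
---
'@openai/agents-core': patch
---

Short description of the change.
```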
--------------------------------------------------------------------------------
/.changeset/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://unpkg.com/@changesets/config@3.1.1/schema.json",
3 | "changelog": "@changesets/cli/changelog",
4 | "commit": false,
5 | "fixed": [],
6 | "linked": [],
7 | "access": "restricted",
8 | "baseBranch": "main",
9 | "updateInternalDependencies": "patch",
10 | "ignore": []
11 | }
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Report a bug
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 | ---
8 |
9 | ### Please read this first
10 |
11 | - **Have you read the docs?** [Agents SDK docs](https://openai.github.io/openai-agents-js/)
12 | - **Have you searched for related issues?** Others may have faced similar issues.
13 |
14 | ### Describe the bug
15 |
16 | A clear and concise description of what the bug is.
17 |
18 | ### Debug information
19 |
20 | - Agents SDK version: (e.g. `v0.0.1`)
21 | - Runtime environment (e.g. `Node.js 22.16.0`)
22 |
23 | ### Repro steps
24 |
25 | Ideally provide a minimal JavaScript/TypeScript script that can be run to reproduce the bug.
26 |
27 | ### Expected behavior
28 |
29 | A clear and concise description of what you expected to happen.
30 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 | ---
8 |
9 | ### Please read this first
10 |
11 | - **Have you read the docs?** [Agents SDK docs](https://openai.github.io/openai-agents-js/)
12 | - **Have you searched for related issues?** Others may have had similar requests
13 |
14 | ### Describe the feature
15 |
16 | What is the feature you're requesting? How would it work? Please provide examples and details if possible.
17 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/model_provider.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Custom model providers
3 | about: Questions or bugs about using non-OpenAI models
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 | ---
8 |
9 | ### Please read this first
10 |
11 | - **Have you read the custom model provider docs, including the 'Common issues' section?** [Model provider docs](https://openai.github.io/openai-agents-js/guides/models#custom-model-providers)
12 | - **Have you searched for related issues?** Others may have faced similar issues.
13 |
14 | ### Describe the question
15 |
16 | A clear and concise description of what the question or bug is.
17 |
18 | ### Debug information
19 |
20 | - Agents SDK version: (e.g. `v0.0.1`)
21 | - Runtime environment (e.g. `Node.js 22.16.0`)
22 |
23 | ### Repro steps
24 |
25 | Ideally provide a minimal JavaScript/TypeScript script that can be run to reproduce the issue.
26 |
27 | ### Expected behavior
28 |
29 | A clear and concise description of what you expected to happen.
30 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
3 | about: Questions about the SDK
4 | title: ''
5 | labels: question
6 | assignees: ''
7 | ---
8 |
9 | ### Please read this first
10 |
11 | - **Have you read the docs?** [Agents SDK docs](https://openai.github.io/openai-agents-js/)
12 | - **Have you searched for related issues?** Others may have had similar requests
13 |
14 | ### Question
15 |
16 | Describe your question. Provide details if available.
17 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ### Summary
2 |
3 |
4 |
5 | ### Test plan
6 |
7 |
8 |
9 | ### Issue number
10 |
11 |
12 |
13 | ### Checks
14 |
15 | - [ ] I've added new tests (if relevant)
16 | - [ ] I've added/updated the relevant documentation
17 | - [ ] I've run `pnpm test` and `pnpm test:examples`
18 | - [ ] (If you made a major change) I've run `pnpm test:integration` [(see details)](https://github.com/openai/openai-agents-js/tree/main/integration-tests)
19 | - [ ] I've made sure tests pass
20 | - [ ] I've added a changeset using `pnpm changeset` to indicate my changes
21 |
--------------------------------------------------------------------------------
/.github/workflows/issues.yml:
--------------------------------------------------------------------------------
1 | name: Close inactive issues
2 | on:
3 | schedule:
4 | - cron: "30 1 * * *"
5 |
6 | jobs:
7 | close-issues:
8 | runs-on: ubuntu-latest
9 | permissions:
10 | issues: write
11 | pull-requests: write
12 | steps:
13 | - uses: actions/stale@v9
14 | with:
15 | days-before-issue-stale: 7
16 | days-before-issue-close: 3
17 | stale-issue-label: "stale"
18 | stale-issue-message: "This issue is stale because it has been open for 7 days with no activity."
19 | close-issue-message: "This issue was closed because it has been inactive for 3 days since being marked as stale."
20 | any-of-issue-labels: 'question,needs-more-info'
21 | days-before-pr-stale: 10
22 | days-before-pr-close: 7
23 | stale-pr-label: "stale"
24 | stale-pr-message: "This PR is stale because it has been open for 10 days with no activity."
25 | close-pr-message: "This PR was closed because it has been inactive for 7 days since being marked as stale."
26 | repo-token: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Changesets
2 |
3 | on:
4 | workflow_run:
5 | workflows: ['CI']
6 | types:
7 | - completed
8 | branches:
9 | - main
10 |
11 | env:
12 | CI: true
13 |
14 | jobs:
15 | version:
16 | if: ${{ github.event.workflow_run.conclusion == 'success' && github.repository_owner == 'openai' }}
17 | timeout-minutes: 15
18 | runs-on: ubuntu-latest
19 | permissions:
20 | contents: write
21 | pull-requests: write
22 | id-token: write
23 | steps:
24 | - name: Checkout code repository
25 | uses: actions/checkout@v4
26 |
27 | - name: Setup pnpm
28 | uses: pnpm/action-setup@v4
29 |
30 | - name: Setup node.js
31 | uses: actions/setup-node@v4
32 | with:
33 | node-version: 20
34 | cache: 'pnpm'
35 |
36 | - name: Install dependencies
37 | run: pnpm install
38 |
39 | - name: Build packages
40 | run: pnpm build
41 |
42 | - name: Create and publish versions
43 | uses: changesets/action@v1
44 | with:
45 | commit: 'chore: update versions'
46 | title: 'chore: update versions'
47 | publish: pnpm ci:publish
48 | env:
49 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
50 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
51 | NPM_CONFIG_PROVENANCE: true
52 | NPM_CONFIG_ACCESS: public
53 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 |
8 | jobs:
9 | test:
10 | runs-on: ubuntu-latest
11 | strategy:
12 | matrix:
13 | # https://nodejs.org/en/about/previous-releases
14 | node-version: [20, 22, 24]
15 | steps:
16 | - name: Checkout repository
17 | uses: actions/checkout@v4
18 | - name: Setup Node.js
19 | uses: actions/setup-node@v4
20 | with:
21 | node-version: ${{ matrix.node-version }}
22 | - name: Install pnpm
23 | uses: pnpm/action-setup@v4
24 | with:
25 | version: 10.11.0
26 | run_install: true
27 | - name: Build all packages
28 | run: pnpm build
29 | - name: Run linter
30 | run: pnpm lint
31 | - name: Compile examples
32 | run: pnpm -r build-check
33 | - name: Run tests
34 | run: pnpm test
35 |
--------------------------------------------------------------------------------
/.husky/pre-commit:
--------------------------------------------------------------------------------
1 | # run prettier on staged files
2 | pnpm prettier $(git diff --cached --name-only --diff-filter=ACMR | sed 's| |\\ |g') --write --ignore-unknown
3 | git update-index --again
4 |
5 | # run lint on staged files
6 | pnpm eslint $(git diff --cached --name-only --diff-filter=ACMR | sed 's| |\\ |g') --fix
7 | git update-index --again
8 |
9 | # check for secrets
10 | if [ -z "$CI" ] && [ -z "$GITHUB_ACTIONS" ]; then
11 | trufflehog git file://. --since-commit HEAD --fail
12 | fi
13 |
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | node-linker=hoisted
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | packages/*/src/metadata.ts
2 | *.yaml
3 | *.yml
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "singleQuote": true,
3 | "tabWidth": 2,
4 | "useTabs": false
5 | }
6 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": ["astro-build.astro-vscode", "vitest.explorer"],
3 | "unwantedRecommendations": []
4 | }
5 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.2.0",
3 | "configurations": [
4 | {
5 | "cwd": "${workspaceFolder}/docs",
6 | "command": "./node_modules/.bin/astro dev",
7 | "name": "Docs development server",
8 | "request": "launch",
9 | "type": "node-terminal"
10 | }
11 | ]
12 | }
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 OpenAI
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | For a more in-depth look at our security policy, please check out our [Coordinated Vulnerability Disclosure Policy](https://openai.com/security/disclosure/#:~:text=Disclosure%20Policy,-Security%20is%20essential&text=OpenAI%27s%20coordinated%20vulnerability%20disclosure%20policy,expect%20from%20us%20in%20return.).
4 |
5 | Our PGP key can be located [at this address](https://cdn.openai.com/security.txt).
6 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | # build output
2 | dist/
3 | # generated types
4 | .astro/
5 |
6 | # dependencies
7 | node_modules/
8 |
9 | # logs
10 | npm-debug.log*
11 | yarn-debug.log*
12 | yarn-error.log*
13 | pnpm-debug.log*
14 |
15 |
16 | # environment variables
17 | .env
18 | .env.production
19 |
20 | # macOS-specific files
21 | .DS_Store
22 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Docs
2 |
3 | The documentation is generated using Astro Starlight.
4 |
5 | ## Running the docs
6 |
7 | To run the docs, run the following from the root of the project:
8 |
9 | ```bash
10 | pnpm docs:dev
11 | ```
12 |
13 | ## Translating docs
14 |
15 | All of our documentation is available in Japanese. For this, we use a script to translate the docs:
16 |
17 | ```bash
18 | pnpm docs:translate
19 | ```
20 |
21 | ## Building the docs
22 |
23 | The docs are automatically built and deployed using GitHub Actions. To build them locally, run:
24 |
25 | ```bash
26 | pnpm -F docs build
27 | ```
28 |
--------------------------------------------------------------------------------
/docs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "docs",
4 | "type": "module",
5 | "scripts": {
6 | "dev": "astro dev",
7 | "start": "astro dev",
8 | "build": "astro build",
9 | "preview": "astro preview",
10 | "astro": "astro",
11 | "translate": "tsx src/scripts/translate.ts"
12 | },
13 | "dependencies": {
14 | "@astrojs/starlight": "^0.34.3",
15 | "@astrojs/starlight-tailwind": "^3.0.1",
16 | "@openai/agents": "workspace:*",
17 | "@tailwindcss/vite": "^4.0.17",
18 | "astro": "^5.5.3",
19 | "sharp": "^0.34.2",
20 | "starlight-llms-txt": "^0.5.0",
21 | "starlight-typedoc": "^0.21.0",
22 | "typedoc": "^0.28.1",
23 | "typedoc-plugin-markdown": "^4.6.0"
24 | },
25 | "devDependencies": {
26 | "tailwindcss": "^3.3.3",
27 | "tsx": "^4.19.4",
28 | "typedoc-plugin-zod": "^1.4.1"
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/docs/src/assets/houston.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openai/openai-agents-js/4a0fe867e4acdc0bc7ce7cbba5d3015bc9d18ec9/docs/src/assets/houston.webp
--------------------------------------------------------------------------------
/docs/src/components/Hero.astro:
--------------------------------------------------------------------------------
1 | ---
2 | import { Code, TabItem, Tabs } from '@astrojs/starlight/components';
3 | import helloWorldExample from '../../../examples/docs/toppage/textAgent.ts?raw';
4 | import helloWorldVoiceExample from '../../../examples/docs/toppage/voiceAgent.ts?raw';
5 | const path = Astro.url.pathname;
6 | const pathPrefix =
7 | path !== '/' ? (!path.endsWith('/') ? path + '/' : path) : '';
8 | ---
9 |
10 |
11 |
12 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
34 |
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/docs/src/components/Title.astro:
--------------------------------------------------------------------------------
1 | ---
2 | import Logo from './Logo.astro';
3 | ---
4 |
5 |
6 |
--------------------------------------------------------------------------------
/docs/src/content.config.ts:
--------------------------------------------------------------------------------
1 | import { defineCollection } from 'astro:content';
2 | import { docsLoader } from '@astrojs/starlight/loaders';
3 | import { docsSchema } from '@astrojs/starlight/schema';
4 |
5 | export const collections = {
6 | docs: defineCollection({ loader: docsLoader(), schema: docsSchema() }),
7 | };
8 |
--------------------------------------------------------------------------------
/docs/src/content/docs/guides/troubleshooting.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Troubleshooting
3 | description: Learn how to troubleshoot issues with the OpenAI Agents SDK.
4 | ---
5 |
6 | ## Supported environments
7 |
8 | The OpenAI Agents SDK is supported on the following server environments:
9 |
10 | - Node.js 22+
11 | - Deno 2.35+
12 | - Bun 1.2.5+
13 |
14 | ### Limited support
15 |
16 | - **Cloudflare Workers**: The Agents SDK can be used in Cloudflare Workers, but currently comes with some limitations:
17 | - The SDK currently requires `nodejs_compat` to be enabled
18 | - The SDK can currently only be imported using a dynamic import (`await import('@openai/agents')`), as shown in the sketch below
19 | - Due to Cloudflare Workers' limited support for `AsyncLocalStorage`, some traces might not be accurate
20 | - **Browsers**:
21 | - Tracing is currently not supported in browsers
22 | - **v8 isolates**:
23 | - While you should be able to bundle the SDK for v8 isolates if you use a bundler with the right browser polyfills, tracing will not work
24 | - v8 isolates have not been extensively tested
25 |
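If you need a reference point, here is a minimal sketch of loading the SDK inside a Worker with a dynamic import (the handler shape and the agent defined here are illustrative, not part of the SDK):

```typescript
// Minimal sketch: assumes `nodejs_compat` is enabled for the Worker.
export default {
  async fetch(_request: Request): Promise<Response> {
    // Load the SDK with a dynamic import inside the handler.
    const { Agent, run } = await import('@openai/agents');

    const agent = new Agent({
      name: 'Assistant',
      instructions: 'You are a helpful assistant.',
    });

    const result = await run(agent, 'Say hello from a Cloudflare Worker.');
    return new Response(String(result.finalOutput));
  },
};
```
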
26 | ## Debug logging
27 |
28 | If you are running into problems with the SDK, you can enable debug logging to get more information about what is happening.
29 |
30 | Enable debug logging by setting the `DEBUG` environment variable to `openai-agents:*`.
31 |
32 | ```bash
33 | DEBUG=openai-agents:*
34 | ```
35 |
36 | Alternatively, you can scope the debugging to specific parts of the SDK:
37 |
38 | - `openai-agents:core` — for the main execution logic of the SDK
39 | - `openai-agents:openai` — for the OpenAI API calls
40 | - `openai-agents:realtime` — for the Realtime Agents components
41 |
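For example, to see only the core execution logs while running a script (`my-agent.ts` below is a placeholder for your own entry point):

```bash
DEBUG=openai-agents:core npx tsx my-agent.ts
```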
--------------------------------------------------------------------------------
/docs/src/content/docs/ja/extensions/ai-sdk.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: AI SDK で任意モデルを指定
3 | description: Connect your Agents SDK agents to any model through Vercel's AI SDK
4 | ---
5 |
6 | import { Aside, Steps, Code } from '@astrojs/starlight/components';
7 | import aiSdkSetupExample from '../../../../../../examples/docs/extensions/ai-sdk-setup.ts?raw';
8 |
9 |
14 |
15 | Agents SDK は、標準で Responses API または Chat Completions API を通じて OpenAI モデルと動作します。別のモデルを使用したい場合は、[Vercel's AI SDK](https://sdk.vercel.ai/) がサポートするさまざまなモデルを、このアダプター経由で Agents SDK に組み込むことができます。
16 |
17 | ## セットアップ
18 |
19 |
20 |
21 | 1. extensions パッケージをインストールして AI SDK アダプターを追加します:
22 |
23 | ```bash
24 | npm install @openai/agents-extensions
25 | ```
26 |
27 | 2. [Vercel's AI SDK](https://sdk.vercel.ai/docs/models/overview) から使用したいモデルパッケージを選択してインストールします:
28 |
29 | ```bash
30 | npm install @ai-sdk/openai
31 | ```
32 |
33 | 3. エージェントに接続するためにアダプターとモデルをインポートします:
34 |
35 | ```typescript
36 | import { openai } from '@ai-sdk/openai';
37 | import { aisdk } from '@openai/agents-extensions';
38 | ```
39 |
40 | 4. エージェントが使用するモデルのインスタンスを初期化します:
41 |
42 | ```typescript
43 | const model = aisdk(openai('o4-mini'));
44 | ```
45 |
46 |
47 |
48 | ## 例
49 |
50 |
51 |
--------------------------------------------------------------------------------
/docs/src/content/docs/ja/guides/context.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: コンテキスト管理
3 | description: Learn how to provide local data via RunContext and expose context to the LLM
4 | ---
5 |
6 | import { Aside, Code } from '@astrojs/starlight/components';
7 | import localContextExample from '../../../../../../examples/docs/context/localContext.ts?raw';
8 |
9 | コンテキストという言葉には複数の意味があります。主に次の 2 種類のコンテキストを扱います:
10 |
11 | 1. **ローカルコンテキスト**
12 | 実行中にコードからアクセスできる依存関係やデータ、`onHandoff` のようなコールバック、ライフサイクルフック
13 | 2. **エージェント/LLM コンテキスト**
14 | 言語モデルが応答を生成するときに参照できる情報
15 |
16 | ## ローカルコンテキスト
17 |
18 | ローカルコンテキストは `RunContext` 型で表されます。状態や依存関係を保持する任意のオブジェクトを作成し、それを `Runner.run()` に渡します。すべてのツール呼び出しとフックは `RunContext` ラッパーを受け取り、そのオブジェクトを読み書きできます。
19 |
20 |
25 |
26 | 同じ実行に参加するすべてのエージェント、ツール、フックは同じ **型** のコンテキストを使用する必要があります。
27 |
28 | ローカルコンテキストは次のような用途に適しています:
29 |
30 | - 実行に関するデータ (ユーザー名、ID など)
31 | - ロガーやデータフェッチャーなどの依存関係
32 | - ヘルパー関数
33 |
34 |
38 |
39 | ## エージェント/LLM コンテキスト
40 |
41 | LLM が呼び出されるとき、モデルが参照できるのは会話履歴だけです。追加情報を利用可能にする方法はいくつかあります:
42 |
43 | 1. Agent の `instructions` に追加する
44 | システムメッセージや開発者メッセージとしても知られます。静的文字列を指定するか、コンテキストを受け取って文字列を返す関数を指定できます。
45 | 2. `Runner.run()` 呼び出し時の `input` に含める
46 | `instructions` と似ていますが、[指揮系統](https://cdn.openai.com/spec/model-spec-2024-05-08.html#follow-the-chain-of-command)の下位にメッセージを配置できます。
47 | 3. 関数ツールを介して公開し、LLM が必要に応じてデータを取得できるようにする
48 | 4. リトリーバルや Web 検索ツールを使用して、ファイル、データベース、Web から取得した関連データをもとに回答を補強する
49 |
--------------------------------------------------------------------------------
/docs/src/content/docs/ja/guides/troubleshooting.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: トラブルシューティング
3 | description: Learn how to troubleshoot issues with the OpenAI Agents SDK.
4 | ---
5 |
6 | ## サポートされている環境
7 |
8 | OpenAI Agents SDK は次のサーバー環境でサポートされています:
9 |
10 | - Node.js 22+
11 | - Deno 2.35+
12 | - Bun 1.2.5+
13 |
14 | ### 限定サポート
15 |
16 | - **Cloudflare Workers**: Agents SDK は Cloudflare Workers で使用できますが、現在はいくつかの制限があります:
17 | - 現在、この SDK を使用するには `nodejs_compat` を有効にする必要があります
18 | - SDK は動的インポート `await import('@openai/agents')` でのみ読み込むことができます
19 | - `AsyncLocalStorage` へのサポートが限定的なため、一部のトレースが正確にならない可能性があります
20 | - **ブラウザ**:
21 | - ブラウザでは現在トレーシングはサポートされていません
22 | - **v8 isolates**:
23 | - 適切なブラウザ用ポリフィルを備えたバンドラーを使用すれば v8 isolates 向けに SDK をバンドルできますが、トレーシングは機能しません
24 | - v8 isolates での動作は十分にテストされていません
25 |
26 | ## デバッグログ
27 |
28 | SDK の使用で問題が発生した場合、デバッグログを有効にすると状況の詳細が確認できます。
29 |
30 | `DEBUG` 環境変数に `openai-agents:*` を設定するとデバッグログが有効になります。
31 |
32 | ```bash
33 | DEBUG=openai-agents:*
34 | ```
35 |
36 | また、以下のように対象を限定してデバッグすることもできます:
37 |
38 | - `openai-agents:core` — SDK の主な実行ロジック用
39 | - `openai-agents:openai` — OpenAI API 呼び出し用
40 | - `openai-agents:realtime` — Realtime Agents コンポーネント用
41 |
--------------------------------------------------------------------------------
/docs/tailwind.config.mjs:
--------------------------------------------------------------------------------
1 | import starlightPlugin from '@astrojs/starlight-tailwind';
2 |
3 | /** @type {import('tailwindcss').Config} */
4 | export default {
5 | darkMode: ['class'],
6 | content: ['./src/**/*.{astro,html,js,jsx,md,mdx,svelte,ts,tsx,vue}'],
7 | plugins: [starlightPlugin()],
8 | };
9 |
--------------------------------------------------------------------------------
/docs/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "astro/tsconfigs/strict",
3 | "include": [".astro/types.d.ts", "**/*"],
4 | "exclude": ["dist"]
5 | }
6 |
--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
1 | // export default {
2 | // parser: '@typescript-eslint/parser',
3 | // plugins: ['@typescript-eslint', 'unused-imports', 'prettier'],
4 | // rules: {
5 | // 'no-unused-vars': 'off',
6 | // 'prettier/prettier': 'error',
7 | // 'unused-imports/no-unused-imports': 'error',
8 | // },
9 | // root: true,
10 | // };
11 |
12 | import eslint from '@eslint/js';
13 | // import someOtherConfig from 'eslint-config-other-configuration-that-enables-formatting-rules';
14 | import prettierConfig from 'eslint-config-prettier';
15 | import tseslint from 'typescript-eslint';
16 | import { globalIgnores } from 'eslint/config';
17 |
18 | export default tseslint.config(
19 | globalIgnores([
20 | '**/dist/**',
21 | '**/node_modules/**',
22 | '**/docs/.astro/**',
23 | 'examples/realtime-next/**',
24 | 'examples/realtime-demo/**',
25 | 'integration-tests/**',
26 | ]),
27 | eslint.configs.recommended,
28 | tseslint.configs.recommended,
29 | prettierConfig,
30 | [
31 | {
32 | rules: {
33 | '@typescript-eslint/no-explicit-any': 'off',
34 | '@typescript-eslint/no-unused-vars': [
35 | 'error',
36 | {
37 | argsIgnorePattern: '^_',
38 | varsIgnorePattern: '^_',
39 | caughtErrorsIgnorePattern: '^_',
40 | },
41 | ],
42 | },
43 | },
44 | {
45 | files: ['examples/docs/**'],
46 | rules: {
47 | '@typescript-eslint/no-unused-vars': 'off',
48 | },
49 | },
50 | ],
51 | );
52 |
--------------------------------------------------------------------------------
/examples/agent-patterns/.gitignore:
--------------------------------------------------------------------------------
1 | result.json
--------------------------------------------------------------------------------
/examples/agent-patterns/input-guardrails.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, withTrace } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | async function main() {
5 | await withTrace('Input Guardrail Example', async () => {
6 | const guardrailAgent = new Agent({
7 | name: 'Guardrail agent',
8 | instructions:
9 | 'Check if the user is asking you to do their math homework.',
10 | outputType: z.object({ isMathHomework: z.boolean() }),
11 | });
12 |
13 | const agent = new Agent({
14 | name: 'Customer support agent',
15 | instructions:
16 | 'You are a customer support agent. You help customers with their questions.',
17 | inputGuardrails: [
18 | {
19 | name: 'Math Homework Guardrail',
20 | execute: async ({ input, context }) => {
21 | const result = await run(guardrailAgent, input, { context });
22 | return {
23 | tripwireTriggered: result.finalOutput?.isMathHomework ?? false,
24 | outputInfo: result.finalOutput,
25 | };
26 | },
27 | },
28 | ],
29 | });
30 |
31 | const inputs = [
32 | 'What is the capital of California?',
33 | 'Can you help me solve for x: 2x + 5 = 11?',
34 | ];
35 | for (const input of inputs) {
36 | try {
37 | const result = await run(agent, input);
38 | console.log(result.finalOutput);
39 | } catch (e: unknown) {
40 | console.log(
41 | `Sorry, I can't help you with your math homework. (error: ${e})`,
42 | );
43 | }
44 | }
45 | });
46 | }
47 |
48 | main().catch(console.error);
49 |
--------------------------------------------------------------------------------
/examples/agent-patterns/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "agent-patterns",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "chalk": "^5.4.1",
7 | "zod": "~3.25.40"
8 | },
9 | "scripts": {
10 | "build-check": "tsc --noEmit",
11 | "start:agents-as-tools": "tsx agents-as-tools.ts",
12 | "start:deterministic": "tsx deterministic.ts",
13 | "start:forcing-tool-use": "tsx forcing-tool-use.ts -t default",
14 | "start:human-in-the-loop-stream": "tsx human-in-the-loop-stream.ts",
15 | "start:human-in-the-loop": "tsx human-in-the-loop.ts",
16 | "start:input-guardrails": "tsx input-guardrails.ts",
17 | "start:llm-as-a-judge": "tsx llm-as-a-judge.ts",
18 | "start:output-guardrails": "tsx output-guardrails.ts",
19 | "start:parallelization": "tsx parallelization.ts",
20 | "start:routing": "tsx routing.ts",
21 | "start:streamed": "tsx streamed.ts",
22 | "start:streaming-guardrails": "tsx streaming-guardrails.ts"
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/examples/agent-patterns/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/ai-sdk/README.md:
--------------------------------------------------------------------------------
1 | # AI SDK Example
2 |
3 | This example shows how to run the Agents SDK with a model provided by the [AI SDK](https://www.npmjs.com/package/@ai-sdk/openai).
4 |
5 | The [ai-sdk-model.ts](./ai-sdk-model.ts) script:
6 |
7 | - Wraps the AI SDK `openai` provider with `aisdk` from `@openai/agents-extensions`.
8 | - Creates a simple `get_weather` tool that returns a mock weather string.
9 | - Defines a data agent that uses this model and tool.
10 | - Runs a parent agent that hands off to the data agent to answer a weather question.
11 |
12 | ## Running the script
13 |
14 | From the repository root, execute:
15 |
16 | ```bash
17 | pnpm -F ai-sdk start:sdk-model
18 | ```
19 |
20 | The script prints the final output produced by the runner.
21 |
22 |
--------------------------------------------------------------------------------
/examples/ai-sdk/ai-sdk-model.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { Agent, run, tool } from '@openai/agents';
3 | import { openai } from '@ai-sdk/openai';
4 | import { aisdk } from '@openai/agents-extensions';
5 |
6 | const model = aisdk(openai('gpt-4.1-nano'));
7 |
8 | const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
9 |
10 | const getWeatherTool = tool({
11 | name: 'get_weather',
12 | description: 'Get the weather for a given city',
13 | parameters: z.object({ city: z.string() }),
14 | execute: async (input) => {
15 | await sleep(300);
16 | return `The weather in ${input.city} is sunny`;
17 | },
18 | });
19 |
20 | const dataAgent = new Agent({
21 | name: 'Weather Data Agent',
22 | instructions: 'You are a weather data agent.',
23 | handoffDescription:
24 | 'When you are asked about the weather, you will use tools to get the weather.',
25 | tools: [getWeatherTool],
26 | model, // Using the AI SDK model for this agent
27 | });
28 |
29 | const agent = new Agent({
30 | name: 'Helpful Assistant',
31 | instructions:
32 | 'You are a helpful assistant. When you need to get the weather, you can hand off the task to the Weather Data Agent.',
33 | handoffs: [dataAgent],
34 | });
35 |
36 | async function main() {
37 | const result = await run(
38 | agent,
39 | 'Hello what is the weather in San Francisco and oakland?',
40 | );
41 | console.log(result.finalOutput);
42 | }
43 |
44 | main();
45 |
--------------------------------------------------------------------------------
/examples/ai-sdk/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "ai-sdk",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "@openai/agents-extensions": "workspace:*",
7 | "@ai-sdk/openai": "^1.1.3",
8 | "zod": "~3.25.40"
9 | },
10 | "scripts": {
11 | "build-check": "tsc --noEmit",
12 | "start:sdk-model": "tsx ai-sdk-model.ts",
13 | "start:stream": "tsx stream.ts"
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/examples/ai-sdk/stream.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { Agent, run, tool } from '@openai/agents';
3 | import { openai } from '@ai-sdk/openai';
4 | import { aisdk } from '@openai/agents-extensions';
5 |
6 | const model = aisdk(openai('gpt-4.1-nano'));
7 |
8 | const getWeatherTool = tool({
9 | name: 'get_weather',
10 | description: 'Get the weather for a given city',
11 | parameters: z.object({ city: z.string() }),
12 | async execute({ city }) {
13 | return `The weather in ${city} is sunny`;
14 | },
15 | });
16 |
17 | const agent = new Agent({
18 | name: 'Weather agent',
19 | instructions: 'You provide weather information.',
20 | tools: [getWeatherTool],
21 | model,
22 | });
23 |
24 | async function main() {
25 | const stream = await run(agent, 'What is the weather in San Francisco?', {
26 | stream: true,
27 | });
28 |
29 | for await (const text of stream.toTextStream()) {
30 | process.stdout.write(text);
31 | }
32 | console.log();
33 | }
34 |
35 | if (require.main === module) {
36 | main().catch(console.error);
37 | }
38 |
--------------------------------------------------------------------------------
/examples/ai-sdk/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/basic/dynamic-system-prompt.ts:
--------------------------------------------------------------------------------
1 | import { Agent, RunContext, run } from '@openai/agents';
2 |
3 | type Style = 'haiku' | 'pirate' | 'robot';
4 |
5 | interface CustomContext {
6 | style: Style;
7 | }
8 |
9 | function customInstructions(
10 | runContext: RunContext<CustomContext>,
11 | _agent: Agent<CustomContext>,
12 | ): string {
13 | const context = runContext.context;
14 | if (context.style === 'haiku') {
15 | return 'Only respond in haikus.';
16 | } else if (context.style === 'pirate') {
17 | return 'Respond as a pirate.';
18 | } else {
19 | return "Respond as a robot and say 'beep boop' a lot.";
20 | }
21 | }
22 |
23 | const agent = new Agent<CustomContext>({
24 | name: 'Chat agent',
25 | instructions: customInstructions,
26 | });
27 |
28 | async function main() {
29 | const choices: Style[] = ['haiku', 'pirate', 'robot'];
30 | const choice = choices[Math.floor(Math.random() * choices.length)];
31 | const context: CustomContext = { style: choice };
32 | console.log(`Using style: ${choice}\n`);
33 |
34 | const userMessage = 'Tell me a joke.';
35 | console.log(`User: ${userMessage}`);
36 | const result = await run(agent, userMessage, { context });
37 |
38 | console.log(`Assistant: ${result.finalOutput}`);
39 | }
40 |
41 | if (require.main === module) {
42 | main().catch(console.error);
43 | }
44 |
--------------------------------------------------------------------------------
/examples/basic/hello-world.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | async function main() {
4 | const agent = new Agent({
5 | name: 'Assistant',
6 | instructions: 'You only respond in haikus.',
7 | });
8 |
9 | const result = await run(agent, 'Tell me about recursion in programming.');
10 | console.log(result.finalOutput);
11 | // Example output:
12 | // Function calls itself,
13 | // Looping in smaller pieces,
14 | // Endless by design.
15 | }
16 |
17 | if (require.main === module) {
18 | main().catch(console.error);
19 | }
20 |
--------------------------------------------------------------------------------
/examples/basic/index.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { Agent, run, tool } from '@openai/agents';
3 |
4 | const getWeatherTool = tool({
5 | name: 'get_weather',
6 | description: 'Get the weather for a given city',
7 | parameters: z.object({
8 | demo: z.string(),
9 | }),
10 | execute: async (input) => {
11 | return `The weather in ${input.demo} is sunny`;
12 | },
13 | });
14 |
15 | const dataAgentTwo = new Agent({
16 | name: 'Data agent',
17 | instructions: 'You are a data agent',
18 | handoffDescription: 'You know everything about the weather',
19 | tools: [getWeatherTool],
20 | });
21 |
22 | const agent = new Agent({
23 | name: 'Basic test agent',
24 | instructions: 'You are a basic agent',
25 | handoffs: [dataAgentTwo],
26 | });
27 |
28 | async function main() {
29 | const result = await run(agent, 'What is the weather in San Francisco?');
30 |
31 | console.log(result.finalOutput);
32 | }
33 |
34 | main();
35 |
--------------------------------------------------------------------------------
/examples/basic/json-schema-output-type.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, JsonSchemaDefinition } from '@openai/agents';
2 |
3 | const WeatherSchema: JsonSchemaDefinition = {
4 | type: 'json_schema',
5 | name: 'Weather',
6 | strict: true,
7 | schema: {
8 | type: 'object',
9 | properties: { city: { type: 'string' }, forecast: { type: 'string' } },
10 | required: ['city', 'forecast'],
11 | additionalProperties: false,
12 | },
13 | };
14 |
15 | async function main() {
16 | const agent = new Agent({
17 | name: 'Weather reporter',
18 | instructions: 'Return the city and a short weather forecast.',
19 | outputType: WeatherSchema,
20 | });
21 |
22 | const result = await run(agent, 'What is the weather in London?');
23 | console.log(result.finalOutput);
24 | // { city: 'London', forecast: '...'}
25 | }
26 |
27 | main().catch(console.error);
28 |
--------------------------------------------------------------------------------
/examples/basic/local-image.ts:
--------------------------------------------------------------------------------
1 | import fs from 'fs';
2 | import path from 'path';
3 | import { Agent, run } from '@openai/agents';
4 |
5 | const bisonImagePath = path.join(__dirname, 'media/image_bison.jpg');
6 |
7 | function imageToBase64(imagePath: string): string {
8 | const imageBuffer = fs.readFileSync(imagePath);
9 | return imageBuffer.toString('base64');
10 | }
11 |
12 | async function main() {
13 | const agent = new Agent({
14 | name: 'Assistant',
15 | instructions: 'You are a helpful assistant.',
16 | });
17 |
18 | const b64Image = imageToBase64(bisonImagePath);
19 | const result = await run(agent, [
20 | {
21 | role: 'user',
22 | content: [
23 | {
24 | type: 'input_image',
25 | image: `data:image/jpeg;base64,${b64Image}`,
26 | providerData: {
27 | detail: 'auto',
28 | },
29 | },
30 | ],
31 | },
32 | {
33 | role: 'user',
34 | content: 'What do you see in this image?',
35 | },
36 | ]);
37 |
38 | console.log(result.finalOutput);
39 | // This image shows a large American bison standing on a grassy hill. The bison has a shaggy brown coat, with parts of its fur shedding, and prominent curved horns. The background is mostly a light, overcast sky, which makes the bison stand out prominently in the image. There is green grass and some small wild plants in the foreground. The overall scene appears natural and serene, likely in a prairie or grassland environment.
40 | }
41 |
42 | if (require.main === module) {
43 | main().catch(console.error);
44 | }
45 |
--------------------------------------------------------------------------------
/examples/basic/media/image_bison.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openai/openai-agents-js/4a0fe867e4acdc0bc7ce7cbba5d3015bc9d18ec9/examples/basic/media/image_bison.jpg
--------------------------------------------------------------------------------
/examples/basic/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "basic",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "zod": "~3.25.40"
7 | },
8 | "scripts": {
9 | "build-check": "tsc --noEmit",
10 | "start": "tsx index.ts",
11 | "start:agent-lifecycle-example": "tsx agent-lifecycle-example.ts",
12 | "start:chat": "tsx chat.ts",
13 | "start:dynamic-system-prompt": "tsx dynamic-system-prompt.ts",
14 | "start:hello-world": "tsx hello-world.ts",
15 | "start:lifecycle-example": "tsx lifecycle-example.ts",
16 | "start:local-image": "tsx local-image.ts",
17 | "start:previous-response-id": "tsx previous-response-id.ts",
18 | "start:remote-image": "tsx remote-image.ts",
19 | "start:stream-items": "tsx stream-items.ts",
20 | "start:stream-text": "tsx stream-text.ts",
21 | "start:json-schema-output-type": "tsx json-schema-output-type.ts",
22 | "start:tool-use-behavior": "tsx tool-use-behavior.ts",
23 | "start:tools": "tsx tools.ts"
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/examples/basic/remote-image.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const URL =
4 | 'https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg';
5 |
6 | async function main() {
7 | const agent = new Agent({
8 | name: 'Assistant',
9 | instructions: 'You are a helpful assistant.',
10 | });
11 |
12 | const result = await run(agent, [
13 | {
14 | role: 'user',
15 | content: [
16 | {
17 | type: 'input_image',
18 | image: URL,
19 | providerData: {
20 | detail: 'auto',
21 | },
22 | },
23 | ],
24 | },
25 | {
26 | role: 'user',
27 | content: 'What do you see in this image?',
28 | },
29 | ]);
30 |
31 | console.log(result.finalOutput);
32 | // This image shows the Golden Gate Bridge, a famous suspension bridge located in San Francisco, California. The bridge is painted in its signature "International Orange" color and spans the Golden Gate Strait, connecting San Francisco to Marin County. The photo is taken during daylight, with the city and hills visible in the background and water beneath the bridge. The bridge is an iconic landmark and a symbol of San Francisco.
33 | }
34 |
35 | if (require.main === module) {
36 | main().catch(console.error);
37 | }
38 |
--------------------------------------------------------------------------------
/examples/basic/stream-text.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | async function main() {
4 | const agent = new Agent({
5 | name: 'Joker',
6 | instructions: 'You are a helpful assistant.',
7 | });
8 |
9 | const stream = await run(agent, 'Please tell me 5 jokes.', {
10 | stream: true,
11 | });
12 | for await (const event of stream.toTextStream()) {
13 | process.stdout.write(event);
14 | }
15 | console.log();
16 | }
17 |
18 | if (require.main === module) {
19 | main().catch(console.error);
20 | }
21 |
--------------------------------------------------------------------------------
/examples/basic/tools.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, tool } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | type Weather = {
5 | city: string;
6 | temperatureRange: string;
7 | conditions: string;
8 | };
9 |
10 | const getWeather = tool({
11 | name: 'get_weather',
12 | description: 'Get the weather for a city.',
13 | parameters: z.object({ city: z.string() }),
14 | execute: async ({ city }): Promise<Weather> => {
15 | return {
16 | city,
17 | temperatureRange: '14-20C',
18 | conditions: 'Sunny with wind.',
19 | };
20 | },
21 | });
22 |
23 | const agent = new Agent({
24 | name: 'Hello world',
25 | instructions: 'You are a helpful agent.',
26 | tools: [getWeather],
27 | });
28 |
29 | async function main() {
30 | const result = await run(agent, "What's the weather in Tokyo?");
31 | console.log(result.finalOutput);
32 | // The weather in Tokyo is sunny with some wind, and the temperature ranges between 14°C and 20°C.
33 | }
34 |
35 | if (require.main === module) {
36 | main();
37 | }
38 |
--------------------------------------------------------------------------------
/examples/basic/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/customer-service/README.md:
--------------------------------------------------------------------------------
1 | # Customer Service Agent
2 |
3 | This example demonstrates a multi-agent customer service workflow for an airline. The `index.ts` script sets up a triage agent that can delegate to specialized FAQ and seat booking agents. Tools are used to look up common questions and to update a passenger's seat. Interaction occurs through a simple CLI loop, showing how agents can hand off between each other and call tools.
4 |
5 | Run the demo with:
6 |
7 | ```bash
8 | pnpm examples:customer-service
9 | ```
10 |
11 |
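12 | The snippet below is a minimal, illustrative sketch of the triage/hand-off pattern this demo uses. The agent names, tool names, parameters, and instructions are simplified placeholders, not the exact ones defined in `index.ts`.
13 |
14 | ```ts
15 | import { Agent, run, tool } from '@openai/agents';
16 | import { z } from 'zod';
17 |
18 | // Placeholder tools standing in for the FAQ lookup and seat update logic.
19 | const faqLookup = tool({
20 |   name: 'faq_lookup',
21 |   description: 'Answer a common airline question.',
22 |   parameters: z.object({ question: z.string() }),
23 |   execute: async ({ question }) => `Answer to: ${question}`,
24 | });
25 |
26 | const updateSeat = tool({
27 |   name: 'update_seat',
28 |   description: "Update a passenger's seat.",
29 |   parameters: z.object({ confirmation: z.string(), seat: z.string() }),
30 |   execute: async ({ confirmation, seat }) =>
31 |     `Seat ${seat} booked for confirmation ${confirmation}.`,
32 | });
33 |
34 | const faqAgent = new Agent({
35 |   name: 'FAQ Agent',
36 |   instructions: 'Answer frequently asked airline questions.',
37 |   tools: [faqLookup],
38 | });
39 |
40 | const seatBookingAgent = new Agent({
41 |   name: 'Seat Booking Agent',
42 |   instructions: 'Help the customer change their seat.',
43 |   tools: [updateSeat],
44 | });
45 |
46 | // Agent.create keeps the finalOutput type aware of the possible handoffs.
47 | const triageAgent = Agent.create({
48 |   name: 'Triage Agent',
49 |   instructions: 'Route the customer to the FAQ or seat booking agent.',
50 |   handoffs: [faqAgent, seatBookingAgent],
51 | });
52 |
53 | const result = await run(triageAgent, 'Can I move to seat 12A?');
54 | console.log(result.finalOutput);
55 | ```
56 |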
--------------------------------------------------------------------------------
/examples/customer-service/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "customer-service",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "@openai/agents-core": "workspace:*",
7 | "zod": "~3.25.40"
8 | },
9 | "scripts": {
10 | "build-check": "tsc --noEmit",
11 | "start": "tsx index.ts"
12 | }
13 | }
--------------------------------------------------------------------------------
/examples/customer-service/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentCloning.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 |
3 | const pirateAgent = new Agent({
4 | name: 'Pirate',
5 | instructions: 'Respond like a pirate – lots of “Arrr!”',
6 | model: 'o4-mini',
7 | });
8 |
9 | const robotAgent = pirateAgent.clone({
10 | name: 'Robot',
11 | instructions: 'Respond like a robot – be precise and factual.',
12 | });
13 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentForcingToolUse.ts:
--------------------------------------------------------------------------------
1 | import { Agent, tool } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | const calculatorTool = tool({
5 | name: 'Calculator',
6 | description: 'Use this tool to answer questions about math problems.',
7 | parameters: z.object({ question: z.string() }),
8 | execute: async (input) => {
9 | throw new Error('TODO: implement this');
10 | },
11 | });
12 |
13 | const agent = new Agent({
14 | name: 'Strict tool user',
15 | instructions: 'Always answer using the calculator tool.',
16 | tools: [calculatorTool],
17 | modelSettings: { toolChoice: 'required' },
18 | });
19 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentWithAodOutputType.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | const CalendarEvent = z.object({
5 | name: z.string(),
6 | date: z.string(),
7 | participants: z.array(z.string()),
8 | });
9 |
10 | const extractor = new Agent({
11 | name: 'Calendar extractor',
12 | instructions: 'Extract calendar events from the supplied text.',
13 | outputType: CalendarEvent,
14 | });
15 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentWithContext.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 |
3 | interface Purchase {
4 | id: string;
5 | uid: string;
6 | deliveryStatus: string;
7 | }
8 | interface UserContext {
9 | uid: string;
10 | isProUser: boolean;
11 |
12 | // this function can be used within tools
13 | fetchPurchases(): Promise<Purchase[]>;
14 | }
15 |
16 | const agent = new Agent({
17 | name: 'Personal shopper',
18 | instructions: 'Recommend products the user will love.',
19 | });
20 |
21 | // Later
22 | import { run } from '@openai/agents';
23 |
24 | const result = await run(agent, 'Find me a new pair of running shoes', {
25 | context: { uid: 'abc', isProUser: true, fetchPurchases: async () => [] },
26 | });
27 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentWithDynamicInstructions.ts:
--------------------------------------------------------------------------------
1 | import { Agent, RunContext } from '@openai/agents';
2 |
3 | interface UserContext {
4 | name: string;
5 | }
6 |
7 | function buildInstructions(runContext: RunContext<UserContext>) {
8 | return `The user's name is ${runContext.context.name}. Be extra friendly!`;
9 | }
10 |
11 | const agent = new Agent({
12 | name: 'Personalized helper',
13 | instructions: buildInstructions,
14 | });
15 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentWithHandoffs.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 |
3 | const bookingAgent = new Agent({
4 | name: 'Booking Agent',
5 | instructions: 'Help users with booking requests.',
6 | });
7 |
8 | const refundAgent = new Agent({
9 | name: 'Refund Agent',
10 | instructions: 'Process refund requests politely and efficiently.',
11 | });
12 |
13 | // Use Agent.create method to ensure the finalOutput type considers handoffs
14 | const triageAgent = Agent.create({
15 | name: 'Triage Agent',
16 | instructions: [
17 | 'Help the user with their questions.',
18 | 'If the user asks about booking, hand off to the booking agent.',
19 | 'If the user asks about refunds, hand off to the refund agent.',
20 | ].join('\n'),
21 | handoffs: [bookingAgent, refundAgent],
22 | });
23 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentWithLifecycleHooks.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Verbose agent',
5 | instructions: 'Explain things thoroughly.',
6 | });
7 |
8 | agent.on('agent_start', (ctx, agent) => {
9 | console.log(`[${agent.name}] started`);
10 | });
11 | agent.on('agent_end', (ctx, output) => {
12 | console.log(`[agent] produced:`, output);
13 | });
14 |
--------------------------------------------------------------------------------
/examples/docs/agents/agentWithTools.ts:
--------------------------------------------------------------------------------
1 | import { Agent, tool } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | const getWeather = tool({
5 | name: 'get_weather',
6 | description: 'Return the weather for a given city.',
7 | parameters: z.object({ city: z.string() }),
8 | async execute({ city }) {
9 | return `The weather in ${city} is sunny.`;
10 | },
11 | });
12 |
13 | const agent = new Agent({
14 | name: 'Weather bot',
15 | instructions: 'You are a helpful weather bot.',
16 | model: 'o4-mini',
17 | tools: [getWeather],
18 | });
19 |
--------------------------------------------------------------------------------
/examples/docs/agents/simpleAgent.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Haiku Agent',
5 | instructions: 'Always respond in haiku form.',
6 | model: 'o4-mini', // optional – falls back to the default model
7 | });
8 |
--------------------------------------------------------------------------------
/examples/docs/config/getLogger.ts:
--------------------------------------------------------------------------------
1 | import { getLogger } from '@openai/agents';
2 |
3 | const logger = getLogger('my-app');
4 | logger.debug('something happened');
5 |
--------------------------------------------------------------------------------
/examples/docs/config/setDefaultOpenAIClient.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from 'openai';
2 | import { setDefaultOpenAIClient } from '@openai/agents';
3 |
4 | const customClient = new OpenAI({ baseURL: '...', apiKey: '...' });
5 | setDefaultOpenAIClient(customClient);
6 |
--------------------------------------------------------------------------------
/examples/docs/config/setDefaultOpenAIKey.ts:
--------------------------------------------------------------------------------
1 | import { setDefaultOpenAIKey } from '@openai/agents';
2 |
3 | setDefaultOpenAIKey(process.env.OPENAI_API_KEY!); // sk-...
4 |
--------------------------------------------------------------------------------
/examples/docs/config/setOpenAIAPI.ts:
--------------------------------------------------------------------------------
1 | import { setOpenAIAPI } from '@openai/agents';
2 |
3 | setOpenAIAPI('chat_completions');
4 |
--------------------------------------------------------------------------------
/examples/docs/config/setTracingDisabled.ts:
--------------------------------------------------------------------------------
1 | import { setTracingDisabled } from '@openai/agents';
2 |
3 | setTracingDisabled(true);
4 |
--------------------------------------------------------------------------------
/examples/docs/config/setTracingExportApiKey.ts:
--------------------------------------------------------------------------------
1 | import { setTracingExportApiKey } from '@openai/agents';
2 |
3 | setTracingExportApiKey('sk-...');
4 |
--------------------------------------------------------------------------------
/examples/docs/context/localContext.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, RunContext, tool } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | interface UserInfo {
5 | name: string;
6 | uid: number;
7 | }
8 |
9 | const fetchUserAge = tool({
10 | name: 'fetch_user_age',
11 | description: 'Return the age of the current user',
12 | parameters: z.object({}),
13 | execute: async (
14 | _args,
15 | runContext?: RunContext<UserInfo>,
16 | ): Promise<string> => {
17 | return `User ${runContext?.context.name} is 47 years old`;
18 | },
19 | });
20 |
21 | async function main() {
22 | const userInfo: UserInfo = { name: 'John', uid: 123 };
23 |
24 | const agent = new Agent({
25 | name: 'Assistant',
26 | tools: [fetchUserAge],
27 | });
28 |
29 | const result = await run(agent, 'What is the age of the user?', {
30 | context: userInfo,
31 | });
32 |
33 | console.log(result.finalOutput);
34 | // The user John is 47 years old.
35 | }
36 |
37 | if (require.main === module) {
38 | main().catch(console.error);
39 | }
40 |
--------------------------------------------------------------------------------
/examples/docs/custom-trace.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, withTrace } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Joke generator',
5 | instructions: 'Tell funny jokes.',
6 | });
7 |
8 | await withTrace('Joke workflow', async () => {
9 | const result = await run(agent, 'Tell me a joke');
10 | const secondResult = await run(
11 | agent,
12 | `Rate this joke: ${result.finalOutput}`,
13 | );
14 | console.log(`Joke: ${result.finalOutput}`);
15 | console.log(`Rating: ${secondResult.finalOutput}`);
16 | });
17 |
--------------------------------------------------------------------------------
/examples/docs/extensions/ai-sdk-setup.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | // Import the model package you installed
4 | import { openai } from '@ai-sdk/openai';
5 |
6 | // Import the adapter
7 | import { aisdk } from '@openai/agents-extensions';
8 |
9 | // Create a model instance to be used by the agent
10 | const model = aisdk(openai('o4-mini'));
11 |
12 | // Create an agent with the model
13 | const agent = new Agent({
14 | name: 'My Agent',
15 | instructions: 'You are a helpful assistant.',
16 | model,
17 | });
18 |
19 | // Run the agent with the new model
20 | run(agent, 'What is the capital of Germany?');
21 |
--------------------------------------------------------------------------------
/examples/docs/extensions/twilio-basic.ts:
--------------------------------------------------------------------------------
1 | import { TwilioRealtimeTransportLayer } from '@openai/agents-extensions';
2 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
3 |
4 | const agent = new RealtimeAgent({
5 | name: 'My Agent',
6 | });
7 |
8 | // Create a new transport mechanism that will bridge the connection between Twilio and
9 | // the OpenAI Realtime API.
10 | const twilioTransport = new TwilioRealtimeTransportLayer({
11 | // @ts-expect-error - this is not defined
12 | twilioWebSocket: websocketConnection,
13 | });
14 |
15 | const session = new RealtimeSession(agent, {
16 | // set your own transport
17 | transport: twilioTransport,
18 | });
19 |
--------------------------------------------------------------------------------
/examples/docs/guardrails/guardrails-input.ts:
--------------------------------------------------------------------------------
1 | import {
2 | Agent,
3 | run,
4 | InputGuardrailTripwireTriggered,
5 | InputGuardrail,
6 | } from '@openai/agents';
7 | import { z } from 'zod';
8 |
9 | const guardrailAgent = new Agent({
10 | name: 'Guardrail check',
11 | instructions: 'Check if the user is asking you to do their math homework.',
12 | outputType: z.object({
13 | isMathHomework: z.boolean(),
14 | reasoning: z.string(),
15 | }),
16 | });
17 |
18 | const mathGuardrail: InputGuardrail = {
19 | name: 'Math Homework Guardrail',
20 | execute: async ({ input, context }) => {
21 | const result = await run(guardrailAgent, input, { context });
22 | return {
23 | outputInfo: result.finalOutput,
24 | tripwireTriggered: result.finalOutput?.isMathHomework ?? false,
25 | };
26 | },
27 | };
28 |
29 | const agent = new Agent({
30 | name: 'Customer support agent',
31 | instructions:
32 | 'You are a customer support agent. You help customers with their questions.',
33 | inputGuardrails: [mathGuardrail],
34 | });
35 |
36 | async function main() {
37 | try {
38 | await run(agent, 'Hello, can you help me solve for x: 2x + 3 = 11?');
39 | console.log("Guardrail didn't trip - this is unexpected");
40 | } catch (e) {
41 | if (e instanceof InputGuardrailTripwireTriggered) {
42 | console.log('Math homework guardrail tripped');
43 | }
44 | }
45 | }
46 |
47 | main().catch(console.error);
48 |
--------------------------------------------------------------------------------
/examples/docs/handoffs/basicUsage.ts:
--------------------------------------------------------------------------------
1 | import { Agent, handoff } from '@openai/agents';
2 |
3 | const billingAgent = new Agent({ name: 'Billing agent' });
4 | const refundAgent = new Agent({ name: 'Refund agent' });
5 |
6 | // Use Agent.create method to ensure the finalOutput type considers handoffs
7 | const triageAgent = Agent.create({
8 | name: 'Triage agent',
9 | handoffs: [billingAgent, handoff(refundAgent)],
10 | });
11 |
--------------------------------------------------------------------------------
/examples/docs/handoffs/customizeHandoff.ts:
--------------------------------------------------------------------------------
1 | import { Agent, handoff, RunContext } from '@openai/agents';
2 |
3 | function onHandoff(ctx: RunContext) {
4 | console.log('Handoff called');
5 | }
6 |
7 | const agent = new Agent({ name: 'My agent' });
8 |
9 | const handoffObj = handoff(agent, {
10 | onHandoff,
11 | toolNameOverride: 'custom_handoff_tool',
12 | toolDescriptionOverride: 'Custom description',
13 | });
14 |
--------------------------------------------------------------------------------
/examples/docs/handoffs/handoffInput.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { Agent, handoff, RunContext } from '@openai/agents';
3 |
4 | const EscalationData = z.object({ reason: z.string() });
5 | type EscalationData = z.infer<typeof EscalationData>;
6 |
7 | async function onHandoff(
8 | ctx: RunContext,
9 | input: EscalationData | undefined,
10 | ) {
11 | console.log(`Escalation agent called with reason: ${input?.reason}`);
12 | }
13 |
14 | const agent = new Agent({ name: 'Escalation agent' });
15 |
16 | const handoffObj = handoff(agent, {
17 | onHandoff,
18 | inputType: EscalationData,
19 | });
20 |
--------------------------------------------------------------------------------
/examples/docs/handoffs/inputFilter.ts:
--------------------------------------------------------------------------------
1 | import { Agent, handoff } from '@openai/agents';
2 | import { removeAllTools } from '@openai/agents-core/extensions';
3 |
4 | const agent = new Agent({ name: 'FAQ agent' });
5 |
6 | const handoffObj = handoff(agent, {
7 | inputFilter: removeAllTools,
8 | });
9 |
--------------------------------------------------------------------------------
/examples/docs/handoffs/recommendedPrompt.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 | import { RECOMMENDED_PROMPT_PREFIX } from '@openai/agents-core/extensions';
3 |
4 | const billingAgent = new Agent({
5 | name: 'Billing agent',
6 | instructions: `${RECOMMENDED_PROMPT_PREFIX}
7 | Fill in the rest of your prompt here.`,
8 | });
9 |
--------------------------------------------------------------------------------
/examples/docs/hello-world-with-runner.ts:
--------------------------------------------------------------------------------
1 | import { Agent, Runner } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Assistant',
5 | instructions: 'You are a helpful assistant',
6 | });
7 |
8 | // You can pass custom configuration to the runner
9 | const runner = new Runner();
10 |
11 | const result = await runner.run(
12 | agent,
13 | 'Write a haiku about recursion in programming.',
14 | );
15 | console.log(result.finalOutput);
16 |
17 | // Code within the code,
18 | // Functions calling themselves,
19 | // Infinite loop's dance.
20 |
--------------------------------------------------------------------------------
/examples/docs/hello-world.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Assistant',
5 | instructions: 'You are a helpful assistant',
6 | });
7 |
8 | const result = await run(
9 | agent,
10 | 'Write a haiku about recursion in programming.',
11 | );
12 | console.log(result.finalOutput);
13 |
14 | // Code within the code,
15 | // Functions calling themselves,
16 | // Infinite loop's dance.
17 |
--------------------------------------------------------------------------------
/examples/docs/human-in-the-loop/toolApprovalDefinition.ts:
--------------------------------------------------------------------------------
1 | import { tool } from '@openai/agents';
2 | import z from 'zod';
3 |
4 | const sensitiveTool = tool({
5 | name: 'cancelOrder',
6 | description: 'Cancel order',
7 | parameters: z.object({
8 | orderId: z.number(),
9 | }),
10 | // always requires approval
11 | needsApproval: true,
12 | execute: async ({ orderId }, args) => {
13 | // prepare order return
14 | },
15 | });
16 |
17 | const sendEmail = tool({
18 | name: 'sendEmail',
19 | description: 'Send an email',
20 | parameters: z.object({
21 | to: z.string(),
22 | subject: z.string(),
23 | body: z.string(),
24 | }),
25 | needsApproval: async (_context, { subject }) => {
26 | // check if the email is spam
27 | return subject.includes('spam');
28 | },
29 | execute: async ({ to, subject, body }, args) => {
30 | // send email
31 | },
32 | });
33 |
--------------------------------------------------------------------------------
/examples/docs/models/agentWithModel.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Creative writer',
5 | model: 'gpt-4.1',
6 | });
7 |
--------------------------------------------------------------------------------
/examples/docs/models/customProviders.ts:
--------------------------------------------------------------------------------
1 | import {
2 | ModelProvider,
3 | Model,
4 | ModelRequest,
5 | AgentOutputType,
6 | ModelResponse,
7 | ResponseStreamEvent,
8 | TextOutput,
9 | } from '@openai/agents-core';
10 |
11 | import { Agent, Runner } from '@openai/agents';
12 |
13 | class EchoModel implements Model {
14 | name: string;
15 | constructor() {
16 | this.name = 'Echo';
17 | }
18 | async getResponse(request: ModelRequest): Promise<ModelResponse> {
19 | return {
20 | usage: {},
21 | output: [{ role: 'assistant', content: request.input as string }],
22 | } as any;
23 | }
24 | async *getStreamedResponse(
25 | _request: ModelRequest,
26 | ): AsyncIterable<ResponseStreamEvent> {
27 | yield {
28 | type: 'response.completed',
29 | response: { output: [], usage: {} },
30 | } as any;
31 | }
32 | }
33 |
34 | class EchoProvider implements ModelProvider {
35 | getModel(_modelName?: string): Promise<Model> | Model {
36 | return new EchoModel();
37 | }
38 | }
39 |
40 | const runner = new Runner({ modelProvider: new EchoProvider() });
41 | console.log(runner.config.modelProvider.getModel());
42 | const agent = new Agent({
43 | name: 'Test Agent',
44 | instructions: 'You are a helpful assistant.',
45 | model: new EchoModel(),
46 | modelSettings: { temperature: 0.7, toolChoice: 'auto' },
47 | });
48 | console.log(agent.model);
49 |
--------------------------------------------------------------------------------
/examples/docs/models/modelSettings.ts:
--------------------------------------------------------------------------------
1 | import { Runner, Agent } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Creative writer',
5 | // ...
6 | modelSettings: { temperature: 0.7, toolChoice: 'auto' },
7 | });
8 |
9 | // or globally
10 | new Runner({ modelSettings: { temperature: 0.3 } });
11 |
--------------------------------------------------------------------------------
/examples/docs/models/openaiProvider.ts:
--------------------------------------------------------------------------------
1 | import {
2 | Agent,
3 | Runner,
4 | setDefaultOpenAIKey,
5 | setDefaultOpenAIClient,
6 | setTracingExportApiKey,
7 | } from '@openai/agents';
8 | import { OpenAI } from 'openai';
9 |
10 | setDefaultOpenAIKey(process.env.OPENAI_API_KEY!);
11 |
12 | setDefaultOpenAIClient(new OpenAI({ apiKey: process.env.OPENAI_API_KEY! }));
13 |
14 | const runner = new Runner({ model: 'gpt-4o-mini' });
15 | const agent = new Agent({
16 | name: 'Test Agent',
17 | instructions: 'You are a helpful assistant.',
18 | modelSettings: { temperature: 0.7, toolChoice: 'auto' },
19 | });
20 |
21 | async function main() {
22 | const result = await runner.run(agent, 'Hey, I need your help!');
23 | console.log(result.finalOutput);
24 | }
25 |
26 | if (require.main === module) {
27 | main().catch((err) => {
28 | console.error(err);
29 | process.exit(1);
30 | });
31 | }
32 |
33 | setTracingExportApiKey(process.env.OPENAI_API_KEY!);
34 |
--------------------------------------------------------------------------------
/examples/docs/models/runnerWithModel.ts:
--------------------------------------------------------------------------------
1 | import { Runner } from '@openai/agents';
2 |
3 | const runner = new Runner({ model: 'gpt-4.1-mini' });
4 |
--------------------------------------------------------------------------------
/examples/docs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "docs",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "@openai/agents-core": "workspace:*",
7 | "@openai/agents-realtime": "workspace:*",
8 | "@openai/agents-extensions": "workspace:*",
9 | "@ai-sdk/openai": "^1.0.0",
10 | "server-only": "^0.0.1",
11 | "openai": "^5.0.1",
12 | "zod": "~3.25.40"
13 | },
14 | "scripts": {
15 | "build-check": "tsc --noEmit"
16 | },
17 | "devDependencies": {
18 | "typedoc-plugin-zod": "^1.4.1"
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/examples/docs/quickstart/index.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const historyTutorAgent = new Agent({
4 | name: 'History Tutor',
5 | instructions:
6 | 'You provide assistance with historical queries. Explain important events and context clearly.',
7 | });
8 |
9 | const mathTutorAgent = new Agent({
10 | name: 'Math Tutor',
11 | instructions:
12 | 'You provide help with math problems. Explain your reasoning at each step and include examples',
13 | });
14 |
15 | const triageAgent = new Agent({
16 | name: 'Triage Agent',
17 | instructions:
18 | "You determine which agent to use based on the user's homework question",
19 | handoffs: [historyTutorAgent, mathTutorAgent],
20 | });
21 |
22 | async function main() {
23 | const result = await run(triageAgent, 'What is the capital of France?');
24 | console.log(result.finalOutput);
25 | }
26 |
27 | main().catch((err) => console.error(err));
28 |
--------------------------------------------------------------------------------
/examples/docs/readme/readme-functions.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { Agent, run, tool } from '@openai/agents';
3 |
4 | const getWeatherTool = tool({
5 | name: 'get_weather',
6 | description: 'Get the weather for a given city',
7 | parameters: z.object({ city: z.string() }),
8 | execute: async (input) => {
9 | return `The weather in ${input.city} is sunny`;
10 | },
11 | });
12 |
13 | const agent = new Agent({
14 | name: 'Data agent',
15 | instructions: 'You are a data agent',
16 | tools: [getWeatherTool],
17 | });
18 |
19 | async function main() {
20 | const result = await run(agent, 'What is the weather in Tokyo?');
21 | console.log(result.finalOutput);
22 | }
23 |
24 | main().catch(console.error);
25 |
--------------------------------------------------------------------------------
/examples/docs/readme/readme-handoffs.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { Agent, run, tool } from '@openai/agents';
3 |
4 | const getWeatherTool = tool({
5 | name: 'get_weather',
6 | description: 'Get the weather for a given city',
7 | parameters: z.object({ city: z.string() }),
8 | execute: async (input) => {
9 | return `The weather in ${input.city} is sunny`;
10 | },
11 | });
12 |
13 | const dataAgent = new Agent({
14 | name: 'Data agent',
15 | instructions: 'You are a data agent',
16 | handoffDescription: 'You know everything about the weather',
17 | tools: [getWeatherTool],
18 | });
19 |
20 | // Use Agent.create method to ensure the finalOutput type considers handoffs
21 | const agent = Agent.create({
22 | name: 'Basic test agent',
23 | instructions: 'You are a basic agent',
24 | handoffs: [dataAgent],
25 | });
26 |
27 | async function main() {
28 | const result = await run(agent, 'What is the weather in San Francisco?');
29 | console.log(result.finalOutput);
30 | }
31 |
32 | main().catch(console.error);
33 |
--------------------------------------------------------------------------------
/examples/docs/readme/readme-hello-world.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | async function main() {
4 | const agent = new Agent({
5 | name: 'Assistant',
6 | instructions: 'You are a helpful assistant',
7 | });
8 | const result = await run(
9 | agent,
10 | 'Write a haiku about recursion in programming.',
11 | );
12 | console.log(result.finalOutput);
13 | // Code within the code,
14 | // Functions calling themselves,
15 | // Infinite loop's dance.
16 | }
17 |
18 | main().catch(console.error);
19 |
--------------------------------------------------------------------------------
/examples/docs/readme/readme-voice-agent.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { RealtimeAgent, RealtimeSession, tool } from '@openai/agents-realtime';
3 |
4 | const getWeatherTool = tool({
5 | name: 'get_weather',
6 | description: 'Get the weather for a given city',
7 | parameters: z.object({ city: z.string() }),
8 | execute: async (input) => {
9 | return `The weather in ${input.city} is sunny`;
10 | },
11 | });
12 |
13 | const agent = new RealtimeAgent({
14 | name: 'Data agent',
15 | instructions: 'You are a data agent',
16 | tools: [getWeatherTool],
17 | });
18 |
19 | async function main() {
20 | // Intended to be run in the browser
21 | const { apiKey } = await fetch('/path/to/ephemeral/key/generation').then(
22 | (resp) => resp.json(),
23 | );
24 | // automatically configures audio input/output so start talking
25 | const session = new RealtimeSession(agent);
26 | await session.connect({ apiKey });
27 | }
28 |
29 | main().catch(console.error);
30 |
--------------------------------------------------------------------------------
/examples/docs/results/handoffFinalOutputTypes.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | const refundAgent = new Agent({
5 | name: 'Refund Agent',
6 | instructions:
7 | 'You are a refund agent. You are responsible for refunding customers.',
8 | outputType: z.object({
9 | refundApproved: z.boolean(),
10 | }),
11 | });
12 |
13 | const orderAgent = new Agent({
14 | name: 'Order Agent',
15 | instructions:
16 | 'You are an order agent. You are responsible for processing orders.',
17 | outputType: z.object({
18 | orderId: z.string(),
19 | }),
20 | });
21 |
22 | const triageAgent = Agent.create({
23 | name: 'Triage Agent',
24 | instructions:
25 | 'You are a triage agent. You are responsible for triaging customer issues.',
26 | handoffs: [refundAgent, orderAgent],
27 | });
28 |
29 | const result = await run(triageAgent, 'I need a refund for my order');
30 |
31 | const output = result.finalOutput;
32 | // ^? { refundApproved: boolean } | { orderId: string } | string | undefined
33 |
--------------------------------------------------------------------------------
/examples/docs/results/historyLoop.ts:
--------------------------------------------------------------------------------
1 | import { AgentInputItem, Agent, user, run } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Assistant',
5 | instructions:
6 | 'You are a helpful assistant knowledgeable about recent AGI research.',
7 | });
8 |
9 | let history: AgentInputItem[] = [
10 | // initial message
11 | user('Are we there yet?'),
12 | ];
13 |
14 | for (let i = 0; i < 10; i++) {
15 | // run 10 times
16 | const result = await run(agent, history);
17 |
18 | // update the history to the new output
19 | history = result.history;
20 |
21 | history.push(user('How about now?'));
22 | }
23 |
--------------------------------------------------------------------------------
/examples/docs/running-agents/chatLoop.ts:
--------------------------------------------------------------------------------
1 | import { Agent, AgentInputItem, run } from '@openai/agents';
2 |
3 | let thread: AgentInputItem[] = [];
4 |
5 | const agent = new Agent({
6 | name: 'Assistant',
7 | });
8 |
9 | async function userSays(text: string) {
10 | const result = await run(
11 | agent,
12 | thread.concat({ role: 'user', content: text }),
13 | );
14 |
15 | thread = result.history; // Carry over history + newly generated items
16 | return result.finalOutput;
17 | }
18 |
19 | await userSays('What city is the Golden Gate Bridge in?');
20 | // -> "San Francisco"
21 |
22 | await userSays('What state is it in?');
23 | // -> "California"
24 |
--------------------------------------------------------------------------------
/examples/docs/running-agents/exceptions2.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import { Agent, run, tool, ToolCallError } from '@openai/agents';
3 |
4 | const unstableTool = tool({
5 | name: 'get_weather (unstable)',
6 | description: 'Get the weather for a given city',
7 | parameters: z.object({ city: z.string() }),
8 | errorFunction: (_, error) => {
9 | throw error; // the built-in error handler returns a string instead
10 | },
11 | execute: async () => {
12 | throw new Error('Failed to get weather');
13 | },
14 | });
15 |
16 | const stableTool = tool({
17 | name: 'get_weather (stable)',
18 | description: 'Get the weather for a given city',
19 | parameters: z.object({ city: z.string() }),
20 | execute: async (input) => {
21 | return `The weather in ${input.city} is sunny`;
22 | },
23 | });
24 |
25 | const agent = new Agent({
26 | name: 'Data agent',
27 | instructions: 'You are a data agent',
28 | tools: [unstableTool],
29 | });
30 |
31 | async function main() {
32 | try {
33 | const result = await run(agent, 'What is the weather in Tokyo?');
34 | console.log(result.finalOutput);
35 | } catch (e) {
36 | if (e instanceof ToolCallError) {
37 | console.error(`Tool call failed: ${e}`);
38 | // If you want to retry the execution with different settings,
39 | // you can reuse the runner's latest state this way:
40 | if (e.state) {
41 | agent.tools = [stableTool]; // fallback
42 | const result = await run(agent, e.state);
43 | console.log(result.finalOutput);
44 | }
45 | } else {
46 | throw e;
47 | }
48 | }
49 | }
50 |
51 | main().catch(console.error);
52 |
--------------------------------------------------------------------------------
/examples/docs/streaming/basicStreaming.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Storyteller',
5 | instructions:
6 | 'You are a storyteller. You will be given a topic and you will tell a story about it.',
7 | });
8 |
9 | const result = await run(agent, 'Tell me a story about a cat.', {
10 | stream: true,
11 | });
12 |
--------------------------------------------------------------------------------
/examples/docs/streaming/handleAllEvents.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Storyteller',
5 | instructions:
6 | 'You are a storyteller. You will be given a topic and you will tell a story about it.',
7 | });
8 |
9 | const result = await run(agent, 'Tell me a story about a cat.', {
10 | stream: true,
11 | });
12 |
13 | for await (const event of result) {
14 | // these are the raw events from the model
15 | if (event.type === 'raw_model_stream_event') {
16 | console.log(`${event.type} %o`, event.data);
17 | }
18 | // agent updated events
19 | if (event.type === 'agent_updated_stream_event') {
20 | console.log(`${event.type} %s`, event.agent.name);
21 | }
22 | // Agent SDK specific events
23 | if (event.type === 'run_item_stream_event') {
24 | console.log(`${event.type} %o`, event.item);
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/examples/docs/streaming/nodeTextStream.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Storyteller',
5 | instructions:
6 | 'You are a storyteller. You will be given a topic and you will tell a story about it.',
7 | });
8 |
9 | const result = await run(agent, 'Tell me a story about a cat.', {
10 | stream: true,
11 | });
12 |
13 | result
14 | .toTextStream({
15 | compatibleWithNodeStreams: true,
16 | })
17 | .pipe(process.stdout);
18 |
--------------------------------------------------------------------------------
/examples/docs/streaming/streamedHITL.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Storyteller',
5 | instructions:
6 | 'You are a storyteller. You will be given a topic and you will tell a story about it.',
7 | });
8 |
9 | let stream = await run(
10 | agent,
11 | 'What is the weather in San Francisco and Oakland?',
12 | { stream: true },
13 | );
14 | stream.toTextStream({ compatibleWithNodeStreams: true }).pipe(process.stdout);
15 | await stream.completed;
16 |
17 | while (stream.interruptions?.length) {
18 | console.log(
19 | 'Human-in-the-loop: approval required for the following tool calls:',
20 | );
21 | const state = stream.state;
22 | for (const interruption of stream.interruptions) {
23 | const ok = await confirm(
24 | `Agent ${interruption.agent.name} would like to use the tool ${interruption.rawItem.name} with "${interruption.rawItem.arguments}". Do you approve?`,
25 | );
26 | if (ok) {
27 | state.approve(interruption);
28 | } else {
29 | state.reject(interruption);
30 | }
31 | }
32 |
33 | // Resume execution with streaming output
34 | stream = await run(agent, state, { stream: true });
35 | const textStream = stream.toTextStream({ compatibleWithNodeStreams: true });
36 | textStream.pipe(process.stdout);
37 | await stream.completed;
38 | }
39 |
--------------------------------------------------------------------------------
/examples/docs/tools/agentsAsTools.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '@openai/agents';
2 |
3 | const summarizer = new Agent({
4 | name: 'Summarizer',
5 | instructions: 'Generate a concise summary of the supplied text.',
6 | });
7 |
8 | const summarizerTool = summarizer.asTool({
9 | toolName: 'summarize_text',
10 | toolDescription: 'Generate a concise summary of the supplied text.',
11 | });
12 |
13 | const mainAgent = new Agent({
14 | name: 'Research assistant',
15 | tools: [summarizerTool],
16 | });
17 |
--------------------------------------------------------------------------------
/examples/docs/tools/functionTools.ts:
--------------------------------------------------------------------------------
1 | import { tool } from '@openai/agents';
2 | import { z } from 'zod';
3 |
4 | const getWeatherTool = tool({
5 | name: 'get_weather',
6 | description: 'Get the weather for a given city',
7 | parameters: z.object({ city: z.string() }),
8 | async execute({ city }) {
9 | return `The weather in ${city} is sunny.`;
10 | },
11 | });
12 |
--------------------------------------------------------------------------------
/examples/docs/tools/hostedTools.ts:
--------------------------------------------------------------------------------
1 | import { Agent, webSearchTool, fileSearchTool } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Travel assistant',
5 | tools: [webSearchTool(), fileSearchTool('VS_ID')],
6 | });
7 |
--------------------------------------------------------------------------------
/examples/docs/tools/nonStrictSchemaTools.ts:
--------------------------------------------------------------------------------
1 | import { tool } from '@openai/agents';
2 |
3 | interface LooseToolInput {
4 | text: string;
5 | }
6 |
7 | const looseTool = tool({
8 | description: 'Echo input; be forgiving about typos',
9 | strict: false,
10 | parameters: {
11 | type: 'object',
12 | properties: { text: { type: 'string' } },
13 | required: ['text'],
14 | additionalProperties: true,
15 | },
16 | execute: async (input) => {
17 | // because strict is false we need to do our own verification
18 | if (typeof input !== 'object' || input === null || !('text' in input)) {
19 | return 'Invalid input. Please try again';
20 | }
21 | return (input as LooseToolInput).text;
22 | },
23 | });
24 |
--------------------------------------------------------------------------------
/examples/docs/toppage/textAgent.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run } from '@openai/agents';
2 |
3 | const agent = new Agent({
4 | name: 'Assistant',
5 | instructions: 'You are a helpful assistant.',
6 | });
7 |
8 | const result = await run(
9 | agent,
10 | 'Write a haiku about recursion in programming.',
11 | );
12 | console.log(result.finalOutput);
13 |
--------------------------------------------------------------------------------
/examples/docs/toppage/voiceAgent.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Assistant',
5 | instructions: 'You are a helpful assistant.',
6 | });
7 |
8 | // Automatically connects your microphone and audio output in the browser via WebRTC.
9 | const session = new RealtimeSession(agent);
10 | await session.connect({
11 | apiKey: '',
12 | });
13 |
--------------------------------------------------------------------------------
/examples/docs/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json",
3 | "compilerOptions": {
4 | "noUnusedLocals": false
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/agent.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | export const agent = new RealtimeAgent({
4 | name: 'Assistant',
5 | });
6 |
7 | export const session = new RealtimeSession(agent, {
8 | model: 'gpt-4o-realtime-preview-2025-06-03',
9 | });
10 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/audioInterrupted.ts:
--------------------------------------------------------------------------------
1 | import { session } from './agent';
2 |
3 | session.on('audio_interrupted', () => {
4 | // handle local playback interruption
5 | });
6 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/configureSession.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
8 | const session = new RealtimeSession(agent, {
9 | model: 'gpt-4o-realtime-preview-2025-06-03',
10 | config: {
11 | inputAudioFormat: 'pcm16',
12 | outputAudioFormat: 'pcm16',
13 | inputAudioTranscription: {
14 | model: 'gpt-4o-mini-transcribe',
15 | },
16 | },
17 | });
18 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/createAgent.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/createSession.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
8 | async function main() {
9 | // define which agent you want to start your session with
10 | const session = new RealtimeSession(agent, {
11 | model: 'gpt-4o-realtime-preview-2025-06-03',
12 | });
13 | // start your session
14 | await session.connect({ apiKey: '' });
15 | }
16 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/customWebRTCTransport.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession, OpenAIRealtimeWebRTC } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
8 | async function main() {
9 | const transport = new OpenAIRealtimeWebRTC({
10 | mediaStream: await navigator.mediaDevices.getUserMedia({ audio: true }),
11 | audioElement: document.createElement('audio'),
12 | });
13 |
14 | const customSession = new RealtimeSession(agent, { transport });
15 | }
16 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/defineTool.ts:
--------------------------------------------------------------------------------
1 | import { tool, RealtimeAgent } from '@openai/agents/realtime';
2 | import { z } from 'zod';
3 |
4 | const getWeather = tool({
5 | name: 'get_weather',
6 | description: 'Return the weather for a city.',
7 | parameters: z.object({ city: z.string() }),
8 | async execute({ city }) {
9 | return `The weather in ${city} is sunny.`;
10 | },
11 | });
12 |
13 | const weatherAgent = new RealtimeAgent({
14 | name: 'Weather assistant',
15 | instructions: 'Answer weather questions.',
16 | tools: [getWeather],
17 | });
18 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/delegationAgent.ts:
--------------------------------------------------------------------------------
1 | import {
2 | RealtimeAgent,
3 | RealtimeContextData,
4 | tool,
5 | } from '@openai/agents/realtime';
6 | import { handleRefundRequest } from './serverAgent';
7 | import z from 'zod';
8 |
9 | const refundSupervisorParameters = z.object({
10 | request: z.string(),
11 | });
12 |
13 | const refundSupervisor = tool<
14 | typeof refundSupervisorParameters,
15 | RealtimeContextData
16 | >({
17 | name: 'escalateToRefundSupervisor',
18 | description: 'Escalate a refund request to the refund supervisor',
19 | parameters: refundSupervisorParameters,
20 | execute: async ({ request }, details) => {
21 | // This will execute on the server
22 | return handleRefundRequest(request, details?.context?.history ?? []);
23 | },
24 | });
25 |
26 | const agent = new RealtimeAgent({
27 | name: 'Customer Support',
28 | instructions:
29 | 'You are a customer support agent. If you receive any requests for refunds, you need to delegate to your supervisor.',
30 | tools: [refundSupervisor],
31 | });
32 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/guardrailSettings.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
8 | const guardedSession = new RealtimeSession(agent, {
9 | outputGuardrails: [
10 | /*...*/
11 | ],
12 | outputGuardrailSettings: {
13 | debounceTextLength: 500, // run guardrail every 500 characters or set it to -1 to run it only at the end
14 | },
15 | });
16 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/guardrails.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeOutputGuardrail, RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
8 | const guardrails: RealtimeOutputGuardrail[] = [
9 | {
10 | name: 'No mention of Dom',
11 | async execute({ agentOutput }) {
12 | const domInOutput = agentOutput.includes('Dom');
13 | return {
14 | tripwireTriggered: domInOutput,
15 | outputInfo: { domInOutput },
16 | };
17 | },
18 | },
19 | ];
20 |
21 | const guardedSession = new RealtimeSession(agent, {
22 | outputGuardrails: guardrails,
23 | });
24 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/handleAudio.ts:
--------------------------------------------------------------------------------
1 | import {
2 | RealtimeAgent,
3 | RealtimeSession,
4 | TransportLayerAudio,
5 | } from '@openai/agents/realtime';
6 |
7 | const agent = new RealtimeAgent({ name: 'My agent' });
8 | const session = new RealtimeSession(agent);
9 | const newlyRecordedAudio = new ArrayBuffer(0);
10 |
11 | session.on('audio', (event: TransportLayerAudio) => {
12 | // play your audio
13 | });
14 |
15 | // send new audio to the agent
16 | session.sendAudio(newlyRecordedAudio);
17 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/helloWorld.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Assistant',
5 | instructions: 'You are a helpful assistant.',
6 | });
7 |
8 | const session = new RealtimeSession(agent);
9 |
10 | // Automatically connects your microphone and audio output
11 | // in the browser via WebRTC.
12 | await session.connect({
13 | apiKey: '',
14 | });
15 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/historyUpdated.ts:
--------------------------------------------------------------------------------
1 | import { session } from './agent';
2 |
3 | session.on('history_updated', (newHistory) => {
4 | // save the new history
5 | });
6 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/multiAgents.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent } from '@openai/agents/realtime';
2 |
3 | const mathTutorAgent = new RealtimeAgent({
4 | name: 'Math Tutor',
5 | handoffDescription: 'Specialist agent for math questions',
6 | instructions:
7 | 'You provide help with math problems. Explain your reasoning at each step and include examples',
8 | });
9 |
10 | const agent = new RealtimeAgent({
11 | name: 'Greeter',
12 | instructions: 'Greet the user with cheer and answer questions.',
13 | handoffs: [mathTutorAgent],
14 | });
15 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/sendMessage.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeSession, RealtimeAgent } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Assistant',
5 | });
6 |
7 | const session = new RealtimeSession(agent, {
8 | model: 'gpt-4o-realtime-preview-2025-06-03',
9 | });
10 |
11 | session.sendMessage('Hello, how are you?');
12 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/serverAgent.ts:
--------------------------------------------------------------------------------
1 | // This runs on the server
2 | import 'server-only';
3 |
4 | import { Agent, run } from '@openai/agents';
5 | import type { RealtimeItem } from '@openai/agents/realtime';
6 | import z from 'zod';
7 |
8 | const agent = new Agent({
9 | name: 'Refund Expert',
10 | instructions:
11 | 'You are a refund expert. You are given a request to process a refund and you need to determine if the request is valid.',
12 | model: 'o4-mini',
13 | outputType: z.object({
14 | reasoning: z.string(),
15 | refundApproved: z.boolean(),
16 | }),
17 | });
18 |
19 | export async function handleRefundRequest(
20 | request: string,
21 | history: RealtimeItem[],
22 | ) {
23 | const input = `
24 | The user has requested a refund.
25 |
26 | The request is: ${request}
27 |
28 | Current conversation history:
29 | ${JSON.stringify(history, null, 2)}
30 | `.trim();
31 |
32 | const result = await run(agent, input);
33 |
34 | return JSON.stringify(result.finalOutput, null, 2);
35 | }
36 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/sessionHistory.ts:
--------------------------------------------------------------------------------
1 | import { session } from './agent';
2 |
3 | console.log(session.history);
4 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/sessionInterrupt.ts:
--------------------------------------------------------------------------------
1 | import { session } from './agent';
2 |
3 | session.interrupt();
4 | // this will still trigger the `audio_interrupted` event for you
5 | // to cut off the audio playback when using WebSockets
6 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/thinClient.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIRealtimeWebRTC } from '@openai/agents/realtime';
2 |
3 | const client = new OpenAIRealtimeWebRTC();
4 | const audioBuffer = new ArrayBuffer(0);
5 |
6 | await client.connect({
7 | apiKey: '',
8 | model: 'gpt-4o-mini-realtime-preview',
9 | initialSessionConfig: {
10 | instructions: 'Speak like a pirate',
11 | voice: 'ash',
12 | modalities: ['text', 'audio'],
13 | inputAudioFormat: 'pcm16',
14 | outputAudioFormat: 'pcm16',
15 | },
16 | });
17 |
18 | // optionally for WebSockets
19 | client.on('audio', (newAudio) => {});
20 |
21 | client.sendAudio(audioBuffer);
22 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/toolApprovalEvent.ts:
--------------------------------------------------------------------------------
1 | import { session } from './agent';
2 |
3 | session.on('tool_approval_requested', (_context, _agent, request) => {
4 | // show a UI to the user to approve or reject the tool call
5 | // you can use the `session.approve(...)` or `session.reject(...)` methods to approve or reject the tool call
6 |
7 | session.approve(request.approvalItem); // or session.reject(request.rawItem);
8 | });
9 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/toolHistory.ts:
--------------------------------------------------------------------------------
1 | import {
2 | tool,
3 | RealtimeContextData,
4 | RealtimeItem,
5 | } from '@openai/agents/realtime';
6 | import { z } from 'zod';
7 |
8 | const parameters = z.object({
9 | request: z.string(),
10 | });
11 |
12 | const refundTool = tool({
13 | name: 'Refund Expert',
14 | description: 'Evaluate a refund',
15 | parameters,
16 | execute: async ({ request }, details) => {
17 | // The history might not be available
18 | const history: RealtimeItem[] = details?.context?.history ?? [];
19 | // making your call to process the refund request
20 | },
21 | });
22 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/transportEvents.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
8 | const session = new RealtimeSession(agent, {
9 | model: 'gpt-4o-realtime-preview-2025-06-03',
10 | });
11 |
12 | session.transport.on('*', (event) => {
13 | // JSON parsed version of the event received on the connection
14 | });
15 |
16 | // Send any valid event as JSON. For example triggering a new response
17 | session.transport.sendEvent({
18 | type: 'response.create',
19 | // ...
20 | });
21 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/turnDetection.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeSession } from '@openai/agents/realtime';
2 | import { agent } from './agent';
3 |
4 | const session = new RealtimeSession(agent, {
5 | model: 'gpt-4o-realtime-preview-2025-06-03',
6 | config: {
7 | turnDetection: {
8 | type: 'semantic_vad',
9 | eagerness: 'medium',
10 | create_response: true,
11 | interrupt_response: true,
12 | },
13 | },
14 | });
15 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/updateHistory.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeSession, RealtimeAgent } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Assistant',
5 | });
6 |
7 | const session = new RealtimeSession(agent, {
8 | model: 'gpt-4o-realtime-preview-2025-06-03',
9 | });
10 |
11 | await session.connect({ apiKey: '' });
12 |
13 | // listening to the history_updated event
14 | session.on('history_updated', (history) => {
15 | // returns the full history of the session
16 | console.log(history);
17 | });
18 |
19 | // Option 1: explicit setting
20 | session.updateHistory([
21 | /* specific history */
22 | ]);
23 |
24 | // Option 2: override based on current state like removing all agent messages
25 | session.updateHistory((currentHistory) => {
26 | return currentHistory.filter(
27 | (item) => !(item.type === 'message' && item.role === 'assistant'),
28 | );
29 | });
30 |
--------------------------------------------------------------------------------
/examples/docs/voice-agents/websocketSession.ts:
--------------------------------------------------------------------------------
1 | import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
2 |
3 | const agent = new RealtimeAgent({
4 | name: 'Greeter',
5 | instructions: 'Greet the user with cheer and answer questions.',
6 | });
7 |
8 | const myRecordedArrayBuffer = new ArrayBuffer(0);
9 |
10 | const wsSession = new RealtimeSession(agent, {
11 | transport: 'websocket',
12 | model: 'gpt-4o-realtime-preview-2025-06-03',
13 | });
14 | await wsSession.connect({ apiKey: process.env.OPENAI_API_KEY! });
15 |
16 | wsSession.on('audio', (event) => {
17 | // event.data is a chunk of PCM16 audio
18 | });
19 |
20 | wsSession.sendAudio(myRecordedArrayBuffer);
21 |
--------------------------------------------------------------------------------
/examples/financial-research-agent/README.md:
--------------------------------------------------------------------------------
1 | # Financial Research Agent
2 |
3 | This example demonstrates a multi-agent workflow that produces a short financial analysis report.
4 |
5 | The entrypoint in `main.ts` prompts for a query, then traces the run and hands control to `FinancialResearchManager`.
6 |
7 | The manager orchestrates several specialized agents:
8 |
9 | 1. **Planner** – creates a list of search tasks for the query.
10 | 2. **Search** – runs each search in parallel and gathers summaries.
11 | 3. **Writer** – synthesizes the search results, optionally calling fundamentals and risk analyst tools.
12 | 4. **Verifier** – checks the final report for consistency and issues.
13 |
14 | After running these steps, the manager prints a short summary, the full markdown report, suggested follow-up questions, and verification results.
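
For orientation, here is a rough sketch of how such an orchestration can be wired with the SDK. It is illustrative only: the real logic lives in `manager.ts` and `agents.ts`, and the agent names and prompts below are invented.

```typescript
import { Agent, run, withTrace } from '@openai/agents';
import { z } from 'zod';

// Illustrative planner that proposes a handful of searches for the query.
const planner = new Agent({
  name: 'Planner',
  instructions: 'Propose 3-5 web searches that would help answer the query.',
  outputType: z.object({ searches: z.array(z.string()) }),
});

// Illustrative search summarizer and report writer.
const searcher = new Agent({
  name: 'Search',
  instructions: 'Summarize the key financial facts for the given search term.',
});
const writer = new Agent({
  name: 'Writer',
  instructions: 'Write a short markdown report from the provided summaries.',
});

export async function runWorkflow(query: string): Promise<string | undefined> {
  let report: string | undefined;
  await withTrace('Financial research workflow', async () => {
    const plan = await run(planner, query);
    // Run the searches in parallel, as the manager does.
    const summaries = await Promise.all(
      (plan.finalOutput?.searches ?? []).map(async (term) => {
        const result = await run(searcher, term);
        return result.finalOutput ?? '';
      }),
    );
    const written = await run(writer, `Query: ${query}\n\n${summaries.join('\n\n')}`);
    report = written.finalOutput;
  });
  return report;
}
```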
15 |
16 | Run the example with:
17 |
18 | ```bash
19 | pnpm examples:financial-research-agent
20 | ```
21 |
--------------------------------------------------------------------------------
/examples/financial-research-agent/main.ts:
--------------------------------------------------------------------------------
1 | import { withTrace } from '@openai/agents';
2 | import { FinancialResearchManager } from './manager';
3 |
4 | // Entrypoint for the financial bot example.
5 | // Run this as `npx tsx examples/financial-research-agent/main.ts` and enter a financial research query, for example:
6 | // "Write up an analysis of Apple Inc.'s most recent quarter."
7 |
8 | async function main() {
9 | const readline = await import('readline');
10 | const rl = readline.createInterface({
11 | input: process.stdin,
12 | output: process.stdout,
13 | });
14 | rl.question('Enter a financial research query: ', async (query: string) => {
15 | rl.close();
16 | await withTrace('Financial research workflow', async () => {
17 | const manager = new FinancialResearchManager();
18 | await manager.run(query);
19 | });
20 | });
21 | }
22 |
23 | if (require.main === module) {
24 | main();
25 | }
26 |
--------------------------------------------------------------------------------
/examples/financial-research-agent/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "financial-research-agent",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "zod": "~3.25.40"
7 | },
8 | "scripts": {
9 | "build-check": "tsc --noEmit",
10 | "start": "tsx main.ts"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/examples/financial-research-agent/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/handoffs/README.md:
--------------------------------------------------------------------------------
1 | # Agent Handoffs
2 |
3 | This example shows how one agent can transfer control to another. The `index.ts` script sets up two English-speaking assistants and a Spanish assistant. The second agent is configured with a handoff so that, if the user asks for replies in Spanish, it hands off to the Spanish agent. A message filter strips out tool messages and the first two history items before the handoff occurs. Run it with:
4 |
5 | ```bash
6 | pnpm -F handoffs start
7 | ```
8 |
9 | `types.ts` demonstrates typed outputs. A triage agent inspects the message and hands off to either `firstAgent` or `secondAgent`, each with its own Zod schema for structured output. The script logs which agent produced the final result.
10 |
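For a feel of the shape of the code, a minimal handoff sketch. It is illustrative: the real scripts also add a message filter and typed outputs, and `result.lastAgent` is assumed to be available as described in the SDK's results guide.

```typescript
import { Agent, run } from '@openai/agents';

const spanishAgent = new Agent({
  name: 'Spanish Assistant',
  instructions: 'Respond to the user only in Spanish.',
});

const mainAgent = new Agent({
  name: 'Assistant',
  instructions:
    'Respond in English. If the user asks for Spanish, hand off to the Spanish assistant.',
  handoffs: [spanishAgent],
});

async function main() {
  const result = await run(mainAgent, 'Por favor, responde en español. ¿Qué tal?');
  // Log which agent ended up producing the final answer.
  console.log(result.lastAgent?.name, '→', result.finalOutput);
}

main().catch(console.error);
```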
--------------------------------------------------------------------------------
/examples/handoffs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "handoffs",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "@openai/agents-core": "workspace:*",
7 | "zod": "~3.25.40"
8 | },
9 | "scripts": {
10 | "build-check": "tsc --noEmit",
11 | "start": "tsx index.ts",
12 | "start:types": "tsx types.ts"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/examples/handoffs/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/mcp/README.md:
--------------------------------------------------------------------------------
1 | # Model Context Protocol Example
2 |
3 | This example demonstrates how to use the [Model Context Protocol](https://modelcontextprotocol.io/) with the OpenAI Agents SDK.
4 |
5 | `filesystem-example.ts` starts a local MCP server exposing the files inside `sample_files/`. The agent reads those files through the protocol and can answer questions about them. The directory includes:
6 |
7 | - `books.txt` – A list of favorite books.
8 | - `favorite_songs.txt` – A list of favorite songs.
9 |
10 | Run the example from the repository root:
11 |
12 | ```bash
13 | pnpm -F mcp start:stdio
14 | ```
15 |
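In rough outline, the example wires the server and agent together like this. This is a sketch that assumes the SDK's `MCPServerStdio` class and the `mcpServers` agent option; option names such as `fullCommand` may differ from the real `filesystem-example.ts`.

```typescript
import { Agent, run, MCPServerStdio } from '@openai/agents';

async function main() {
  // Spawn the filesystem MCP server over stdio (option names are illustrative).
  const mcpServer = new MCPServerStdio({
    name: 'Filesystem MCP Server',
    fullCommand: 'npx -y @modelcontextprotocol/server-filesystem ./sample_files',
  });
  await mcpServer.connect();

  try {
    const agent = new Agent({
      name: 'FS Assistant',
      instructions: 'Answer questions using the files exposed by the MCP server.',
      mcpServers: [mcpServer],
    });
    const result = await run(agent, 'Which books are listed in books.txt?');
    console.log(result.finalOutput);
  } finally {
    await mcpServer.close();
  }
}

main().catch(console.error);
```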
--------------------------------------------------------------------------------
/examples/mcp/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "mcp",
4 | "dependencies": {
5 | "@modelcontextprotocol/sdk": "^1.12.0",
6 | "@openai/agents": "workspace:*",
7 | "zod": "~3.25.40"
8 | },
9 | "scripts": {
10 | "build-check": "tsc --noEmit",
11 | "start:stdio": "tsx filesystem-example.ts"
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/examples/mcp/sample_files/books.txt:
--------------------------------------------------------------------------------
1 | 1. To Kill a Mockingbird – Harper Lee
2 | 2. Pride and Prejudice – Jane Austen
3 | 3. 1984 – George Orwell
4 | 4. The Hobbit – J.R.R. Tolkien
5 | 5. Harry Potter and the Sorcerer’s Stone – J.K. Rowling
6 | 6. The Great Gatsby – F. Scott Fitzgerald
7 | 7. Charlotte’s Web – E.B. White
8 | 8. Anne of Green Gables – Lucy Maud Montgomery
9 | 9. The Alchemist – Paulo Coelho
10 | 10. Little Women – Louisa May Alcott
11 | 11. The Catcher in the Rye – J.D. Salinger
12 | 12. Animal Farm – George Orwell
13 | 13. The Chronicles of Narnia: The Lion, the Witch, and the Wardrobe – C.S. Lewis
14 | 14. The Book Thief – Markus Zusak
15 | 15. A Wrinkle in Time – Madeleine L’Engle
16 | 16. The Secret Garden – Frances Hodgson Burnett
17 | 17. Moby-Dick – Herman Melville
18 | 18. Fahrenheit 451 – Ray Bradbury
19 | 19. Jane Eyre – Charlotte Brontë
20 | 20. The Little Prince – Antoine de Saint-Exupéry
--------------------------------------------------------------------------------
/examples/mcp/sample_files/favorite_songs.txt:
--------------------------------------------------------------------------------
1 | 1. "Here Comes the Sun" – The Beatles
2 | 2. "Imagine" – John Lennon
3 | 3. "Bohemian Rhapsody" – Queen
4 | 4. "Shake It Off" – Taylor Swift
5 | 5. "Billie Jean" – Michael Jackson
6 | 6. "Uptown Funk" – Mark Ronson ft. Bruno Mars
7 | 7. "Don’t Stop Believin’" – Journey
8 | 8. "Dancing Queen" – ABBA
9 | 9. "Happy" – Pharrell Williams
10 | 10. "Wonderwall" – Oasis
11 |
--------------------------------------------------------------------------------
/examples/mcp/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/model-providers/README.md:
--------------------------------------------------------------------------------
1 | # Model Providers Examples
2 |
3 | This directory contains small scripts showing how to integrate custom model providers. Run them with `pnpm` using the commands shown below.
4 |
5 | - `custom-example-agent.ts` – Pass a model instance directly to an `Agent`.
6 | ```bash
7 | pnpm -F model-providers start:custom-example-agent
8 | ```
9 | - `custom-example-global.ts` – Configure a global model provider. Requires environment variables `EXAMPLE_BASE_URL`, `EXAMPLE_API_KEY`, and `EXAMPLE_MODEL_NAME`.
10 | ```bash
11 | pnpm -F model-providers start:custom-example-global
12 | ```
13 | - `custom-example-provider.ts` – Create a custom `ModelProvider` for a single run (same environment variables as above).
14 | ```bash
15 | pnpm -F model-providers start:custom-example-provider
16 | ```
17 |
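As a rough sketch of the provider-based variant, assuming a `ModelProvider` only needs a `getModel()` method and that `Runner` accepts a `modelProvider` option; the real scripts may differ in detail.

```typescript
import {
  Runner,
  Agent,
  OpenAIChatCompletionsModel,
  type ModelProvider,
} from '@openai/agents';
import { OpenAI } from 'openai';

// Client pointed at a custom, OpenAI-compatible endpoint (env names as in this README).
const client = new OpenAI({
  baseURL: process.env.EXAMPLE_BASE_URL,
  apiKey: process.env.EXAMPLE_API_KEY,
});

const customModelProvider: ModelProvider = {
  getModel: async (modelName) =>
    new OpenAIChatCompletionsModel(client, modelName ?? process.env.EXAMPLE_MODEL_NAME!),
};

async function main() {
  const agent = new Agent({ name: 'Assistant', instructions: 'Be concise.' });
  // The provider is scoped to this runner rather than set globally.
  const runner = new Runner({ modelProvider: customModelProvider });
  const result = await runner.run(agent, 'Say hello.');
  console.log(result.finalOutput);
}

main().catch(console.error);
```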
--------------------------------------------------------------------------------
/examples/model-providers/custom-example-agent.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod';
2 | import {
3 | Agent,
4 | run,
5 | withTrace,
6 | OpenAIChatCompletionsModel,
7 | tool,
8 | } from '@openai/agents';
9 | import { OpenAI } from 'openai';
10 |
11 | const getWeatherTool = tool({
12 | name: 'get_weather',
13 | description: 'Get the weather for a given city',
14 | parameters: z.object({ city: z.string() }),
15 | execute: async (input) => {
16 | return `The weather in ${input.city} is sunny`;
17 | },
18 | });
19 |
20 | const client = new OpenAI();
21 | const agent = new Agent({
22 | name: 'Assistant',
23 | model: new OpenAIChatCompletionsModel(client, 'gpt-4o'),
24 | instructions: 'You only respond in haikus.',
25 | tools: [getWeatherTool],
26 | });
27 |
28 | async function main() {
29 | await withTrace('ChatCompletions Assistant Example', async () => {
30 | const result = await run(agent, "What's the weather in Tokyo?");
31 | console.log(`\n\nFinal response:\n${result.finalOutput}`);
32 | });
33 | }
34 |
35 | main().catch((error) => {
36 | console.error('Error:', error);
37 | });
38 |
--------------------------------------------------------------------------------
/examples/model-providers/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "model-providers",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "zod": "~3.25.40"
7 | },
8 | "scripts": {
9 | "build-check": "tsc --noEmit",
10 | "start:custom-example-agent": "tsx custom-example-agent.ts",
11 | "start:custom-example-global": "tsx custom-example-global.ts",
12 | "start:custom-example-provider": "tsx custom-example-provider.ts"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/examples/model-providers/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/realtime-demo/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 |
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 |
--------------------------------------------------------------------------------
/examples/realtime-demo/README.md:
--------------------------------------------------------------------------------
1 | # Realtime Demo
2 |
3 | This example is a small [Vite](https://vitejs.dev/) application showcasing the realtime agent API.
4 |
5 | 1. Install dependencies in the repo root with `pnpm install`.
6 | 2. Generate an ephemeral API key:
7 | ```bash
8 | pnpm -F realtime-demo generate-token
9 | ```
10 | Copy the printed key.
11 | 3. Start the dev server:
12 | ```bash
13 | pnpm examples:realtime-demo
14 | ```
15 | 4. Open the printed localhost URL and paste the key when prompted.
16 |
17 | Use `pnpm -F realtime-demo build` to create a production build.
18 |
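Under the hood the app does something along these lines (a simplified sketch, not the actual `src/main.ts`): the pasted ephemeral key is handed to `RealtimeSession.connect()`, which uses the WebRTC transport by default in the browser.

```typescript
import { RealtimeAgent, RealtimeSession } from '@openai/agents-realtime';

const agent = new RealtimeAgent({
  name: 'Greeter',
  instructions: 'Greet the user with cheer and answer questions.',
});

// `ephemeralKey` is the value printed by `pnpm -F realtime-demo generate-token`.
export async function start(ephemeralKey: string) {
  const session = new RealtimeSession(agent);
  // In the browser this defaults to the WebRTC transport, which also
  // handles microphone input and audio playback.
  await session.connect({ apiKey: ephemeralKey });
  return session;
}
```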
--------------------------------------------------------------------------------
/examples/realtime-demo/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "realtime-demo",
3 | "private": true,
4 | "type": "module",
5 | "scripts": {
6 | "dev": "vite",
7 | "build-check": "tsc --noEmit",
8 | "build": "tsc && vite build",
9 | "preview": "vite preview",
10 | "generate-token": "tsx token.ts"
11 | },
12 | "devDependencies": {
13 | "@openai/agents-realtime": "workspace:*",
14 | "typescript": "~5.8.3",
15 | "vite": "^6.3.5"
16 | },
17 | "dependencies": {
18 | "@tailwindcss/vite": "^4.1.7",
19 | "openai": "^4.91.0",
20 | "tailwindcss": "^4.1.7"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/examples/realtime-demo/public/vite.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/realtime-demo/src/style.css:
--------------------------------------------------------------------------------
1 | @import "tailwindcss";
2 |
--------------------------------------------------------------------------------
/examples/realtime-demo/token.ts:
--------------------------------------------------------------------------------
1 | import OpenAI from 'openai';
2 |
3 | async function generateToken() {
4 | const openai = new OpenAI({
5 | apiKey: process.env.OPENAI_API_KEY,
6 | });
7 |
8 | const session = await openai.beta.realtime.sessions.create({
9 | model: 'gpt-4o-realtime-preview',
10 | });
11 |
12 | console.log(session.client_secret.value);
13 | }
14 |
15 | generateToken().catch((err) => {
16 | console.error('Failed to create ephemeral token', err);
17 | process.exit(1);
18 | });
19 |
--------------------------------------------------------------------------------
/examples/realtime-demo/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2020",
4 | "useDefineForClassFields": true,
5 | "module": "ESNext",
6 | "lib": ["ES2020", "DOM", "DOM.Iterable"],
7 | "skipLibCheck": true,
8 |
9 | /* Bundler mode */
10 | "moduleResolution": "bundler",
11 | "allowImportingTsExtensions": true,
12 | "verbatimModuleSyntax": true,
13 | "moduleDetection": "force",
14 | "noEmit": true,
15 |
16 | /* Linting */
17 | "strict": true,
18 | "noUnusedLocals": true,
19 | "noUnusedParameters": true,
20 | "erasableSyntaxOnly": true,
21 | "noFallthroughCasesInSwitch": true,
22 | "noUncheckedSideEffectImports": true
23 | },
24 | "include": ["src"]
25 | }
26 |
--------------------------------------------------------------------------------
/examples/realtime-demo/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference types="vite/client" />
2 |
--------------------------------------------------------------------------------
/examples/realtime-demo/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vite';
2 | import tailwindcss from '@tailwindcss/vite';
3 | export default defineConfig({
4 | plugins: [tailwindcss()],
5 | });
6 |
--------------------------------------------------------------------------------
/examples/realtime-next/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.*
7 | .yarn/*
8 | !.yarn/patches
9 | !.yarn/plugins
10 | !.yarn/releases
11 | !.yarn/versions
12 |
13 | # testing
14 | /coverage
15 |
16 | # next.js
17 | /.next/
18 | /out/
19 |
20 | # production
21 | /build
22 |
23 | # misc
24 | .DS_Store
25 | *.pem
26 |
27 | # debug
28 | npm-debug.log*
29 | yarn-debug.log*
30 | yarn-error.log*
31 | .pnpm-debug.log*
32 |
33 | # env files (can opt-in for committing if needed)
34 | .env*
35 |
36 | # vercel
37 | .vercel
38 |
39 | # typescript
40 | *.tsbuildinfo
41 | next-env.d.ts
42 |
--------------------------------------------------------------------------------
/examples/realtime-next/README.md:
--------------------------------------------------------------------------------
1 | # Realtime Next.js Demo
2 |
3 | This example shows how to combine Next.js with the OpenAI Agents SDK to create a realtime voice agent.
4 |
5 | ## Run the example
6 |
7 | Set the `OPENAI_API_KEY` environment variable and run:
8 |
9 | ```bash
10 | pnpm examples:realtime-next
11 | ```
12 |
13 | Open [http://localhost:3000](http://localhost:3000) in your browser and start talking.
14 |
15 | ## Endpoints
16 |
17 | - **`/`** – WebRTC voice demo using the `RealtimeSession` class. Code in `src/app/page.tsx`.
18 | - **`/websocket`** – Same agent over WebSockets. Code in `src/app/websocket/page.tsx`.
19 | - **`/raw-client`** – Low-level WebRTC example using `OpenAIRealtimeWebRTC`. Code in `src/app/raw-client/page.tsx`.
20 |
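The page components are not reproduced in this listing, but the core wiring looks roughly like this. The sketch assumes the `getToken()` server action defined in `src/app/server/token.tsx` below, and the import path is illustrative; the real pages add UI state, history rendering, and tools.

```typescript
import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
import { getToken } from './server/token';

const agent = new RealtimeAgent({
  name: 'Greeter',
  instructions: 'Greet the user with cheer and answer questions.',
});

export async function connectFromBrowser() {
  // Ask the server action for a short-lived client secret, then connect
  // directly from the browser over WebRTC (the default transport).
  const clientSecret = await getToken();
  const session = new RealtimeSession(agent);
  await session.connect({ apiKey: clientSecret });
  return session;
}
```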
--------------------------------------------------------------------------------
/examples/realtime-next/next.config.ts:
--------------------------------------------------------------------------------
1 | import type { NextConfig } from "next";
2 |
3 | const nextConfig: NextConfig = {
4 | /* config options here */
5 | };
6 |
7 | export default nextConfig;
8 |
--------------------------------------------------------------------------------
/examples/realtime-next/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "realtime-next",
3 | "private": true,
4 | "scripts": {
5 | "dev": "next dev --turbopack",
6 | "build": "next build",
7 | "start": "next start",
8 | "lint": "next lint",
9 | "build-check": "tsc --noEmit"
10 | },
11 | "dependencies": {
12 | "@openai/agents": "workspace:*",
13 | "@radix-ui/react-slot": "^1.2.3",
14 | "class-variance-authority": "^0.7.1",
15 | "clsx": "^2.1.1",
16 | "next": "15.3.2",
17 | "react": "^19.0.0",
18 | "react-dom": "^19.0.0",
19 | "tailwind-merge": "^3.3.0",
20 | "wavtools": "^0.1.5",
21 | "zod": "~3.25.40"
22 | },
23 | "devDependencies": {
24 | "@tailwindcss/postcss": "^4",
25 | "@types/node": "^20",
26 | "@types/react": "^19",
27 | "@types/react-dom": "^19",
28 | "tailwindcss": "^4",
29 | "typescript": "^5"
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/examples/realtime-next/postcss.config.mjs:
--------------------------------------------------------------------------------
1 | const config = {
2 | plugins: ["@tailwindcss/postcss"],
3 | };
4 |
5 | export default config;
6 |
--------------------------------------------------------------------------------
/examples/realtime-next/public/file.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/realtime-next/public/globe.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/realtime-next/public/next.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/realtime-next/public/vercel.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/realtime-next/public/window.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/realtime-next/src/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openai/openai-agents-js/4a0fe867e4acdc0bc7ce7cbba5d3015bc9d18ec9/examples/realtime-next/src/app/favicon.ico
--------------------------------------------------------------------------------
/examples/realtime-next/src/app/globals.css:
--------------------------------------------------------------------------------
1 | @import "tailwindcss";
2 |
3 | :root {
4 | --background: #ffffff;
5 | --foreground: #171717;
6 | }
7 |
8 | @theme inline {
9 | --color-background: var(--background);
10 | --color-foreground: var(--foreground);
11 | --font-sans: var(--font-geist-sans);
12 | --font-mono: var(--font-geist-mono);
13 | }
14 |
15 | @media (prefers-color-scheme: dark) {
16 | :root {
17 | --background: #0a0a0a;
18 | --foreground: #ededed;
19 | }
20 | }
21 |
22 | body {
23 | background: var(--background);
24 | color: var(--foreground);
25 | font-family: Arial, Helvetica, sans-serif;
26 | }
27 |
--------------------------------------------------------------------------------
/examples/realtime-next/src/app/layout.tsx:
--------------------------------------------------------------------------------
1 | import type { Metadata } from 'next';
2 | import './globals.css';
3 |
4 | export const metadata: Metadata = {
5 | title: 'Realtime Agent Next.js Demo',
6 | description: 'A demo of the Realtime Agent framework in Next.js',
7 | };
8 |
9 | export default function RootLayout({
10 | children,
11 | }: Readonly<{
12 | children: React.ReactNode;
13 | }>) {
14 | return (
15 |
16 | {children}
17 |
18 | );
19 | }
20 |
--------------------------------------------------------------------------------
/examples/realtime-next/src/app/server/backendAgent.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import { Agent, Runner, user } from '@openai/agents';
4 | import { type RealtimeItem } from '@openai/agents/realtime';
5 | import { z } from 'zod';
6 |
7 | const backendAgent = new Agent({
8 | name: 'Refund Agent',
9 | instructions:
10 | 'You are a specialist on handling refund requests and detect fraud. You are given a request and you need to determine if the request is valid and if it is, you need to handle it.',
11 | model: 'o4-mini',
12 | outputType: z.object({
13 | refundApproved: z.boolean(),
14 | refundReason: z.string(),
15 | fraud: z.boolean(),
16 | }),
17 | });
18 |
19 | const runner = new Runner();
20 |
21 | export async function handleRefundRequest(
22 | request: string,
23 | history: RealtimeItem[] = [],
24 | ) {
25 | const input = [
26 | user(
27 | `
28 | Request: ${request}
29 |
30 | ## Past Conversation History
31 | ${JSON.stringify(history, null, 2)}
32 | `.trim(),
33 | ),
34 | ];
35 | const result = await runner.run(backendAgent, input);
36 | console.log(result.output);
37 | return JSON.stringify(result.finalOutput);
38 | }
39 |
--------------------------------------------------------------------------------
/examples/realtime-next/src/app/server/token.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import OpenAI from 'openai';
4 |
5 | export async function getToken() {
6 | const openai = new OpenAI({
7 | apiKey: process.env.OPENAI_API_KEY,
8 | });
9 |
10 | const session = await openai.beta.realtime.sessions.create({
11 | model: 'gpt-4o-realtime-preview',
12 | // tracing: {
13 | // workflow_name: 'Realtime Next Demo',
14 | // },
15 | });
16 |
17 | return session.client_secret.value;
18 | }
19 |
--------------------------------------------------------------------------------
/examples/realtime-next/src/components/icons/ClockIcon.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react';
2 |
3 | const ClockIcon = (props: React.SVGProps<SVGSVGElement>) => (
4 |
18 | );
19 |
20 | export default ClockIcon;
21 |
--------------------------------------------------------------------------------
/examples/realtime-next/src/components/messages/TextMessage.tsx:
--------------------------------------------------------------------------------
1 | import clsx from 'clsx';
2 | import React from 'react';
3 |
4 | type CustomLinkProps = {
5 | href?: string;
6 | children?: React.ReactNode;
7 | };
8 |
9 | const CustomLink = ({ href, children, ...props }: CustomLinkProps) => (
10 |
15 | {children}
16 |
17 | );
18 |
19 | type TextMessageProps = {
20 | text: string;
21 | isUser: boolean;
22 | };
23 |
24 | export function TextMessage({ text, isUser }: TextMessageProps) {
25 | return (
26 |
31 |
37 | {text}
38 |
39 |
40 | );
41 | }
42 |
--------------------------------------------------------------------------------
/examples/realtime-next/src/components/ui/utils.ts:
--------------------------------------------------------------------------------
1 | import { clsx, type ClassValue } from 'clsx';
2 | import { twMerge } from 'tailwind-merge';
3 |
4 | export function cn(...inputs: ClassValue[]) {
5 | return twMerge(clsx(inputs));
6 | }
7 |
--------------------------------------------------------------------------------
/examples/realtime-next/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2017",
4 | "lib": ["dom", "dom.iterable", "esnext"],
5 | "allowJs": true,
6 | "skipLibCheck": true,
7 | "strict": true,
8 | "noEmit": true,
9 | "esModuleInterop": true,
10 | "module": "esnext",
11 | "moduleResolution": "bundler",
12 | "resolveJsonModule": true,
13 | "isolatedModules": true,
14 | "jsx": "preserve",
15 | "incremental": true,
16 | "plugins": [
17 | {
18 | "name": "next"
19 | }
20 | ],
21 | "paths": {
22 | "@/*": ["./src/*"]
23 | }
24 | },
25 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
26 | "exclude": ["node_modules"]
27 | }
28 |
--------------------------------------------------------------------------------
/examples/realtime-next/vercel.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://openapi.vercel.sh/vercel.json",
3 | "framework": "nextjs"
4 | }
5 |
--------------------------------------------------------------------------------
/examples/realtime-twilio/README.md:
--------------------------------------------------------------------------------
1 | # Realtime Twilio Integration
2 |
3 | This example demonstrates how to connect the OpenAI Realtime API to a phone call using Twilio's Media Streams.
4 | The script in `index.ts` starts a Fastify server that serves TwiML for incoming calls and creates a WebSocket
5 | endpoint for streaming audio. When a call connects, the audio stream is forwarded through a
6 | `TwilioRealtimeTransportLayer` to a `RealtimeSession` so the `RealtimeAgent` can respond in real time.
7 |
8 | To try it out, you must have a Twilio phone number.
9 | Expose your localhost with a tunneling service such as ngrok and set the phone number's incoming-call URL to `https://<your-tunnel-domain>/incoming-call`.
10 |
11 | Start the server with:
12 |
13 | ```bash
14 | pnpm -F realtime-twilio start
15 | ```
16 |
17 |
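In outline, `index.ts` bridges Twilio's media-stream WebSocket into a realtime session roughly like this. This is a sketch: the handler signature, option names such as `twilioWebSocket`, and the port are assumptions based on the extension's documentation, not copied from `index.ts`, and the TwiML route is omitted.

```typescript
import Fastify from 'fastify';
import websocketPlugin from '@fastify/websocket';
import { RealtimeAgent, RealtimeSession } from '@openai/agents/realtime';
import { TwilioRealtimeTransportLayer } from '@openai/agents-extensions';

const agent = new RealtimeAgent({
  name: 'Phone Assistant',
  instructions: 'Help the caller politely and keep answers short.',
});

const fastify = Fastify();
await fastify.register(websocketPlugin);

fastify.get('/media-stream', { websocket: true }, async (connection) => {
  // Wrap Twilio's media-stream socket so audio flows both ways.
  const transport = new TwilioRealtimeTransportLayer({
    twilioWebSocket: connection,
  });
  const session = new RealtimeSession(agent, { transport });
  await session.connect({ apiKey: process.env.OPENAI_API_KEY! });
});

await fastify.listen({ port: 5050 });
```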
--------------------------------------------------------------------------------
/examples/realtime-twilio/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "realtime-twilio",
4 | "dependencies": {
5 | "@fastify/formbody": "^8.0.2",
6 | "@fastify/websocket": "^11.1.0",
7 | "@openai/agents": "workspace:*",
8 | "@openai/agents-extensions": "workspace:*",
9 | "dotenv": "^16.5.0",
10 | "fastify": "^5.3.3",
11 | "ws": "^8.18.1",
12 | "zod": "~3.25.40"
13 | },
14 | "scripts": {
15 | "build-check": "tsc --noEmit",
16 | "start": "tsx index.ts"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/examples/realtime-twilio/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/research-bot/README.md:
--------------------------------------------------------------------------------
1 | # Research Bot
2 |
3 | This example shows how to orchestrate several agents to produce a detailed research report.
4 |
5 | ## Files
6 |
7 | - **main.ts** – CLI entrypoint that asks for a query and runs the workflow using `ResearchManager`.
8 | - **manager.ts** – Coordinates the planning, web-searching, and report-writing stages.
9 | - **agents.ts** – Contains the agents: a planner that suggests search terms, a search agent that summarizes results, and a writer that generates the final report.
10 |
11 | ## Usage
12 |
13 | From the repository root run:
14 |
15 | ```bash
16 | pnpm examples:research-bot
17 | ```
18 |
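For flavor, a hedged sketch of the kind of planner/search pairing `agents.ts` defines. Names, prompts, and the schema below are illustrative, not the real definitions.

```typescript
import { Agent, run, webSearchTool } from '@openai/agents';
import { z } from 'zod';

// Planner that returns typed search terms via a Zod output schema.
const plannerAgent = new Agent({
  name: 'PlannerAgent',
  instructions: 'Given a research query, propose a short list of web searches.',
  outputType: z.object({
    searches: z.array(z.object({ query: z.string(), reason: z.string() })),
  }),
});

// Search agent that summarizes what it finds on the web.
const searchAgent = new Agent({
  name: 'SearchAgent',
  instructions: 'Search the web for the term and summarize the results concisely.',
  tools: [webSearchTool()],
});

async function demo() {
  const plan = await run(plannerAgent, 'History of the transistor');
  for (const item of plan.finalOutput?.searches ?? []) {
    const summary = await run(searchAgent, item.query);
    console.log(item.query, '→', summary.finalOutput);
  }
}

demo().catch(console.error);
```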
--------------------------------------------------------------------------------
/examples/research-bot/main.ts:
--------------------------------------------------------------------------------
1 | import { ResearchManager } from './manager';
2 |
3 | async function main() {
4 | const readline = await import('readline');
5 | const rl = readline.createInterface({
6 | input: process.stdin,
7 | output: process.stdout,
8 | });
9 | rl.question('What would you like to research? ', async (query: string) => {
10 | rl.close();
11 | const manager = new ResearchManager();
12 | await manager.run(query);
13 | });
14 | }
15 |
16 | if (require.main === module) {
17 | main();
18 | }
19 |
--------------------------------------------------------------------------------
/examples/research-bot/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "research-bot",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "zod": "~3.25.40"
7 | },
8 | "scripts": {
9 | "build-check": "tsc --noEmit",
10 | "start": "tsx main.ts"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/examples/research-bot/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/tools/README.md:
--------------------------------------------------------------------------------
1 | # Tool Integrations
2 |
3 | These examples demonstrate the hosted tools provided by the Agents SDK.
4 |
5 | ## Examples
6 |
7 | - `computer-use.ts` – Uses the computer tool with Playwright to automate a local browser.
8 |
9 | ```bash
10 | pnpm examples:tools-computer-use
11 | ```
12 |
13 | - `file-search.ts` – Shows how to run a vector search with `fileSearchTool`.
14 |
15 | ```bash
16 | pnpm examples:tools-file-search
17 | ```
18 |
19 | - `web-search.ts` – Demonstrates `webSearchTool` for general web queries.
20 |
21 | ```bash
22 | pnpm examples:tools-web-search
23 | ```
24 |
25 | - `code-interpreter.ts` – Demonstrates `codeInterpreterTool` for code execution.
26 |
27 | ```bash
28 | pnpm examples:tools-code-interpreter
29 | ```
30 |
31 | - `image-generation.ts` – Demonstrates `imageGenerationTool` for image generation.
32 |
33 | ```bash
34 | pnpm examples:tools-image-generation
35 | ```
--------------------------------------------------------------------------------
/examples/tools/code-interpreter.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, codeInterpreterTool, withTrace } from '@openai/agents';
2 | import OpenAI from 'openai';
3 |
4 | async function main() {
5 | const agent = new Agent({
6 | name: 'Agent Math Tutor',
7 | instructions:
8 | 'You are a personal math tutor. When asked a math question, write and run code to answer the question.',
9 | tools: [codeInterpreterTool({ container: { type: 'auto' } })],
10 | });
11 |
12 | await withTrace('Code interpreter example', async () => {
13 | console.log('Solving math problem...');
14 | const result = await run(
15 | agent,
16 | 'I need to solve the equation 3x + 11 = 14. Can you help me?',
17 | { stream: true },
18 | );
19 | for await (const event of result) {
20 | if (
21 | event.type === 'raw_model_stream_event' &&
22 | event.data.type === 'model'
23 | ) {
24 | const modelEvent = event.data.event as
25 | | OpenAI.Responses.ResponseStreamEvent
26 | | undefined;
27 | if (
28 | modelEvent &&
29 | modelEvent.type === 'response.output_item.done' &&
30 | modelEvent.item.type === 'code_interpreter_call'
31 | ) {
32 | const code = modelEvent.item.code;
33 | console.log(`Code interpreter code:\n\`\`\`\n${code}\n\`\`\``);
34 | }
35 | }
36 | }
37 | console.log(`Final output: ${result.finalOutput}`);
38 | });
39 | }
40 |
41 | main().catch(console.error);
42 |
--------------------------------------------------------------------------------
/examples/tools/file-search.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, fileSearchTool, withTrace } from '@openai/agents';
2 |
3 | async function main() {
4 | const agent = new Agent({
5 | name: 'File searcher',
6 | instructions: 'You are a helpful agent.',
7 | tools: [
8 | fileSearchTool(['vs_67bf88953f748191be42b462090e53e7'], {
9 | maxNumResults: 3,
10 | includeSearchResults: true,
11 | }),
12 | ],
13 | });
14 |
15 | await withTrace('File search example', async () => {
16 | const result = await run(
17 | agent,
18 | 'Be concise, and tell me 1 sentence about Arrakis I might not know.',
19 | );
20 | console.log(result.finalOutput);
21 | /*
22 | Arrakis, the desert planet in Frank Herbert's "Dune," was inspired by the scarcity of water
23 | as a metaphor for oil and other finite resources.
24 | */
25 |
26 | console.log(
27 | '\n' +
28 | result.newItems.map((out: unknown) => JSON.stringify(out)).join('\n'),
29 | );
30 | /*
31 | {"id":"...", "queries":["Arrakis"], "results":[...]}
32 | */
33 | });
34 | }
35 |
36 | main().catch(console.error);
37 |
--------------------------------------------------------------------------------
/examples/tools/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "name": "tools",
4 | "dependencies": {
5 | "@openai/agents": "workspace:*",
6 | "playwright": "^1.52.0",
7 | "zod": "~3.25.40"
8 | },
9 | "scripts": {
10 | "build-check": "tsc --noEmit",
11 | "start:computer-use": "tsx computer-use.ts",
12 | "start:file-search": "tsx file-search.ts",
13 | "start:web-search": "tsx web-search.ts",
14 | "start:code-interpreter": "tsx code-interpreter.ts",
15 | "start:image-generation": "tsx image-generation.ts"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/examples/tools/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.examples.json"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/tools/web-search.ts:
--------------------------------------------------------------------------------
1 | import { Agent, run, webSearchTool, withTrace } from '@openai/agents';
2 |
3 | async function main() {
4 | const agent = new Agent({
5 | name: 'Web searcher',
6 | instructions: 'You are a helpful agent.',
7 | tools: [
8 | webSearchTool({
9 | userLocation: { type: 'approximate', city: 'New York' },
10 | }),
11 | ],
12 | });
13 |
14 | await withTrace('Web search example', async () => {
15 | const result = await run(
16 | agent,
17 | "search the web for 'local sports news' and give me 1 interesting update in a sentence.",
18 | );
19 | console.log(result.finalOutput);
20 | // The New York Giants are reportedly pursuing quarterback Aaron Rodgers after his ...
21 | });
22 | }
23 |
24 | main().catch(console.error);
25 |
--------------------------------------------------------------------------------
/helpers/tests/setup.ts:
--------------------------------------------------------------------------------
1 | import { setTracingDisabled } from '../../packages/agents-core/src';
2 |
3 | export function setup() {
4 | setTracingDisabled(true);
5 | }
6 |
--------------------------------------------------------------------------------
/integration-tests/README.md:
--------------------------------------------------------------------------------
1 | # Integration tests
2 |
3 | This project hosts packages to test the different environments that the Agents SDK works in.
4 |
5 | It is intentionally not part of the `pnpm` workspace and instead installs the packages from a
6 | local package registry using verdaccio.
7 |
8 | ## How to run integration tests
9 |
10 | 1. **Requirements:**
11 |
12 | - Have Node.js, Bun, and Deno installed globally
13 | - Have an `OPENAI_API_KEY` environment variable configured
14 |    - Add a `.dev.vars` file containing `OPENAI_API_KEY=<your key>` to `integration-tests/cloudflare-workers/worker`
15 |
16 | 2. **Local npm registry**
17 |
18 | We will publish packages in a local registry to emulate a real environment.
19 |
20 | Run in one process `pnpm run local-npm:start` and keep it running until you are done with your test.
21 |
22 |    **Hint:** The first time, you might have to run `npm adduser --registry http://localhost:4873/` (you can use any fake data).
23 |
24 | 3. **Publish your packages to run the tests**
25 |
26 |    To test the packages, first build them (`pnpm build`) and then run `pnpm local-npm:publish`.
27 |
28 | 4. **Run your tests**
29 |
30 | You can now run your integration tests:
31 |
32 | ```bash
33 | pnpm test:integration
34 | ```
35 |
--------------------------------------------------------------------------------
/integration-tests/_helpers/setup.ts:
--------------------------------------------------------------------------------
1 | export async function setup() {
2 | try {
3 | const response = await fetch('http://localhost:4873/');
4 | if (response.status === 200) {
5 | console.log('Local npm registry already running');
6 | return () => {};
7 | }
8 | } catch (err) {
9 | throw new Error('Local npm registry not running');
10 | }
11 |
12 | return () => {
13 | // console.log('Shutting down local npm registry');
14 | // verdaccioServer.kill();
15 | };
16 | }
17 |
--------------------------------------------------------------------------------
/integration-tests/bun.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect, beforeAll, afterAll } from 'vitest';
2 | import { execa as execaBase } from 'execa';
3 |
4 | const execa = execaBase({ cwd: './integration-tests/bun' });
5 |
6 | describe('Bun', () => {
7 | beforeAll(async () => {
8 | // remove lock file to avoid errors
9 | await execa`rm -f bun.lock`;
10 | console.log('[bun] Removing node_modules');
11 | await execa`rm -rf node_modules`;
12 | console.log('[bun] Installing dependencies');
13 | await execa`bun install`;
14 | }, 60000);
15 |
16 | test('should be able to run', async () => {
17 | const { stdout } = await execa`bun run index.ts`;
18 | expect(stdout).toContain('[RESPONSE]Hello there![/RESPONSE]');
19 | });
20 |
21 | afterAll(async () => {
22 | await execa`rm -f bun.lock`;
23 | });
24 | });
25 |
--------------------------------------------------------------------------------
/integration-tests/bun/.gitignore:
--------------------------------------------------------------------------------
1 | # dependencies (bun install)
2 | node_modules
3 |
4 | # output
5 | out
6 | dist
7 | *.tgz
8 |
9 | # code coverage
10 | coverage
11 | *.lcov
12 |
13 | # logs
14 | logs
15 | _.log
16 | report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
17 |
18 | # dotenv environment variable files
19 | .env
20 | .env.development.local
21 | .env.test.local
22 | .env.production.local
23 | .env.local
24 |
25 | # caches
26 | .eslintcache
27 | .cache
28 | *.tsbuildinfo
29 |
30 | # IntelliJ based IDEs
31 | .idea
32 |
33 | # Finder (MacOS) folder config
34 | .DS_Store
35 |
--------------------------------------------------------------------------------
/integration-tests/bun/.npmrc:
--------------------------------------------------------------------------------
1 | @openai:registry=http://localhost:4873
2 | package-lock=false
--------------------------------------------------------------------------------
/integration-tests/bun/index.ts:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import {
4 | Agent,
5 | run,
6 | setTraceProcessors,
7 | ConsoleSpanExporter,
8 | BatchTraceProcessor,
9 | } from '@openai/agents';
10 |
11 | setTraceProcessors([new BatchTraceProcessor(new ConsoleSpanExporter())]);
12 |
13 | const agent = new Agent({
14 | name: 'Test Agent',
15 | instructions:
16 | 'You will always only respond with "Hello there!". Not more not less.',
17 | });
18 |
19 | const result = await run(agent, 'Hey there!');
20 | console.log(`[RESPONSE]${result.finalOutput}[/RESPONSE]`);
21 |
--------------------------------------------------------------------------------
/integration-tests/bun/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "bun",
3 | "module": "index.ts",
4 | "type": "module",
5 | "private": true,
6 | "scripts": {
7 | "start": "bun run index.ts"
8 | },
9 | "devDependencies": {
10 | "@types/bun": "latest"
11 | },
12 | "peerDependencies": {
13 | "typescript": "^5"
14 | },
15 | "dependencies": {
16 | "@openai/agents": "^0.0.1-next.0"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/integration-tests/bun/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | // Environment setup & latest features
4 | "lib": ["esnext"],
5 | "target": "ESNext",
6 | "module": "ESNext",
7 | "moduleDetection": "force",
8 | "jsx": "react-jsx",
9 | "allowJs": true,
10 |
11 | // Bundler mode
12 | "moduleResolution": "bundler",
13 | "allowImportingTsExtensions": true,
14 | "verbatimModuleSyntax": true,
15 | "noEmit": true,
16 |
17 | // Best practices
18 | "strict": true,
19 | "skipLibCheck": true,
20 | "noFallthroughCasesInSwitch": true,
21 | "noUncheckedIndexedAccess": true,
22 |
23 | // Some stricter flags (disabled by default)
24 | "noUnusedLocals": false,
25 | "noUnusedParameters": false,
26 | "noPropertyAccessFromIndexSignature": false
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/integration-tests/cloudflare-workers/worker/.npmrc:
--------------------------------------------------------------------------------
1 | @openai:registry=http://localhost:4873
2 | package-lock=false
--------------------------------------------------------------------------------
/integration-tests/cloudflare-workers/worker/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "files.associations": {
3 | "wrangler.json": "jsonc"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/integration-tests/cloudflare-workers/worker/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "worker",
3 | "version": "0.0.0",
4 | "private": true,
5 | "scripts": {
6 | "deploy": "wrangler deploy",
7 | "dev": "wrangler dev",
8 | "start": "wrangler dev",
9 | "test": "vitest",
10 | "cf-typegen": "wrangler types",
11 | "build": "wrangler deploy --dry-run --outdir dist"
12 | },
13 | "dependencies": {
14 | "@openai/agents": "^0.0.1-next.0"
15 | },
16 | "devDependencies": {
17 | "typescript": "^5.5.2",
18 | "wrangler": "^4.18.0"
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/integration-tests/cloudflare-workers/worker/src/index.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Welcome to Cloudflare Workers! This is your first worker.
3 | *
4 | * - Run `npm run dev` in your terminal to start a development server
5 | * - Open a browser tab at http://localhost:8787/ to see your worker in action
6 | * - Run `npm run deploy` to publish your worker
7 | *
8 | * Bind resources to your worker in `wrangler.jsonc`. After adding bindings, a type definition for the
9 | * `Env` object can be regenerated with `npm run cf-typegen`.
10 | *
11 | * Learn more at https://developers.cloudflare.com/workers/
12 | */
13 |
14 | export interface Env {
15 | OPENAI_API_KEY: string;
16 | }
17 |
18 | export default {
19 |   async fetch(request, env, ctx): Promise<Response> {
20 | try {
21 | const { Agent, run, setDefaultOpenAIKey, setTracingDisabled } = await import('@openai/agents');
22 |
23 | setDefaultOpenAIKey(env.OPENAI_API_KEY!);
24 | setTracingDisabled(true);
25 |
26 | const agent = new Agent({
27 | name: 'Test Agent',
28 | instructions: 'You will always only respond with "Hello there!". Not more not less.',
29 | });
30 | const result = await run(agent, 'Hey there!');
31 | return new Response(`[RESPONSE]${result.finalOutput}[/RESPONSE]`);
32 | } catch (error) {
33 | console.error(error);
34 | return new Response(String(error), { status: 500 });
35 | }
36 | },
37 | } satisfies ExportedHandler<Env>;
38 |
--------------------------------------------------------------------------------
/integration-tests/cloudflare.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect, beforeAll, afterAll } from 'vitest';
2 | import { execa as execaBase, ResultPromise } from 'execa';
3 |
4 | const execa = execaBase({
5 | cwd: './integration-tests/cloudflare-workers/worker',
6 | });
7 |
8 | const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
9 |
10 | let server: ResultPromise;
11 |
12 | describe('Cloudflare Workers', () => {
13 | beforeAll(async () => {
14 | // Remove lock file to avoid errors
15 | await execa`rm -f package-lock.json`;
16 | console.log('[cloudflare] Removing node_modules');
17 | await execa`rm -rf node_modules`;
18 | console.log('[cloudflare] Installing dependencies');
19 | await execa`npm install`;
20 | console.log('[cloudflare] Starting server');
21 | server = execa`npm run start`;
22 | await new Promise((resolve) => {
23 | server.stdout?.on('data', (data) => {
24 | if (data.toString().includes('Ready')) {
25 | resolve(true);
26 | }
27 | });
28 | });
29 | }, 60000);
30 |
31 | test(
32 | 'should be able to run',
33 | async () => {
34 | const response = await fetch('http://localhost:8787/');
35 | const text = await response.text();
36 | expect(text).toContain('[RESPONSE]Hello there![/RESPONSE]');
37 | },
38 | {
39 | timeout: 60000,
40 | },
41 | );
42 |
43 | afterAll(async () => {
44 | if (server) {
45 |       server.kill();
46 | }
47 | });
48 | });
49 |
--------------------------------------------------------------------------------
/integration-tests/deno.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect, beforeAll, afterAll } from 'vitest';
2 | import { execa as execaBase } from 'execa';
3 |
4 | const execa = execaBase({ cwd: './integration-tests/deno' });
5 |
6 | describe('Deno', () => {
7 | beforeAll(async () => {
8 | // Remove lock file to avoid errors
9 | await execa`rm -f deno.lock`;
10 | console.log('[deno] Removing node_modules');
11 | await execa`rm -rf node_modules`;
12 | console.log('[deno] Installing dependencies');
13 | await execa`deno install`;
14 | }, 60000);
15 |
16 | test('should be able to run', async () => {
17 | const { stdout } = await execa`deno --allow-net --allow-env main.ts`;
18 | expect(stdout).toContain('[RESPONSE]Hello there![/RESPONSE]');
19 | });
20 |
21 | afterAll(async () => {
22 | await execa`rm -f deno.lock`;
23 | });
24 | });
25 |
--------------------------------------------------------------------------------
/integration-tests/deno/.npmrc:
--------------------------------------------------------------------------------
1 | @openai:registry=http://localhost:4873
2 | package-lock=false
--------------------------------------------------------------------------------
/integration-tests/deno/main.ts:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import {
4 | Agent,
5 | run,
6 | setTraceProcessors,
7 | ConsoleSpanExporter,
8 | BatchTraceProcessor,
9 | } from '@openai/agents';
10 |
11 | setTraceProcessors([new BatchTraceProcessor(new ConsoleSpanExporter())]);
12 |
13 | const agent = new Agent({
14 | name: 'Test Agent',
15 | instructions:
16 | 'You will always only respond with "Hello there!". Not more not less.',
17 | });
18 |
19 | const result = await run(agent, 'Hey there!');
20 | console.log(`[RESPONSE]${result.finalOutput}[/RESPONSE]`);
21 |
--------------------------------------------------------------------------------
/integration-tests/deno/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "deno",
3 | "private": true,
4 | "scripts": {
5 | "start": "deno --allow-net --allow-env main.ts"
6 | },
7 | "dependencies": {
8 | "@openai/agents": "^0.0.1-next.0"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/integration-tests/node.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect, beforeAll } from 'vitest';
2 | import { execa as execaBase } from 'execa';
3 |
4 | const execa = execaBase({ cwd: './integration-tests/node' });
5 |
6 | describe('Node.js', () => {
7 | beforeAll(async () => {
8 | // remove lock file to avoid errors
9 | console.log('[node] Removing node_modules');
10 | await execa`rm -rf node_modules`;
11 | console.log('[node] Installing dependencies');
12 | await execa`npm install`;
13 | }, 60000);
14 |
15 | test('should be able to run using CommonJS', async () => {
16 | const { stdout } = await execa`npm run start:cjs`;
17 | expect(stdout).toContain('[RESPONSE]Hello there![/RESPONSE]');
18 | });
19 |
20 | test('should be able to run using ESM', async () => {
21 | const { stdout } = await execa`npm run start:esm`;
22 | expect(stdout).toContain('[RESPONSE]Hello there![/RESPONSE]');
23 | });
24 | });
25 |
--------------------------------------------------------------------------------
/integration-tests/node/.npmrc:
--------------------------------------------------------------------------------
1 | @openai:registry=http://localhost:4873
2 | package-lock=false
--------------------------------------------------------------------------------
/integration-tests/node/index.cjs:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | const {
4 | Agent,
5 | run,
6 | setTraceProcessors,
7 | ConsoleSpanExporter,
8 | BatchTraceProcessor,
9 | } = require('@openai/agents');
10 | const { assert } = require('node:console');
11 |
12 | setTraceProcessors([new BatchTraceProcessor(new ConsoleSpanExporter())]);
13 |
14 | const agent = new Agent({
15 | name: 'Test Agent',
16 | instructions:
17 | 'You will always only respond with "Hello there!". Not more not less.',
18 | });
19 |
20 | async function main() {
21 | const result = await run(agent, 'Hey there!');
22 | console.log(`[RESPONSE]${result.finalOutput}[/RESPONSE]`);
23 | }
24 |
25 | main().catch(console.error);
26 |
--------------------------------------------------------------------------------
/integration-tests/node/index.mjs:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import {
4 | Agent,
5 | run,
6 | setTraceProcessors,
7 | ConsoleSpanExporter,
8 | BatchTraceProcessor,
9 | } from '@openai/agents';
10 |
11 | setTraceProcessors([new BatchTraceProcessor(new ConsoleSpanExporter())]);
12 |
13 | const agent = new Agent({
14 | name: 'Test Agent',
15 | instructions:
16 | 'You will always only respond with "Hello there!". Not more not less.',
17 | });
18 |
19 | const result = await run(agent, 'Hey there!');
20 | console.log(`[RESPONSE]${result.finalOutput}[/RESPONSE]`);
21 |
--------------------------------------------------------------------------------
/integration-tests/node/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "scripts": {
4 | "start:cjs": "node index.cjs",
5 | "start:esm": "node index.mjs"
6 | },
7 | "dependencies": {
8 | "@openai/agents": "latest"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/packages/agents-core/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # @openai/agents-core
2 |
3 | ## 0.0.4
4 |
5 | ### Patch Changes
6 |
7 | - 25165df: fix: Process hangs on SIGINT because `process.exit` is never called
8 | - 6683db0: fix(shims): Naively polyfill AsyncLocalStorage in browser
9 | - 78811c6: fix(shims): Bind crypto to randomUUID
10 | - 426ad73: ensure getTransferMessage returns valid JSON
11 |
12 | ## 0.0.3
13 |
14 | ### Patch Changes
15 |
16 | - d7fd8dc: Export CURRENT_SCHEMA_VERSION constant and use it when serializing run state.
17 | - 284d0ab: Update internal module in agents-core to accept a custom logger
18 |
19 | ## 0.0.2
20 |
21 | ### Patch Changes
22 |
23 | - a2979b6: fix: ensure process.on exists and is a function before adding event handlers
24 |
25 | ## 0.0.1
26 |
27 | ### Patch Changes
28 |
29 | - aaa6d08: Initial release
30 |
31 | ## 0.0.1-next.0
32 |
33 | ### Patch Changes
34 |
35 | - Initial release
36 |
--------------------------------------------------------------------------------
/packages/agents-core/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Agents SDK
2 |
3 | The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows.
4 |
5 | ## Installation
6 |
7 | ```bash
8 | npm install @openai/agents
9 | ```
10 |
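A minimal hello-world to check the install, mirroring the integration-test scripts elsewhere in this repository:

```typescript
import { Agent, run } from '@openai/agents';

const agent = new Agent({
  name: 'Test Agent',
  instructions: 'You are a concise assistant.',
});

const result = await run(agent, 'Say hello in one short sentence.');
console.log(result.finalOutput);
```
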
11 | ## License
12 |
13 | MIT
14 |
--------------------------------------------------------------------------------
/packages/agents-core/src/computer.ts:
--------------------------------------------------------------------------------
1 | export type Environment = 'mac' | 'windows' | 'ubuntu' | 'browser';
2 | export type Button = 'left' | 'right' | 'wheel' | 'back' | 'forward';
3 |
4 | import { Expand, SnakeToCamelCase } from './types/helpers';
5 | import type { ComputerAction } from './types/protocol';
6 |
7 | type Promisable<T> = T | Promise<T>;
8 |
9 | /**
10 | * Interface to implement for a computer environment to be used by the agent.
11 | */
12 | interface ComputerBase {
13 | environment: Environment;
14 | dimensions: [number, number];
15 |
16 | screenshot(): Promisable<string>;
17 | click(x: number, y: number, button: Button): Promisable<void>;
18 | doubleClick(x: number, y: number): Promisable<void>;
19 | scroll(
20 | x: number,
21 | y: number,
22 | scrollX: number,
23 | scrollY: number,
24 | ): Promisable<void>;
25 | type(text: string): Promisable<void>;
26 | wait(): Promisable<void>;
27 | move(x: number, y: number): Promisable<void>;
28 | keypress(keys: string[]): Promisable<void>;
29 | drag(path: [number, number][]): Promisable<void>;
30 | }
31 |
32 | // This turns every snake_case string in the ComputerAction['type'] into a camelCase string
33 | type ActionNames = SnakeToCamelCase<ComputerAction['type']>;
34 |
35 | /**
36 | * Interface representing a fully implemented computer environment.
37 | * Combines the base operations with a constraint that no extra
38 | * action names beyond those in `ComputerAction` are present.
39 | */
40 | export type Computer = Expand<
41 | ComputerBase & Record<Exclude<ActionNames, keyof ComputerBase>, never>
42 | >;
43 |
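A do-nothing implementation sketch, to make the shape of `Computer` concrete. It assumes `Computer` is re-exported from the `@openai/agents-core` package root, that `screenshot` resolves to a string (e.g. a base64 image), and that the action methods resolve to `void`:

```ts
import type { Computer } from '@openai/agents-core';

// Every value below is illustrative; a real implementation would drive an actual browser or OS.
const noopComputer: Computer = {
  environment: 'browser',
  dimensions: [1280, 720],
  async screenshot() {
    return ''; // base64-encoded image in a real implementation
  },
  async click(_x, _y, _button) {},
  async doubleClick(_x, _y) {},
  async scroll(_x, _y, _scrollX, _scrollY) {},
  async type(_text) {},
  async wait() {},
  async move(_x, _y) {},
  async keypress(_keys) {},
  async drag(_path) {},
};
```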
--------------------------------------------------------------------------------
/packages/agents-core/src/config.ts:
--------------------------------------------------------------------------------
1 | // Use function instead of exporting the value to prevent
2 | // circular dependency resolution issues caused by other exports in '@openai/agents-core/_shims'
3 | import { loadEnv as _loadEnv } from '@openai/agents-core/_shims';
4 |
5 | /**
6 | * Loads environment variables from the process environment.
7 | *
8 | * @returns An object containing the environment variables.
9 | */
10 | export function loadEnv(): Record<string, string | undefined> {
11 | return _loadEnv();
12 | }
13 |
14 | /**
15 | * Checks if a flag is enabled in the environment.
16 | *
17 | * @param flagName - The name of the flag to check.
18 | * @returns `true` if the flag is enabled, `false` otherwise.
19 | */
20 | function isEnabled(flagName: string): boolean {
21 | const env = loadEnv();
22 | return (
23 | typeof env !== 'undefined' &&
24 | (env[flagName] === 'true' || env[flagName] === '1')
25 | );
26 | }
27 |
28 | /**
29 | * Global configuration for tracing.
30 | */
31 | export const tracing = {
32 | get disabled() {
33 | return isEnabled('OPENAI_AGENTS_DISABLE_TRACING');
34 | },
35 | };
36 |
37 | /**
38 | * Global configuration for logging.
39 | */
40 | export const logging = {
41 | get dontLogModelData() {
42 | return isEnabled('OPENAI_AGENTS_DONT_LOG_MODEL_DATA');
43 | },
44 | get dontLogToolData() {
45 | return isEnabled('OPENAI_AGENTS_DONT_LOG_TOOL_DATA');
46 | },
47 | };
48 |
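Because `tracing.disabled` and the `logging` getters call `loadEnv()` on every access, the flags can be set at any point before the value is read. A Node-only sketch of the `'true'`/`'1'` convention checked by `isEnabled()`:

```ts
process.env.OPENAI_AGENTS_DISABLE_TRACING = 'true'; // tracing.disabled === true
process.env.OPENAI_AGENTS_DONT_LOG_MODEL_DATA = '1'; // logging.dontLogModelData === true
process.env.OPENAI_AGENTS_DONT_LOG_TOOL_DATA = '0'; // logging.dontLogToolData === false ('0' is not an enabled value)
```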
--------------------------------------------------------------------------------
/packages/agents-core/src/extensions/handoffPrompt.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * A recommended prompt prefix for agents that use handoffs. We recommend including this or
3 | * similar instructions in any agents that use handoffs.
4 | */
5 | export const RECOMMENDED_PROMPT_PREFIX = `# System context
6 | You are part of a multi-agent system called the Agents SDK, designed to make agent coordination and execution easy. Agents uses two primary abstractions: **Agents** and **Handoffs**. An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate. Handoffs are achieved by calling a handoff function, generally named \`transfer_to_<agent_name>\`. Transfers between agents are handled seamlessly in the background; do not mention or draw attention to these transfers in your conversation with the user.`;
7 |
8 | /**
9 | * Add recommended instructions to the prompt for agents that use handoffs.
10 | *
11 | * @param prompt - The original prompt string.
12 | * @returns The prompt prefixed with recommended handoff instructions.
13 | */
14 | export function promptWithHandoffInstructions(prompt: string): string {
15 | return `${RECOMMENDED_PROMPT_PREFIX}\n\n${prompt}`;
16 | }
17 |
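A usage sketch for `promptWithHandoffInstructions`; the `@openai/agents-core/extensions` import path and the agent details are assumptions for illustration:

```ts
import { Agent } from '@openai/agents';
import { promptWithHandoffInstructions } from '@openai/agents-core/extensions';

const billingAgent = new Agent({
  name: 'Billing Agent',
  // Prepends RECOMMENDED_PROMPT_PREFIX so the model knows how handoffs behave.
  instructions: promptWithHandoffInstructions(
    'Help the user with billing questions.',
  ),
});
```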
--------------------------------------------------------------------------------
/packages/agents-core/src/extensions/index.ts:
--------------------------------------------------------------------------------
1 | export { RECOMMENDED_PROMPT_PREFIX, promptWithHandoffInstructions } from './handoffPrompt';
2 | export { removeAllTools } from './handoffFilters';
--------------------------------------------------------------------------------
/packages/agents-core/src/metadata.ts:
--------------------------------------------------------------------------------
1 |
2 | // This file is automatically generated
3 |
4 | export const METADATA = {
5 | "name": "@openai/agents-core",
6 | "version": "0.0.1-next.0",
7 | "versions": {
8 | "@openai/agents-core": "0.0.1-next.0",
9 | "@openai/zod": "npm:zod@^3.25.40",
10 | "openai": "^5.0.1"
11 | }
12 | };
13 |
14 | export default METADATA;
15 |
--------------------------------------------------------------------------------
/packages/agents-core/src/providers.ts:
--------------------------------------------------------------------------------
1 | import { ModelProvider } from './model';
2 |
3 | let DEFAULT_PROVIDER: ModelProvider | undefined;
4 |
5 | /**
6 | * Set the model provider used when no explicit provider is supplied.
7 | *
8 | * @param provider - The provider to use by default.
9 | */
10 | export function setDefaultModelProvider(provider: ModelProvider) {
11 | DEFAULT_PROVIDER = provider;
12 | }
13 |
14 | /**
15 | * Returns the default model provider.
16 | *
17 | * @returns The default model provider.
18 | */
19 | export function getDefaultModelProvider(): ModelProvider {
20 | if (typeof DEFAULT_PROVIDER === 'undefined') {
21 | throw new Error(
22 | 'No default model provider set. Make sure to set a provider using setDefaultModelProvider before calling getDefaultModelProvider or pass an explicit provider.',
23 | );
24 | }
25 | return DEFAULT_PROVIDER;
26 | }
27 |
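The `@openai/agents` entry point (`packages/agents/src/index.ts`, later in this dump) performs this wiring automatically; a manual sketch, assuming both functions are re-exported from the `@openai/agents-core` root:

```ts
import {
  setDefaultModelProvider,
  getDefaultModelProvider,
} from '@openai/agents-core';
import { OpenAIProvider } from '@openai/agents-openai';

setDefaultModelProvider(new OpenAIProvider());
const provider = getDefaultModelProvider(); // throws if called before a provider is registered
```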
--------------------------------------------------------------------------------
/packages/agents-core/src/shims/interface.ts:
--------------------------------------------------------------------------------
1 | export type EventEmitterEvents = Record<string, any[]>;
2 |
3 | export interface EventEmitter<
4 | EventTypes extends EventEmitterEvents = Record<string, any[]>,
5 | > {
6 | on<K extends keyof EventTypes>(
7 | type: K,
8 | listener: (...args: EventTypes[K]) => void,
9 | ): EventEmitter<EventTypes>;
10 | off<K extends keyof EventTypes>(
11 | type: K,
12 | listener: (...args: EventTypes[K]) => void,
13 | ): EventEmitter<EventTypes>;
14 | emit<K extends keyof EventTypes>(type: K, ...args: EventTypes[K]): boolean;
15 | once<K extends keyof EventTypes>(
16 | type: K,
17 | listener: (...args: EventTypes[K]) => void,
18 | ): EventEmitter<EventTypes>;
19 | }
20 |
21 | interface ReadableStreamAsyncIterator<T>
22 | extends AsyncIterator<T> {
23 | [Symbol.asyncIterator](): ReadableStreamAsyncIterator<T>;
24 | }
25 | export interface ReadableStream<T = unknown> {
26 | values(options?: { preventCancel?: boolean }): ReadableStreamAsyncIterator<T>;
27 | [Symbol.asyncIterator](): ReadableStreamAsyncIterator<T>;
28 | }
29 |
30 | export interface Timeout {
31 | ref(): this;
32 | unref(): this;
33 | hasRef(): boolean;
34 | refresh(): this;
35 | }
36 |
37 | export interface Timer {
38 | setTimeout(callback: (...args: any[]) => any, ms: number): Timeout;
39 | clearTimeout(timeoutId: Timeout | string | number | undefined): void;
40 | }
41 |
--------------------------------------------------------------------------------
/packages/agents-core/src/shims/mcp-stdio/browser.ts:
--------------------------------------------------------------------------------
1 | import {
2 | BaseMCPServerStdio,
3 | CallToolResultContent,
4 | MCPServerStdioOptions,
5 | MCPTool,
6 | } from '../../mcp';
7 |
8 | export class MCPServerStdio extends BaseMCPServerStdio {
9 | constructor(params: MCPServerStdioOptions) {
10 | super(params);
11 | }
12 | get name(): string {
13 | return 'MCPServerStdio';
14 | }
15 | connect(): Promise<void> {
16 | throw new Error('Method not implemented.');
17 | }
18 | close(): Promise<void> {
19 | throw new Error('Method not implemented.');
20 | }
21 | listTools(): Promise<MCPTool[]> {
22 | throw new Error('Method not implemented.');
23 | }
24 | callTool(
25 | _toolName: string,
26 | _args: Record<string, unknown> | null,
27 | ): Promise<CallToolResultContent> {
28 | throw new Error('Method not implemented.');
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/packages/agents-core/src/shims/shims.ts:
--------------------------------------------------------------------------------
1 | export * from './shims-node';
2 |
--------------------------------------------------------------------------------
/packages/agents-core/src/tracing/index.ts:
--------------------------------------------------------------------------------
1 | import { TracingProcessor } from './processor';
2 | import { getGlobalTraceProvider } from './provider';
3 |
4 | export {
5 | getCurrentSpan,
6 | getCurrentTrace,
7 | getOrCreateTrace,
8 | resetCurrentSpan,
9 | setCurrentSpan,
10 | withTrace,
11 | } from './context';
12 | export * from './createSpans';
13 | export {
14 | BatchTraceProcessor,
15 | TracingExporter,
16 | TracingProcessor,
17 | ConsoleSpanExporter,
18 | } from './processor';
19 | export { NoopSpan, Span } from './spans';
20 | export { NoopTrace, Trace } from './traces';
21 | export { generateGroupId, generateSpanId, generateTraceId } from './utils';
22 |
23 | /**
24 | * Add a processor to the list of processors. Each processor will receive all traces/spans.
25 | *
26 | * @param processor - The processor to add.
27 | */
28 | export function addTraceProcessor(processor: TracingProcessor): void {
29 | getGlobalTraceProvider().registerProcessor(processor);
30 | }
31 |
32 | /**
33 | * Set the list of processors. This will replace any existing processors.
34 | *
35 | * @param processors - The list of processors to set.
36 | */
37 | export function setTraceProcessors(processors: TracingProcessor[]): void {
38 | getGlobalTraceProvider().setProcessors(processors);
39 | }
40 |
41 | /**
42 | * Set the disabled state of the tracing provider.
43 | *
44 | * @param disabled - Whether to disable tracing.
45 | */
46 | export function setTracingDisabled(disabled: boolean): void {
47 | getGlobalTraceProvider().setDisabled(disabled);
48 | }
49 |
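A short sketch of the two entry points above. The `setTraceProcessors` line mirrors `integration-tests/node/index.mjs`; `setTracingDisabled` is assumed to be re-exported from `@openai/agents` alongside it:

```ts
import {
  setTraceProcessors,
  setTracingDisabled,
  BatchTraceProcessor,
  ConsoleSpanExporter,
} from '@openai/agents';

// Replace the default processors so every trace/span is printed to the console...
setTraceProcessors([new BatchTraceProcessor(new ConsoleSpanExporter())]);

// ...or switch tracing off entirely.
setTracingDisabled(true);
```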
--------------------------------------------------------------------------------
/packages/agents-core/src/types/aliases.ts:
--------------------------------------------------------------------------------
1 | import {
2 | UserMessageItem,
3 | AssistantMessageItem,
4 | SystemMessageItem,
5 | HostedToolCallItem,
6 | FunctionCallItem,
7 | ComputerUseCallItem,
8 | FunctionCallResultItem,
9 | ComputerCallResultItem,
10 | ReasoningItem,
11 | UnknownItem,
12 | } from './protocol';
13 |
14 | /**
15 | * Context that is being passed around as part of the session is unknown
16 | */
17 | export type UnknownContext = unknown;
18 |
19 | /**
20 | * Agent is expected to output text
21 | */
22 | export type TextOutput = 'text';
23 |
24 | /**
25 | * Agent output items
26 | */
27 | export type AgentOutputItem =
28 | | UserMessageItem
29 | | AssistantMessageItem
30 | | SystemMessageItem
31 | | HostedToolCallItem
32 | | FunctionCallItem
33 | | ComputerUseCallItem
34 | | FunctionCallResultItem
35 | | ComputerCallResultItem
36 | | ReasoningItem
37 | | UnknownItem;
38 |
39 | /**
40 | * Agent input
41 | */
42 | export type AgentInputItem =
43 | | UserMessageItem
44 | | AssistantMessageItem
45 | | SystemMessageItem
46 | | HostedToolCallItem
47 | | FunctionCallItem
48 | | ComputerUseCallItem
49 | | FunctionCallResultItem
50 | | ComputerCallResultItem
51 | | ReasoningItem
52 | | UnknownItem;
53 |
--------------------------------------------------------------------------------
/packages/agents-core/src/types/index.ts:
--------------------------------------------------------------------------------
1 | export * from './protocol';
2 | export * from './helpers';
3 | export * from '../model';
4 | export * from './aliases';
5 |
--------------------------------------------------------------------------------
/packages/agents-core/src/utils/index.ts:
--------------------------------------------------------------------------------
1 | export { isZodObject } from './typeGuards';
2 | export { toSmartString } from './smartString';
3 | export { EventEmitterDelegate } from '../lifecycle';
4 |
--------------------------------------------------------------------------------
/packages/agents-core/src/utils/messages.ts:
--------------------------------------------------------------------------------
1 | import { ResponseOutputItem } from '../types';
2 | import { ModelResponse } from '../model';
3 |
4 | /**
5 | * Get the last text from the output message.
6 | * @param outputMessage
7 | * @returns
8 | */
9 | export function getLastTextFromOutputMessage(
10 | outputMessage: ResponseOutputItem,
11 | ): string | undefined {
12 | if (outputMessage.type !== 'message') {
13 | return undefined;
14 | }
15 |
16 | if (outputMessage.role !== 'assistant') {
17 | return undefined;
18 | }
19 |
20 | const lastItem = outputMessage.content[outputMessage.content.length - 1];
21 | if (lastItem.type !== 'output_text') {
22 | return undefined;
23 | }
24 |
25 | return lastItem.text;
26 | }
27 |
28 | /**
29 | * Get the last text from the output message.
30 | * @param output
31 | * @returns
32 | */
33 | export function getOutputText(output: ModelResponse) {
34 | if (output.output.length === 0) {
35 | return '';
36 | }
37 |
38 | return (
39 | getLastTextFromOutputMessage(output.output[output.output.length - 1]) || ''
40 | );
41 | }
42 |
--------------------------------------------------------------------------------
/packages/agents-core/src/utils/safeExecute.ts:
--------------------------------------------------------------------------------
1 | export type SafeExecuteResult<T> = [Error | unknown | null, T | null];
2 |
3 | export async function safeExecute<T>(
4 | fn: () => T
5 | ): Promise<SafeExecuteResult<T>> {
6 | try {
7 | return [null, await fn()];
8 | } catch (error) {
9 | return [error, null];
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/packages/agents-core/src/utils/serialize.ts:
--------------------------------------------------------------------------------
1 | import { JsonObjectSchema } from '../types';
2 | import { Handoff } from '../handoff';
3 | import { Tool } from '../tool';
4 | import { SerializedHandoff, SerializedTool } from '../model';
5 |
6 | export function serializeTool(tool: Tool): SerializedTool {
7 | if (tool.type === 'function') {
8 | return {
9 | type: 'function',
10 | name: tool.name,
11 | description: tool.description,
12 | parameters: tool.parameters as JsonObjectSchema<any>,
13 | strict: tool.strict,
14 | };
15 | }
16 | if (tool.type === 'computer') {
17 | return {
18 | type: 'computer',
19 | name: tool.name,
20 | environment: tool.computer.environment,
21 | dimensions: tool.computer.dimensions,
22 | };
23 | }
24 | return {
25 | type: 'hosted_tool',
26 | name: tool.name,
27 | providerData: tool.providerData,
28 | };
29 | }
30 |
31 | export function serializeHandoff(h: Handoff): SerializedHandoff {
32 | return {
33 | toolName: h.toolName,
34 | toolDescription: h.toolDescription,
35 | inputJsonSchema: h.inputJsonSchema as JsonObjectSchema<any>,
36 | strictJsonSchema: h.strictJsonSchema,
37 | };
38 | }
39 |
--------------------------------------------------------------------------------
/packages/agents-core/src/utils/smartString.ts:
--------------------------------------------------------------------------------
1 | export function toSmartString(value: unknown): string {
2 | if (value === null || value === undefined) {
3 | return String(value);
4 | } else if (typeof value === 'string') {
5 | return value;
6 | } else if (typeof value === 'object') {
7 | try {
8 | return JSON.stringify(value);
9 | } catch (_e) {
10 | return '[object with circular references]';
11 | }
12 | }
13 | return String(value);
14 | }
15 |
--------------------------------------------------------------------------------
/packages/agents-core/src/utils/typeGuards.ts:
--------------------------------------------------------------------------------
1 | import type { ZodObject } from 'zod/v3';
2 |
3 | /**
4 | * Verifies that an input is a ZodObject without needing to have Zod at runtime since it's an
5 | * optional dependency.
6 | * @param input
7 | * @returns
8 | */
9 |
10 | export function isZodObject(input: unknown): input is ZodObject {
11 | return (
12 | typeof input === 'object' &&
13 | input !== null &&
14 | '_def' in input &&
15 | typeof input._def === 'object' &&
16 | input._def !== null &&
17 | 'typeName' in input._def &&
18 | input._def.typeName === 'ZodObject'
19 | );
20 | }
21 | /**
22 | * Verifies that an input is an object with an `input` property.
23 | * @param input
24 | * @returns
25 | */
26 |
27 | export function isAgentToolInput(input: unknown): input is {
28 | input: string;
29 | } {
30 | return (
31 | typeof input === 'object' &&
32 | input !== null &&
33 | 'input' in input &&
34 | typeof (input as any).input === 'string'
35 | );
36 | }
37 |
--------------------------------------------------------------------------------
/packages/agents-core/test/extensions/handoffFilters.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { removeAllTools } from '../../src/extensions';
3 |
4 | describe('removeAllTools', () => {
5 | test('should be available', () => {
6 | const result = removeAllTools({
7 | inputHistory: [],
8 | preHandoffItems: [],
9 | newItems: [],
10 | });
11 | expect(result).toEqual({
12 | inputHistory: [],
13 | preHandoffItems: [],
14 | newItems: [],
15 | });
16 | });
17 | });
18 |
--------------------------------------------------------------------------------
/packages/agents-core/test/extensions/handoffPrompt.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { RECOMMENDED_PROMPT_PREFIX, promptWithHandoffInstructions } from '../../src/extensions';
3 |
4 | describe('RECOMMENDED_PROMPT_PREFIX', () => {
5 | test('should be available', () => {
6 | expect(RECOMMENDED_PROMPT_PREFIX).toBeDefined();
7 | });
8 | });
9 |
10 | describe('promptWithHandoffInstructions', () => {
11 | test('should be available', () => {
12 | expect(promptWithHandoffInstructions('foo')).toEqual(`${RECOMMENDED_PROMPT_PREFIX}\n\nfoo`);
13 | });
14 | });
15 |
--------------------------------------------------------------------------------
/packages/agents-core/test/helpers/message.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { user, system, assistant } from '../../src/helpers/message';
3 | import { UserContent } from '../../src/types';
4 |
5 | describe('message helpers', () => {
6 | it('user() converts string to message', () => {
7 | const msg = user('hi');
8 | expect(msg).toEqual({
9 | type: 'message',
10 | role: 'user',
11 | content: [{ type: 'input_text', text: 'hi' }],
12 | providerData: undefined,
13 | });
14 | });
15 |
16 | it('user() keeps array content and provider data', () => {
17 | const content: UserContent[] = [{ type: 'input_text', text: 'a', providerData: { foo: 'b' } }];
18 | const msg = user(content, { extra: true });
19 | expect(msg.content).toBe(content);
20 | expect(msg.providerData).toEqual({ extra: true });
21 | });
22 |
23 | it('system() returns system message', () => {
24 | const msg = system('rules', { id: 1 });
25 | expect(msg).toEqual({
26 | type: 'message',
27 | role: 'system',
28 | content: 'rules',
29 | providerData: { id: 1 },
30 | });
31 | });
32 |
33 | it('assistant() converts text to assistant message', () => {
34 | const msg = assistant('output');
35 | expect(msg.role).toBe('assistant');
36 | expect(msg.status).toBe('completed');
37 | expect(msg.content).toEqual([{ type: 'output_text', text: 'output' }]);
38 | });
39 | });
40 |
--------------------------------------------------------------------------------
/packages/agents-core/test/index.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 |
3 | import { Agent } from '../src/index';
4 |
5 | describe('index.ts', () => {
6 | test('has expected exports', () => {
7 | const agent = new Agent({
8 | name: 'TestAgent',
9 | outputType: 'text',
10 | });
11 | expect(agent).toBeDefined();
12 | expect(agent.name).toEqual('TestAgent');
13 | });
14 | });
15 |
--------------------------------------------------------------------------------
/packages/agents-core/test/mcp.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { MCPServerStdio } from '../src';
3 |
4 | describe('MCPServerStdio', () => {
5 | test('should be available', () => {
6 | const server = new MCPServerStdio({
7 | name: 'test',
8 | fullCommand: 'test',
9 | cacheToolsList: true,
10 | });
11 | expect(server).toBeDefined();
12 | expect(server.name).toBe('test');
13 | expect(server.cacheToolsList).toBe(true);
14 | });
15 | });
16 |
--------------------------------------------------------------------------------
/packages/agents-core/test/metadata.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import METADATA from '../src/metadata';
3 |
4 | describe('metadata', () => {
5 | test('is not unintentionally broken', () => {
6 | expect(METADATA.name).toBe('@openai/agents-core');
7 | expect(METADATA.version).toBeDefined();
8 | });
9 | });
10 |
--------------------------------------------------------------------------------
/packages/agents-core/test/model.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 |
3 | import { SerializedTool } from '../src/model';
4 |
5 | describe('model.ts', () => {
6 | test('has expected exports', () => {
7 | const tool: SerializedTool = {
8 | type: 'function',
9 | name: 'test',
10 | description: 'test',
11 | parameters: {
12 | type: 'object',
13 | properties: {
14 | foo: { type: 'string' },
15 | bar: { type: 'number' },
16 | },
17 | required: ['foo'],
18 | additionalProperties: false,
19 | },
20 | strict: false,
21 | };
22 | expect(tool).toBeDefined();
23 | expect(tool.type).toEqual('function');
24 | });
25 | });
26 |
--------------------------------------------------------------------------------
/packages/agents-core/test/providers.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { getDefaultModelProvider } from '../src/providers';
3 |
4 | describe('providers', () => {
5 | test('getDefaultModelProvider', () => {
6 | expect(() => getDefaultModelProvider()).toThrow();
7 | });
8 | });
9 |
--------------------------------------------------------------------------------
/packages/agents-core/test/run.utils.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { getTurnInput } from '../src/run';
3 | import { RunMessageOutputItem as MessageOutputItem } from '../src/items';
4 | import { Agent } from '../src/agent';
5 | import { TEST_MODEL_MESSAGE } from './stubs';
6 |
7 | describe('getTurnInput', () => {
8 | it('combines original string input with generated items', () => {
9 | const agent = new Agent({ name: 'A' });
10 | const item = new MessageOutputItem(TEST_MODEL_MESSAGE, agent);
11 | const result = getTurnInput('hello', [item]);
12 | expect(result[0]).toMatchObject({ role: 'user', type: 'message' });
13 | expect(result[1]).toEqual(TEST_MODEL_MESSAGE);
14 | });
15 | });
16 |
--------------------------------------------------------------------------------
/packages/agents-core/test/runContext.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { RunContext } from '../src/runContext';
3 | import { RunToolApprovalItem as ToolApprovalItem } from '../src/items';
4 | import { Agent } from '../src/agent';
5 |
6 | const agent = new Agent({ name: 'A' });
7 | const rawItem = {
8 | type: 'function_call',
9 | name: 'toolX',
10 | callId: '123',
11 | status: 'completed',
12 | arguments: '{}',
13 | };
14 |
15 | function createApproval() {
16 | return new ToolApprovalItem(rawItem as any, agent);
17 | }
18 |
19 | describe('RunContext', () => {
20 | it('approves and rejects tool calls', () => {
21 | const ctx = new RunContext();
22 | const item = createApproval();
23 | ctx.approveTool(item, { alwaysApprove: true });
24 | expect(ctx.isToolApproved({ toolName: 'toolX', callId: '123' })).toBe(true);
25 |
26 | ctx.rejectTool(item, { alwaysReject: true });
27 | expect(ctx.isToolApproved({ toolName: 'toolX', callId: '123' })).toBe(
28 | false,
29 | );
30 | });
31 |
32 | it('rejects all subsequent calls when alwaysReject is true', () => {
33 | const ctx = new RunContext();
34 | const item = createApproval();
35 | ctx.rejectTool(item, { alwaysReject: true });
36 | expect(ctx.isToolApproved({ toolName: 'toolX', callId: '456' })).toBe(
37 | false,
38 | );
39 | });
40 |
41 | it('rebuilds approvals map', () => {
42 | const ctx = new RunContext();
43 | ctx._rebuildApprovals({ other: { approved: true, rejected: [] } });
44 | expect(ctx.isToolApproved({ toolName: 'other', callId: '1' })).toBe(true);
45 | });
46 | });
47 |
--------------------------------------------------------------------------------
/packages/agents-core/test/shims/mcp-stdio/browser.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { MCPServerStdio } from '../../../src/shims/mcp-stdio/browser';
3 |
4 | describe('MCPServerStdio', () => {
5 | test('should be available', async () => {
6 | const server = new MCPServerStdio({
7 | name: 'test',
8 | fullCommand: 'test',
9 | cacheToolsList: true,
10 | });
11 | expect(server).toBeDefined();
12 | await expect(() => server.connect()).toThrow();
13 | });
14 | });
15 |
--------------------------------------------------------------------------------
/packages/agents-core/test/usage.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 |
3 | import { Usage } from '../src/usage';
4 |
5 | describe('Usage', () => {
6 | it('initialises with default values', () => {
7 | const usage = new Usage();
8 |
9 | expect(usage.requests).toBe(0);
10 | expect(usage.inputTokens).toBe(0);
11 | expect(usage.outputTokens).toBe(0);
12 | expect(usage.totalTokens).toBe(0);
13 | });
14 |
15 | it('can be constructed from a ResponseUsage‑like object', () => {
16 | const usage = new Usage({
17 | requests: 3,
18 | inputTokens: 10,
19 | outputTokens: 5,
20 | totalTokens: 15,
21 | });
22 |
23 | expect(usage.requests).toBe(3);
24 | expect(usage.inputTokens).toBe(10);
25 | expect(usage.outputTokens).toBe(5);
26 | expect(usage.totalTokens).toBe(15);
27 | });
28 |
29 | it('adds other Usage instances correctly', () => {
30 | const usageA = new Usage({
31 | inputTokens: 1,
32 | outputTokens: 1,
33 | totalTokens: 2,
34 | });
35 | const usageB = new Usage({
36 | requests: 2,
37 | inputTokens: 3,
38 | outputTokens: 4,
39 | totalTokens: 7,
40 | });
41 |
42 | usageA.add(usageB);
43 |
44 | expect(usageA.requests).toBe(3); // 1 (default) + 2
45 | expect(usageA.inputTokens).toBe(4); // 1 + 3
46 | expect(usageA.outputTokens).toBe(5); // 1 + 4
47 | expect(usageA.totalTokens).toBe(9); // 2 + 7
48 | });
49 | });
50 |
--------------------------------------------------------------------------------
/packages/agents-core/test/utils/index.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { toSmartString } from '../../src/utils/index';
3 |
4 | describe('utils/index', () => {
5 | it('toSmartString', () => {
6 | expect(toSmartString('foo')).toBe('foo');
7 | });
8 | });
9 |
--------------------------------------------------------------------------------
/packages/agents-core/test/utils/safeExecute.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { safeExecute } from '../../src/utils/safeExecute';
3 |
4 | describe('safeExecute', () => {
5 | it('returns value when function succeeds', async () => {
6 | const [err, value] = await safeExecute(() => 'ok');
7 | expect(err).toBeNull();
8 | expect(value).toBe('ok');
9 | });
10 |
11 | it('returns error when function throws', async () => {
12 | const [err, value] = await safeExecute(() => {
13 | throw new Error('fail');
14 | });
15 | expect(value).toBeNull();
16 | expect(err).toBeInstanceOf(Error);
17 | });
18 | });
19 |
--------------------------------------------------------------------------------
/packages/agents-core/test/utils/smartString.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { toSmartString } from '../../src/utils/smartString';
3 |
4 | describe('toSmartString()', () => {
5 | test('should convert null to string', () => {
6 | expect(toSmartString(null)).toBe('null');
7 | });
8 | test('should convert undefined to string', () => {
9 | expect(toSmartString(undefined)).toBe('undefined');
10 | });
11 | test('should convert string to string', () => {
12 | expect(toSmartString('test')).toBe('test');
13 | });
14 |
15 | test('should convert number to string', () => {
16 | expect(toSmartString(123)).toBe('123');
17 | });
18 |
19 | test('should convert boolean to string', () => {
20 | expect(toSmartString(true)).toBe('true');
21 | });
22 |
23 | test('should convert an array to string', () => {
24 | expect(toSmartString([1, 2, 3])).toBe('[1,2,3]');
25 | });
26 |
27 | test('should convert object to string', () => {
28 | expect(toSmartString({ foo: 'bar' })).toBe(JSON.stringify({ foo: 'bar' }));
29 | });
30 | });
31 |
--------------------------------------------------------------------------------
/packages/agents-core/test/utils/typeGuards.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { isZodObject, isAgentToolInput } from '../../src/utils/typeGuards';
3 | import { z } from 'zod/v3';
4 |
5 | describe('type guards', () => {
6 | it('isZodObject detects zod objects', () => {
7 | expect(isZodObject(z.object({}))).toBe(true);
8 | expect(isZodObject({})).toBe(false);
9 | });
10 |
11 | it('isAgentToolInput checks for string input property', () => {
12 | expect(isAgentToolInput({ input: 'x' })).toBe(true);
13 | expect(isAgentToolInput({ input: 42 })).toBe(false);
14 | expect(isAgentToolInput({ input: {} })).toBe(false);
15 | expect(isAgentToolInput({ other: 1 })).toBe(false);
16 | expect(isAgentToolInput(null)).toBe(false);
17 | });
18 | });
19 |
--------------------------------------------------------------------------------
/packages/agents-core/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist",
5 | "rootDir": "./src",
6 | "paths": {
7 | "@openai/agents-core": ["./src/index.ts"],
8 | "@openai/agents-core/_shims": ["./src/shims/shims-node.ts"]
9 | }
10 | },
11 | "exclude": ["dist/**", "test/**"]
12 | }
13 |
--------------------------------------------------------------------------------
/packages/agents-core/tsconfig.test.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist",
5 | "paths": {
6 | "@openai/agents-core": ["./src/index.ts"],
7 | "@openai/agents-core/_shims": ["./src/shims/shims-node.ts"]
8 | }
9 | },
10 | "include": ["src/**/*.ts", "test/**/*.ts"],
11 | "exclude": ["dist/**"]
12 | }
13 |
--------------------------------------------------------------------------------
/packages/agents-extensions/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # @openai/agents-extensions
2 |
3 | ## 0.0.4
4 |
5 | ### Patch Changes
6 |
7 | - 0f4850e: Fix #34 by adjusting the internals of ai-sdk integration
8 | - @openai/agents@0.0.4
9 |
10 | ## 0.0.3
11 |
12 | ### Patch Changes
13 |
14 | - @openai/agents@0.0.3
15 |
16 | ## 0.0.2
17 |
18 | ### Patch Changes
19 |
20 | - @openai/agents@0.0.2
21 |
22 | ## 0.0.1
23 |
24 | ### Patch Changes
25 |
26 | - aaa6d08: Initial release
27 | - Updated dependencies [aaa6d08]
28 | - @openai/agents@0.0.1
29 |
30 | ## 0.0.1-next.0
31 |
32 | ### Patch Changes
33 |
34 | - Initial release
35 | - Updated dependencies
36 | - @openai/agents@0.0.1-next.0
37 |
--------------------------------------------------------------------------------
/packages/agents-extensions/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Agents SDK Extensions
2 |
3 | This package contains a collection of extension features for the OpenAI Agents SDK and is intended to be used alongside it.
4 |
5 | ## Installation
6 |
7 | ```bash
8 | npm install @openai/agents @openai/agents-extensions
9 | ```
10 |
11 | ## License
12 |
13 | MIT
14 |
--------------------------------------------------------------------------------
/packages/agents-extensions/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@openai/agents-extensions",
3 | "repository": "https://github.com/openai/openai-agents-js",
4 | "homepage": "https://openai.github.io/openai-agents-js/",
5 | "version": "0.0.4",
6 | "description": "Extensions for the OpenAI Agents SDK",
7 | "author": "OpenAI ",
8 | "main": "dist/index.js",
9 | "types": "dist/index.d.ts",
10 | "scripts": {
11 | "prebuild": "tsx ../../scripts/embedMeta.ts",
12 | "build": "tsc",
13 | "build-check": "tsc --noEmit -p ./tsconfig.test.json"
14 | },
15 | "dependencies": {
16 | "@ai-sdk/provider": "^1.1.3",
17 | "@openai/zod": "npm:zod@^3.25.40",
18 | "@types/ws": "^8.18.1",
19 | "debug": "^4.4.0"
20 | },
21 | "exports": {
22 | ".": {
23 | "require": {
24 | "types": "./dist/index.d.ts",
25 | "default": "./dist/index.js"
26 | },
27 | "types": "./dist/index.d.ts",
28 | "default": "./dist/index.mjs"
29 | }
30 | },
31 | "peerDependencies": {
32 | "@openai/agents": "workspace:*",
33 | "ws": "^8.18.1"
34 | },
35 | "keywords": [
36 | "openai",
37 | "agents",
38 | "ai",
39 | "agentic"
40 | ],
41 | "license": "MIT",
42 | "devDependencies": {
43 | "@openai/agents": "workspace:*",
44 | "@types/debug": "^4.1.12",
45 | "ws": "^8.18.1"
46 | },
47 | "files": [
48 | "dist"
49 | ]
50 | }
51 |
--------------------------------------------------------------------------------
/packages/agents-extensions/src/index.ts:
--------------------------------------------------------------------------------
1 | export * from './TwilioRealtimeTransport';
2 | export * from './aiSdk';
3 |
--------------------------------------------------------------------------------
/packages/agents-extensions/src/metadata.ts:
--------------------------------------------------------------------------------
1 |
2 | // This file is automatically generated
3 |
4 | export const METADATA = {
5 | "name": "@openai/agents-extensions",
6 | "version": "0.0.1-next.0",
7 | "versions": {
8 | "@openai/agents-extensions": "0.0.1-next.0",
9 | "@openai/zod": "npm:zod@^3.25.40"
10 | }
11 | };
12 |
13 | export default METADATA;
14 |
--------------------------------------------------------------------------------
/packages/agents-extensions/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist",
5 | "rootDir": "./src"
6 | },
7 | "exclude": ["dist/**", "test/**"]
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/packages/agents-extensions/tsconfig.test.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist"
5 | },
6 | "include": ["src/**/*.ts", "test/**/*.ts"],
7 | "exclude": ["dist/**"]
8 | }
9 |
--------------------------------------------------------------------------------
/packages/agents-openai/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # @openai/agents-openai
2 |
3 | ## 0.0.4
4 |
5 | ### Patch Changes
6 |
7 | - ded675a: chore(openai): add more accurate debug logging
8 | - Updated dependencies [25165df]
9 | - Updated dependencies [6683db0]
10 | - Updated dependencies [78811c6]
11 | - Updated dependencies [426ad73]
12 | - @openai/agents-core@0.0.4
13 |
14 | ## 0.0.3
15 |
16 | ### Patch Changes
17 |
18 | - 0474de9: Fix incorrect handling of chat completions mode for handoff
19 | - Updated dependencies [d7fd8dc]
20 | - Updated dependencies [284d0ab]
21 | - @openai/agents-core@0.0.3
22 |
23 | ## 0.0.2
24 |
25 | ### Patch Changes
26 |
27 | - b4942fa: Fix #5 setDefaultOpenAIClient issue in agents-openai package
28 | - Updated dependencies [a2979b6]
29 | - @openai/agents-core@0.0.2
30 |
31 | ## 0.0.1
32 |
33 | ### Patch Changes
34 |
35 | - aaa6d08: Initial release
36 | - Updated dependencies [aaa6d08]
37 | - @openai/agents-core@0.0.1
38 |
39 | ## 0.0.1-next.0
40 |
41 | ### Patch Changes
42 |
43 | - Initial release
44 | - Updated dependencies
45 | - @openai/agents-core@0.0.1-next.0
46 |
--------------------------------------------------------------------------------
/packages/agents-openai/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Agents SDK
2 |
3 | The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows.
4 |
5 | ## Installation
6 |
7 | ```bash
8 | npm install @openai/agents
9 | ```
10 |
11 | ## License
12 |
13 | MIT
14 |
--------------------------------------------------------------------------------
/packages/agents-openai/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@openai/agents-openai",
3 | "repository": "https://github.com/openai/openai-agents-js",
4 | "homepage": "https://openai.github.io/openai-agents-js/",
5 | "version": "0.0.4",
6 | "description": "The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows.",
7 | "author": "OpenAI ",
8 | "main": "dist/index.js",
9 | "types": "dist/index.d.ts",
10 | "exports": {
11 | ".": {
12 | "require": {
13 | "types": "./dist/index.d.ts",
14 | "default": "./dist/index.js"
15 | },
16 | "types": "./dist/index.d.ts",
17 | "default": "./dist/index.mjs"
18 | }
19 | },
20 | "dependencies": {
21 | "@openai/agents-core": "workspace:*",
22 | "debug": "^4.4.0",
23 | "openai": "^5.0.1",
24 | "@openai/zod": "npm:zod@^3.25.40"
25 | },
26 | "scripts": {
27 | "prebuild": "tsx ../../scripts/embedMeta.ts",
28 | "build": "tsc",
29 | "build-check": "tsc --noEmit -p ./tsconfig.test.json"
30 | },
31 | "keywords": [
32 | "openai",
33 | "agents",
34 | "ai",
35 | "agentic"
36 | ],
37 | "license": "MIT",
38 | "devDependencies": {
39 | "@ai-sdk/provider": "^1.1.3",
40 | "@types/debug": "^4.1.12"
41 | },
42 | "files": [
43 | "dist"
44 | ]
45 | }
46 |
--------------------------------------------------------------------------------
/packages/agents-openai/src/defaults.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from 'openai';
2 | import { loadEnv } from '@openai/agents-core/_shims';
3 | import METADATA from './metadata';
4 |
5 | export const DEFAULT_OPENAI_API = 'responses';
6 | export const DEFAULT_OPENAI_MODEL = 'gpt-4.1';
7 |
8 | let _defaultOpenAIAPI = DEFAULT_OPENAI_API;
9 | let _defaultOpenAIClient: OpenAI | undefined;
10 | let _defaultOpenAIKey: string | undefined = undefined;
11 | let _defaultTracingApiKey: string | undefined = undefined;
12 |
13 | export function setTracingExportApiKey(key: string) {
14 | _defaultTracingApiKey = key;
15 | }
16 |
17 | export function getTracingExportApiKey(): string | undefined {
18 | return _defaultTracingApiKey ?? loadEnv().OPENAI_API_KEY;
19 | }
20 |
21 | export function shouldUseResponsesByDefault() {
22 | return _defaultOpenAIAPI === 'responses';
23 | }
24 |
25 | export function setOpenAIAPI(value: 'chat_completions' | 'responses') {
26 | _defaultOpenAIAPI = value;
27 | }
28 |
29 | export function setDefaultOpenAIClient(client: OpenAI) {
30 | _defaultOpenAIClient = client;
31 | }
32 |
33 | export function getDefaultOpenAIClient(): OpenAI | undefined {
34 | return _defaultOpenAIClient;
35 | }
36 |
37 | export function setDefaultOpenAIKey(key: string) {
38 | _defaultOpenAIKey = key;
39 | }
40 |
41 | export function getDefaultOpenAIKey(): string | undefined {
42 | return _defaultOpenAIKey ?? loadEnv().OPENAI_API_KEY;
43 | }
44 |
45 | export const HEADERS = {
46 | 'User-Agent': `Agents/JavaScript ${METADATA.version}`,
47 | };
48 |
--------------------------------------------------------------------------------
/packages/agents-openai/src/index.ts:
--------------------------------------------------------------------------------
1 | export { OpenAIProvider } from './openaiProvider';
2 | export { OpenAIResponsesModel } from './openaiResponsesModel';
3 | export { OpenAIChatCompletionsModel } from './openaiChatCompletionsModel';
4 | export {
5 | setDefaultOpenAIClient,
6 | setOpenAIAPI,
7 | setDefaultOpenAIKey,
8 | setTracingExportApiKey,
9 | } from './defaults';
10 | export {
11 | setDefaultOpenAITracingExporter,
12 | OpenAITracingExporter,
13 | OpenAITracingExporterOptions,
14 | } from './openaiTracingExporter';
15 | export {
16 | webSearchTool,
17 | fileSearchTool,
18 | codeInterpreterTool,
19 | imageGenerationTool,
20 | } from './tools';
21 |
--------------------------------------------------------------------------------
/packages/agents-openai/src/logger.ts:
--------------------------------------------------------------------------------
1 | import { getLogger } from '@openai/agents-core';
2 |
3 | const logger = getLogger('openai-agents:openai');
4 |
5 | export default logger;
6 |
--------------------------------------------------------------------------------
/packages/agents-openai/src/metadata.ts:
--------------------------------------------------------------------------------
1 |
2 | // This file is automatically generated
3 |
4 | export const METADATA = {
5 | "name": "@openai/agents-openai",
6 | "version": "0.0.1-next.0",
7 | "versions": {
8 | "@openai/agents-openai": "0.0.1-next.0",
9 | "@openai/agents-core": "workspace:*",
10 | "openai": "^5.0.1",
11 | "@openai/zod": "npm:zod@^3.25.40"
12 | }
13 | };
14 |
15 | export default METADATA;
16 |
--------------------------------------------------------------------------------
/packages/agents-openai/test/defaults.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import {
3 | DEFAULT_OPENAI_MODEL,
4 | setTracingExportApiKey,
5 | getTracingExportApiKey,
6 | shouldUseResponsesByDefault,
7 | setOpenAIAPI,
8 | getDefaultOpenAIClient,
9 | setDefaultOpenAIClient,
10 | setDefaultOpenAIKey,
11 | getDefaultOpenAIKey,
12 | } from '../src/defaults';
13 | import OpenAI from 'openai';
14 |
15 | describe('Defaults', () => {
16 | test('Default OpenAI model is out there', () => {
17 | expect(DEFAULT_OPENAI_MODEL).toBeDefined();
18 | });
19 | test('get/setTracingExportApiKey', async () => {
20 | setTracingExportApiKey('foo');
21 | expect(getTracingExportApiKey()).toBe('foo');
22 | });
23 | test('shouldUseResponsesByDefault', async () => {
24 | setOpenAIAPI('responses');
25 | expect(shouldUseResponsesByDefault()).toBe(true);
26 | setOpenAIAPI('chat_completions');
27 | expect(shouldUseResponsesByDefault()).toBe(false);
28 | });
29 | test('get/setDefaultOpenAIClient', async () => {
30 | const client = new OpenAI({ apiKey: 'foo' });
31 | setDefaultOpenAIClient(client);
32 | expect(getDefaultOpenAIClient()).toBe(client);
33 | });
34 | test('get/setDefaultOpenAIKey', async () => {
35 | setDefaultOpenAIKey('foo');
36 | expect(getDefaultOpenAIKey()).toBe('foo');
37 | });
38 | });
39 |
--------------------------------------------------------------------------------
/packages/agents-openai/test/index.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { OpenAIProvider } from '../src';
3 |
4 | describe('Exports', () => {
5 | test('OpenAIProvider is out there', () => {
6 | const provider = new OpenAIProvider();
7 | expect(provider).toBeDefined();
8 | });
9 | });
10 |
--------------------------------------------------------------------------------
/packages/agents-openai/test/openaiProvider.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { OpenAIProvider } from '../src/openaiProvider';
3 | import { OpenAIResponsesModel } from '../src/openaiResponsesModel';
4 | import { OpenAIChatCompletionsModel } from '../src/openaiChatCompletionsModel';
5 | import { setOpenAIAPI } from '../src/defaults';
6 |
7 | class FakeClient {}
8 |
9 | describe('OpenAIProvider', () => {
10 | it('throws when apiKey and openAIClient are provided', () => {
11 | expect(() => new OpenAIProvider({ apiKey: 'k', openAIClient: {} as any })).toThrow();
12 | });
13 |
14 | it('throws when baseURL and openAIClient are provided', () => {
15 | expect(() => new OpenAIProvider({ baseURL: 'x', openAIClient: {} as any })).toThrow();
16 | });
17 |
18 | it('returns responses model when useResponses true', async () => {
19 | const provider = new OpenAIProvider({ openAIClient: new FakeClient() as any, useResponses: true });
20 | const model = await provider.getModel('m');
21 | expect(model).toBeInstanceOf(OpenAIResponsesModel);
22 | });
23 |
24 | it('uses default API when useResponses not set', async () => {
25 | setOpenAIAPI('responses');
26 | let provider = new OpenAIProvider({ openAIClient: new FakeClient() as any });
27 | expect(await provider.getModel('m')).toBeInstanceOf(OpenAIResponsesModel);
28 |
29 | setOpenAIAPI('chat_completions');
30 | provider = new OpenAIProvider({ openAIClient: new FakeClient() as any });
31 | expect(await provider.getModel('m')).toBeInstanceOf(OpenAIChatCompletionsModel);
32 | });
33 | });
34 |
--------------------------------------------------------------------------------
/packages/agents-openai/test/tools.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { fileSearchTool, webSearchTool } from '../src/tools';
3 |
4 | describe('Tool', () => {
5 | it('webSearchTool', () => {
6 | const t = webSearchTool({
7 | userLocation: { type: 'approximate', city: 'Tokyo' },
8 | });
9 | expect(t).toBeDefined();
10 | expect(t.type).toBe('hosted_tool');
11 | expect(t.name).toBe('web_search_preview');
12 | });
13 |
14 | it('fileSearchTool', () => {
15 | const t = fileSearchTool(['test'], {});
16 | expect(t).toBeDefined();
17 | expect(t.type).toBe('hosted_tool');
18 | expect(t.name).toBe('file_search');
19 |
20 | const t2 = fileSearchTool('test', {});
21 | expect(t2).toBeDefined();
22 | expect(t2.type).toBe('hosted_tool');
23 | expect(t2.name).toBe('file_search');
24 | });
25 | });
26 |
--------------------------------------------------------------------------------
/packages/agents-openai/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist",
5 | "rootDir": "./src"
6 | },
7 | "exclude": ["dist/**", "test/**"]
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/packages/agents-openai/tsconfig.test.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist"
5 | },
6 | "include": ["src/**/*.ts", "test/**/*.ts"],
7 | "exclude": ["dist/**"]
8 | }
9 |
--------------------------------------------------------------------------------
/packages/agents-realtime/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # @openai/agents-realtime
2 |
3 | ## 0.0.4
4 |
5 | ### Patch Changes
6 |
7 | - Updated dependencies [25165df]
8 | - Updated dependencies [6683db0]
9 | - Updated dependencies [78811c6]
10 | - Updated dependencies [426ad73]
11 | - @openai/agents-core@0.0.4
12 |
13 | ## 0.0.3
14 |
15 | ### Patch Changes
16 |
17 | - 68ff0ba: fix: avoid realtime guardrail race condition and detect ongoing response
18 | - Updated dependencies [d7fd8dc]
19 | - Updated dependencies [284d0ab]
20 | - @openai/agents-core@0.0.3
21 |
22 | ## 0.0.2
23 |
24 | ### Patch Changes
25 |
26 | - Updated dependencies [a2979b6]
27 | - @openai/agents-core@0.0.2
28 |
29 | ## 0.0.1
30 |
31 | ### Patch Changes
32 |
33 | - aaa6d08: Initial release
34 | - Updated dependencies [aaa6d08]
35 | - @openai/agents-core@0.0.1
36 |
37 | ## 0.0.1-next.0
38 |
39 | ### Patch Changes
40 |
41 | - Initial release
42 | - Updated dependencies
43 | - @openai/agents-core@0.0.1-next.0
44 |
--------------------------------------------------------------------------------
/packages/agents-realtime/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Agents SDK
2 |
3 | The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows.
4 |
5 | ## Installation
6 |
7 | ```bash
8 | npm install @openai/agents
9 | ```
10 |
11 | ## License
12 |
13 | MIT
14 |
--------------------------------------------------------------------------------
/packages/agents-realtime/src/clientMessages.ts:
--------------------------------------------------------------------------------
1 | import {
2 | JsonObjectSchema,
3 | ModelSettingsToolChoice,
4 | } from '@openai/agents-core/types';
5 |
6 | export type RealtimeClientMessage = {
7 | type: string;
8 | [key: string]: any;
9 | };
10 |
11 | export type RealtimeUserInput =
12 | | string
13 | | {
14 | type: 'message';
15 | role: 'user';
16 | content: {
17 | type: 'input_text';
18 | text: string;
19 | }[];
20 | };
21 |
22 | export type RealtimeAudioFormat =
23 | | 'pcm16'
24 | | 'g711_ulaw'
25 | | 'g711_alaw'
26 | | (string & {});
27 |
28 | export type RealtimeTracingConfig =
29 | | {
30 | workflow_name?: string;
31 | group_id?: string;
32 | metadata?: Record<string, any>;
33 | }
34 | | 'auto';
35 |
36 | export type RealtimeSessionConfig = {
37 | model: string;
38 | instructions: string;
39 | modalities: ('text' | 'audio')[];
40 | voice: string;
41 | inputAudioFormat: RealtimeAudioFormat;
42 | outputAudioFormat: RealtimeAudioFormat;
43 | inputAudioTranscription: Record<string, any>;
44 | turnDetection: Record<string, any>;
45 | toolChoice: ModelSettingsToolChoice;
46 | tools: FunctionToolDefinition[];
47 | tracing?: RealtimeTracingConfig | null;
48 | providerData?: Record<string, any>;
49 | };
50 |
51 | export type FunctionToolDefinition = {
52 | type: 'function';
53 | name: string;
54 | description: string;
55 | parameters: JsonObjectSchema<any>;
56 | strict: boolean;
57 | };
58 |
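An illustrative, partially filled `RealtimeSessionConfig` built from the fields above. The type is assumed to be re-exported from `@openai/agents-realtime`, and the voice and tool-choice values are placeholders:

```ts
import type { RealtimeSessionConfig } from '@openai/agents-realtime';

const sessionConfig: Partial<RealtimeSessionConfig> = {
  instructions: 'You are a friendly voice assistant.',
  modalities: ['text', 'audio'],
  voice: 'alloy', // placeholder voice name
  inputAudioFormat: 'pcm16',
  outputAudioFormat: 'pcm16',
  toolChoice: 'auto', // assumed to be a valid ModelSettingsToolChoice
  tools: [],
  tracing: 'auto',
};
```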
--------------------------------------------------------------------------------
/packages/agents-realtime/src/logger.ts:
--------------------------------------------------------------------------------
1 | import { getLogger } from '@openai/agents-core';
2 |
3 | const logger = getLogger('openai-agents:realtime');
4 |
5 | export default logger;
6 |
--------------------------------------------------------------------------------
/packages/agents-realtime/src/metadata.ts:
--------------------------------------------------------------------------------
1 |
2 | // This file is automatically generated
3 |
4 | export const METADATA = {
5 | "name": "@openai/agents-realtime",
6 | "version": "0.0.1-next.0",
7 | "versions": {
8 | "@openai/agents-realtime": "0.0.1-next.0",
9 | "@openai/agents-core": "workspace:*"
10 | }
11 | };
12 |
13 | export default METADATA;
14 |
--------------------------------------------------------------------------------
/packages/agents-realtime/src/shims/shims-browser.ts:
--------------------------------------------------------------------------------
1 | /// <reference lib="dom" />
2 |
3 | export const WebSocket = globalThis.WebSocket;
4 | export function isBrowserEnvironment(): boolean {
5 | return true;
6 | }
7 |
--------------------------------------------------------------------------------
/packages/agents-realtime/src/shims/shims-node.ts:
--------------------------------------------------------------------------------
1 | export { WebSocket } from 'ws';
2 | export function isBrowserEnvironment(): boolean {
3 | return false;
4 | }
5 |
--------------------------------------------------------------------------------
/packages/agents-realtime/src/shims/shims.ts:
--------------------------------------------------------------------------------
1 | export * from './shims-node';
2 |
--------------------------------------------------------------------------------
/packages/agents-realtime/test/index.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, test, expect } from 'vitest';
2 | import { RealtimeAgent, RealtimeSession } from '../src';
3 |
4 | describe('RealtimeAgent', () => {
5 | test('should be available', () => {
6 | const ra = new RealtimeAgent({
7 | name: 'test',
8 | instructions: 'test',
9 | });
10 | expect(ra).toBeDefined();
11 | });
12 | });
13 |
14 | describe('RealtimeSession', () => {
15 | test('should be available', () => {
16 | const session = new RealtimeSession(
17 | new RealtimeAgent({
18 | name: 'test',
19 | instructions: 'test',
20 | }),
21 | );
22 | expect(session).toBeDefined();
23 | });
24 | });
25 |
--------------------------------------------------------------------------------
/packages/agents-realtime/test/openaiRealtimeWebRtc.environment.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { OpenAIRealtimeWebRTC } from '../src/openaiRealtimeWebRtc';
3 |
4 | describe('OpenAIRealtimeWebRTC constructor', () => {
5 | it('throws if WebRTC is not available', () => {
6 | const original = (global as any).RTCPeerConnection;
7 | delete (global as any).RTCPeerConnection;
8 |
9 | expect(() => new OpenAIRealtimeWebRTC()).toThrow(
10 | 'WebRTC is not supported in this environment'
11 | );
12 |
13 | (global as any).RTCPeerConnection = original;
14 | });
15 | });
16 |
17 |
--------------------------------------------------------------------------------
/packages/agents-realtime/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist",
5 | "rootDir": "./src",
6 | "paths": {
7 | "@openai/agents-realtime": ["./src/index.ts"],
8 | "@openai/agents-realtime/_shims": ["./src/shims/shims-node.ts"]
9 | }
10 | },
11 | "exclude": ["dist/**", "test/**"]
12 | }
13 |
--------------------------------------------------------------------------------
/packages/agents-realtime/tsconfig.test.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist",
5 | "paths": {
6 | "@openai/agents-realtime": ["./src/index.ts"],
7 | "@openai/agents-realtime/_shims": ["./src/shims/shims-node.ts"]
8 | }
9 | },
10 | "include": ["src/**/*.ts", "test/**/*.ts"],
11 | "exclude": ["dist/**"]
12 | }
13 |
--------------------------------------------------------------------------------
/packages/agents-realtime/vite.config.js:
--------------------------------------------------------------------------------
1 | import { dirname, resolve } from 'node:path';
2 | import { fileURLToPath } from 'node:url';
3 | import { defineConfig } from 'vite';
4 |
5 | const __dirname = dirname(fileURLToPath(import.meta.url));
6 |
7 | export default defineConfig({
8 | build: {
9 | lib: {
10 | entry: resolve(__dirname, 'dist/index.mjs'),
11 | name: 'OpenAIAgentsRealtime',
12 | // the proper extensions will be added
13 | fileName: 'openai-realtime-agents',
14 | },
15 | sourcemap: 'inline',
16 | rollupOptions: {
17 | // make sure to externalize deps that shouldn't be bundled
18 | // into your library
19 | external: [],
20 | output: {
21 | dir: 'dist/bundle',
22 | banner: '/** OpenAI Agents Realtime **/',
23 | minifyInternalExports: false,
24 | // Provide global variables to use in the UMD build
25 | // for externalized deps
26 | globals: {
27 | // vue: 'Vue',
28 | },
29 | },
30 | },
31 | },
32 | });
33 |
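This library build wraps the already-compiled `dist/index.mjs` into browser bundles under `dist/bundle`, exposed under the global name `OpenAIAgentsRealtime`. A hedged sketch of consuming such a bundle from a plain script tag (exact file names and extensions depend on Vite's output defaults):

```ts
// Assuming the IIFE/UMD bundle has been loaded via a <script> tag, the
// global declared below mirrors the library's exports (illustrative only).
declare const OpenAIAgentsRealtime: typeof import('@openai/agents-realtime');

const agent = new OpenAIAgentsRealtime.RealtimeAgent({
  name: 'Voice Assistant',
  instructions: 'Keep answers short.',
});
```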
--------------------------------------------------------------------------------
/packages/agents/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # @openai/agents
2 |
3 | ## 0.0.4
4 |
5 | ### Patch Changes
6 |
7 | - Updated dependencies [ded675a]
8 | - Updated dependencies [25165df]
9 | - Updated dependencies [6683db0]
10 | - Updated dependencies [78811c6]
11 | - Updated dependencies [426ad73]
12 | - @openai/agents-openai@0.0.4
13 | - @openai/agents-core@0.0.4
14 | - @openai/agents-realtime@0.0.4
15 |
16 | ## 0.0.3
17 |
18 | ### Patch Changes
19 |
20 | - Updated dependencies [d7fd8dc]
21 | - Updated dependencies [284d0ab]
22 | - Updated dependencies [0474de9]
23 | - Updated dependencies [68ff0ba]
24 | - @openai/agents-core@0.0.3
25 | - @openai/agents-openai@0.0.3
26 | - @openai/agents-realtime@0.0.3
27 |
28 | ## 0.0.2
29 |
30 | ### Patch Changes
31 |
32 | - Updated dependencies [a2979b6]
33 | - Updated dependencies [b4942fa]
34 | - @openai/agents-core@0.0.2
35 | - @openai/agents-openai@0.0.2
36 | - @openai/agents-realtime@0.0.2
37 |
38 | ## 0.0.1
39 |
40 | ### Patch Changes
41 |
42 | - aaa6d08: Initial release
43 | - Updated dependencies [aaa6d08]
44 | - @openai/agents-realtime@0.0.1
45 | - @openai/agents-openai@0.0.1
46 | - @openai/agents-core@0.0.1
47 |
48 | ## 0.0.1-next.0
49 |
50 | ### Patch Changes
51 |
52 | - Initial release
53 | - Updated dependencies
54 | - @openai/agents-realtime@0.0.1-next.0
55 | - @openai/agents-openai@0.0.1-next.0
56 | - @openai/agents-core@0.0.1-next.0
57 |
--------------------------------------------------------------------------------
/packages/agents/src/index.ts:
--------------------------------------------------------------------------------
1 | import { setDefaultModelProvider } from '@openai/agents-core';
2 | import { OpenAIProvider } from '@openai/agents-openai';
3 | import { setDefaultOpenAITracingExporter } from '@openai/agents-openai';
4 |
5 | setDefaultModelProvider(new OpenAIProvider());
6 | setDefaultOpenAITracingExporter();
7 |
8 | export * from '@openai/agents-core';
9 | export * from '@openai/agents-openai';
10 | export * as realtime from '@openai/agents-realtime';
11 |
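Because the umbrella package registers the OpenAI provider and tracing exporter as a side effect of being imported, downstream code can run agents without any explicit setup. A minimal usage sketch, assuming the usual core exports re-exported above and an `OPENAI_API_KEY` in the environment:

```ts
import { Agent, run } from '@openai/agents';

const agent = new Agent({
  name: 'Assistant',
  instructions: 'You are a helpful assistant.',
});

// run() picks up the default OpenAIProvider registered by the import above.
const result = await run(agent, 'Say hello in one sentence.');
console.log(result.finalOutput);
```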
--------------------------------------------------------------------------------
/packages/agents/src/metadata.ts:
--------------------------------------------------------------------------------
1 |
2 | // This file is automatically generated
3 |
4 | export const METADATA = {
5 | "name": "@openai/agents",
6 | "version": "0.0.1-next.0",
7 | "versions": {
8 | "@openai/agents": "0.0.1-next.0",
9 | "@openai/agents-core": "workspace:*",
10 | "@openai/agents-openai": "workspace:*",
11 | "@openai/agents-realtime": "workspace:*",
12 | "openai": "^5.0.1"
13 | }
14 | };
15 |
16 | export default METADATA;
17 |
--------------------------------------------------------------------------------
/packages/agents/src/realtime/index.ts:
--------------------------------------------------------------------------------
1 | export * from '@openai/agents-realtime';
2 |
--------------------------------------------------------------------------------
/packages/agents/src/utils/index.ts:
--------------------------------------------------------------------------------
1 | export * from '@openai/agents-core/utils';
2 |
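These two one-line modules presumably back `./realtime` and `./utils` subpath exports of the umbrella package (the package.json exports map is not shown in this dump), letting consumers reach either surface directly:

```ts
// Assumed subpath imports; the concrete export map is not shown here.
import { RealtimeAgent } from '@openai/agents/realtime';
import { isZodObject } from '@openai/agents/utils';

const voiceAgent = new RealtimeAgent({ name: 'Voice', instructions: 'Hi' });
console.log(isZodObject({}), voiceAgent.name);
```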
--------------------------------------------------------------------------------
/packages/agents/test/index.test.ts:
--------------------------------------------------------------------------------
1 | import { Agent } from '../src/index';
2 | import { RealtimeAgent } from '../src/realtime';
3 | import { isZodObject } from '../src/utils';
4 | import { describe, test, expect } from 'vitest';
5 |
6 | describe('Exports', () => {
7 | test('Agent is out there', () => {
8 | const agent = new Agent({ name: 'Test' });
9 | expect(agent.name).toBe('Test');
10 | });
11 | });
12 |
13 | describe('RealtimeAgent', () => {
14 | test('should be available', () => {
15 | const agent = new RealtimeAgent({ name: 'Test' });
16 | expect(agent.name).toBe('Test');
17 | });
18 | });
19 |
20 | describe('isZodObject', () => {
21 | test('should be available', () => {
22 | expect(isZodObject({})).toBe(false);
23 | });
24 | });
25 |
--------------------------------------------------------------------------------
/packages/agents/test/metadata.test.ts:
--------------------------------------------------------------------------------
1 | import { METADATA } from '../src/metadata';
2 | import { describe, test, expect } from 'vitest';
3 |
4 | describe('Metadata', () => {
5 | test('is not changed unintentionally', () => {
6 | expect(METADATA.name).toBe('@openai/agents');
7 | expect(METADATA.version).toBeDefined();
8 | });
9 | });
10 |
--------------------------------------------------------------------------------
/packages/agents/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist",
5 | "rootDir": "./src"
6 | },
7 | "exclude": ["dist/**", "test/**"]
8 | }
9 |
--------------------------------------------------------------------------------
/packages/agents/tsconfig.test.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.json",
3 | "compilerOptions": {
4 | "outDir": "./dist"
5 | },
6 | "include": ["src/**/*.ts", "test/**/*.ts"],
7 | "exclude": ["dist/**"]
8 | }
9 |
--------------------------------------------------------------------------------
/pnpm-workspace.yaml:
--------------------------------------------------------------------------------
1 | packages:
2 | - packages/*
3 | - examples/*
4 | - docs
5 | onlyBuiltDependencies:
6 | - esbuild
7 | - sharp
8 | publishBranch: main
9 |
--------------------------------------------------------------------------------
/scripts/dev.mts:
--------------------------------------------------------------------------------
1 | import concurrently from 'concurrently';
2 |
3 | await concurrently([
4 | {
5 | command: 'pnpm packages:dev',
6 | name: 'packages',
7 | prefixColor: 'auto',
8 | },
9 | {
10 | command: 'pnpm -F docs dev',
11 | name: 'docs',
12 | prefixColor: 'auto',
13 | },
14 | ]);
15 |
--------------------------------------------------------------------------------
/scripts/embedMeta.ts:
--------------------------------------------------------------------------------
1 | import { readFileSync, writeFileSync } from 'node:fs';
2 | import { resolve } from 'node:path';
3 | import { cwd } from 'node:process';
4 |
5 | const packageJson = JSON.parse(
6 | readFileSync(resolve(cwd(), 'package.json'), 'utf-8'),
7 | );
8 |
9 | const dependencies = Object.entries(packageJson.dependencies);
10 | const openaiDependencies = dependencies.filter(
11 | ([name]) => name.startsWith('@openai/') || name === 'openai',
12 | );
13 |
14 | const versions = {
15 | [packageJson.name]: packageJson.version,
16 | ...Object.fromEntries(
17 | openaiDependencies.map(([name, version]) => [name, version]),
18 | ),
19 | };
20 |
21 | const METADATA = {
22 | name: packageJson.name,
23 | version: packageJson.version,
24 | versions: versions,
25 | };
26 |
27 | const output = `
28 | // This file is automatically generated
29 |
30 | export const METADATA = ${JSON.stringify(METADATA, null, 2)};
31 |
32 | export default METADATA;
33 | `;
34 |
35 | writeFileSync(resolve(cwd(), 'src/metadata.ts'), output, 'utf-8');
36 |
--------------------------------------------------------------------------------
/tsc-multi.json:
--------------------------------------------------------------------------------
1 | {
2 | "targets": [
3 | { "extname": ".js", "module": "es2022", "moduleResolution": "node" },
4 | { "extname": ".mjs", "module": "esnext" }
5 | ],
6 | "projects": [
7 | "packages/agents-core/tsconfig.json",
8 | "packages/agents-openai/tsconfig.json",
9 | "packages/agents-realtime/tsconfig.json",
10 | "packages/agents/tsconfig.json",
11 | "packages/agents-extensions/tsconfig.json"
12 | ]
13 | }
14 |
--------------------------------------------------------------------------------
/tsconfig.examples.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2017",
4 | "lib": ["ES2018", "DOM"],
5 | "module": "ESNext",
6 | "moduleResolution": "node",
7 | "noUncheckedSideEffectImports": true,
8 | "declaration": true,
9 | "sourceMap": true,
10 | "stripInternal": true,
11 | "esModuleInterop": true,
12 | "forceConsistentCasingInFileNames": true,
13 | "strict": true,
14 | "noUnusedLocals": true,
15 | "skipLibCheck": true
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/tsconfig.test.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "outDir": "./dist",
4 | "paths": {
5 | "@openai/agents-core": ["./src/index.ts"],
6 | "@openai/agents-core/_shims": ["./src/shims/shims-node.ts"]
7 | }
8 | },
9 | "include": ["src/**/*.ts", "test/**/*.ts"],
10 | "exclude": ["dist/**"]
11 | }
12 |
--------------------------------------------------------------------------------
/verdaccio-config.yml:
--------------------------------------------------------------------------------
1 | storage: .cache/verdaccio/storage
2 | auth:
3 | htpasswd:
4 | file: .cache/verdaccio/htpasswd
5 | uplinks:
6 | npmjs:
7 | url: https://registry.npmjs.org/
8 | packages:
9 | '@openai/*':
10 | access: $all
11 | publish: $all
12 | proxy: npmjs
13 | '**':
14 | proxy: npmjs
15 | log: { type: stdout, format: pretty, level: http }
16 |
--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vitest/config';
2 |
3 | export default defineConfig({
4 | test: {
5 | workspace: ['packages/*'],
6 | globalSetup: './helpers/tests/setup.ts',
7 | // Enable code coverage reporting with Vitest's built-in integration. We
8 | // only enable it for the monorepo packages (workspaces) so that the
9 | // initial report focuses on our public libraries and avoids unnecessary
10 | // noise from docs and examples.
11 | coverage: {
12 | provider: 'v8',
13 | reporter: ['text', 'html', 'json'],
14 | all: true,
15 | // Only include source files from the published packages. This keeps the
16 | // metrics meaningful and prevents Vitest from trying to instrument node
17 | // dependencies or the compiled dist folder.
18 | include: ['packages/**/src/**/*.ts'],
19 | exclude: ['**/*.d.ts', 'packages/**/test/**', 'packages/**/dist/**'],
20 | },
21 | },
22 | });
23 |
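The `workspace: ['packages/*']` entry turns each package directory into its own Vitest project, so a package can layer its own settings on top of this root config. A hypothetical per-package config that the glob would pick up:

```ts
// e.g. packages/<name>/vitest.config.ts (illustrative; the actual
// per-package configs are not shown in this dump).
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    environment: 'node',
    include: ['test/**/*.test.ts'],
  },
});
```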
--------------------------------------------------------------------------------
/vitest.integration.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vitest/config';
2 |
3 | export default defineConfig({
4 | test: {
5 | include: ['./integration-tests/*.test.ts'],
6 | globalSetup: './integration-tests/_helpers/setup.ts',
7 | },
8 | });
9 |
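Integration tests are kept out of the default run and picked up only through this config's `include` glob. A hypothetical test matching that pattern:

```ts
// e.g. integration-tests/smoke.test.ts (illustrative; the repo's real
// integration tests are not shown in this dump).
import { describe, it, expect } from 'vitest';
import { Agent } from '@openai/agents';

describe('installed package smoke test', () => {
  it('constructs an agent', () => {
    const agent = new Agent({ name: 'Smoke' });
    expect(agent.name).toBe('Smoke');
  });
});
```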
--------------------------------------------------------------------------------