├── .cursor
└── rules
│ ├── always-applied
│ ├── core-principles.mdc
│ └── fern-components.mdc
│ ├── code-standards.mdc
│ ├── content-templates.mdc
│ ├── examples-documentation.mdc
│ ├── glob-based
│ ├── examples-documentation.mdc
│ ├── mdx-components.mdc
│ ├── quickstart-guide.mdc
│ └── workflows-documentation.mdc
│ └── index.mdc
├── .cursorignore
├── .github
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── check.yml
│ ├── pr-review.yml
│ ├── preview-docs.yml
│ ├── preview-sdks.yml
│ ├── publish-docs.yml
│ ├── release-all.yml
│ ├── release-csharp-sdk.yml
│ ├── release-go-sdk.yml
│ ├── release-java-sdk.yml
│ ├── release-python-sdk.yml
│ ├── release-ruby-sdk.yml
│ ├── release-ts-sdk.yml
│ ├── update-openapi.yml
│ └── update-plain.yml
├── .gitignore
├── README.md
├── advanced.md
├── dev-docs.json
└── fern
├── GHL.mdx
├── advanced
└── sip
│ ├── sip-plivo.mdx
│ ├── sip-telnyx.mdx
│ ├── sip-trunk.mdx
│ ├── sip-twilio.mdx
│ ├── sip-zadarma.mdx
│ └── sip.mdx
├── api-reference
├── openapi.mdx
└── swagger.mdx
├── apis
├── api
│ ├── generators.yml
│ ├── openapi-overrides.yml
│ ├── openapi.json
│ └── patches
│ │ └── 2022-10-23-offset-to-integer.patch
└── webhooks
│ ├── generators.yml
│ ├── openapi-overrides.yml
│ └── openapi.yml
├── assets
├── batch-sample.csv
├── close-playground.js
├── styles.css
└── widget.js
├── assistants.mdx
├── assistants
├── assistant-hooks.mdx
├── background-messages.mdx
├── call-analysis.mdx
├── call-recording.mdx
├── dynamic-variables.mdx
├── examples
│ ├── docs-agent.mdx
│ ├── inbound-support.mdx
│ └── voice-widget.mdx
├── persistent-assistants.mdx
├── personalization.mdx
└── voice-formatting-plan.mdx
├── blocks.mdx
├── blocks
├── block-types.mdx
└── steps.mdx
├── call-forwarding.mdx
├── calls
├── call-dynamic-transfers.mdx
├── call-ended-reason.mdx
├── call-features.mdx
├── call-handling-with-vapi-and-twilio.mdx
├── call-outbound.mdx
├── voicemail-detection.mdx
└── websocket-transport.mdx
├── changelog
├── 2024-10-07.mdx
├── 2024-10-08.mdx
├── 2024-10-09.mdx
├── 2024-10-10.mdx
├── 2024-10-13.mdx
├── 2024-10-16.mdx
├── 2024-10-19.mdx
├── 2024-10-22.mdx
├── 2024-10-25.mdx
├── 2024-10-29.mdx
├── 2024-10-30.mdx
├── 2024-11-03.mdx
├── 2024-11-04.mdx
├── 2024-11-06.mdx
├── 2024-11-11.mdx
├── 2024-11-14.mdx
├── 2024-11-15.mdx
├── 2024-11-21.mdx
├── 2024-11-22.mdx
├── 2024-11-24.mdx
├── 2024-11-25.mdx
├── 2024-11-27.mdx
├── 2024-11-30.mdx
├── 2024-12-03.mdx
├── 2024-12-05.mdx
├── 2024-12-06.mdx
├── 2024-12-09.mdx
├── 2024-12-10.mdx
├── 2024-12-11.mdx
├── 2024-12-13.mdx
├── 2024-12-14.mdx
├── 2024-12-19.mdx
├── 2024-12-21.mdx
├── 2024-12-30.mdx
├── 2025-01-05.mdx
├── 2025-01-07.mdx
├── 2025-01-11.mdx
├── 2025-01-14.mdx
├── 2025-01-15.mdx
├── 2025-01-20.mdx
├── 2025-01-21.mdx
├── 2025-01-22.mdx
├── 2025-01-29.mdx
├── 2025-02-01.mdx
├── 2025-02-04.md
├── 2025-02-10.mdx
├── 2025-02-17.mdx
├── 2025-02-20.mdx
├── 2025-02-25.mdx
├── 2025-02-27.mdx
├── 2025-03-02.mdx
├── 2025-03-06.mdx
├── 2025-03-09.mdx
├── 2025-03-13.mdx
├── 2025-03-14.mdx
├── 2025-03-15.mdx
├── 2025-03-17.mdx
├── 2025-03-19.mdx
├── 2025-03-20.mdx
├── 2025-03-21.mdx
├── 2025-03-22.mdx
├── 2025-03-23.mdx
├── 2025-03-27.mdx
├── 2025-03-28.mdx
├── 2025-03-30.mdx
├── 2025-04-03.mdx
├── 2025-04-04.mdx
├── 2025-04-05.mdx
├── 2025-04-08.mdx
├── 2025-04-11.mdx
├── 2025-04-12.mdx
├── 2025-04-15.mdx
├── 2025-04-16.mdx
├── 2025-04-17.mdx
├── 2025-04-18.mdx
├── 2025-04-23.mdx
├── 2025-04-24.mdx
├── 2025-04-25.mdx
├── 2025-04-26.mdx
├── 2025-04-27.mdx
├── 2025-04-29.mdx
├── 2025-04-30.mdx
├── 2025-05-01.mdx
├── 2025-05-03.mdx
├── 2025-05-06.mdx
├── 2025-05-07.mdx
├── 2025-05-08.mdx
├── 2025-05-09.mdx
├── 2025-05-10.mdx
├── 2025-05-13.mdx
├── 2025-05-14.mdx
├── 2025-05-15.mdx
├── 2025-05-16.mdx
├── 2025-05-17.mdx
├── 2025-05-18.mdx
├── 2025-05-19.mdx
├── 2025-05-20.mdx
├── 2025-05-22.mdx
├── 2025-05-23.mdx
├── 2025-05-24.mdx
├── 2025-05-25.mdx
├── 2025-05-26.mdx
├── 2025-05-27.mdx
├── 2025-05-28.mdx
├── 2025-05-30.mdx
├── 2025-05-31.mdx
├── 2025-06-03.mdx
├── 2025-06-04.mdx
├── 2025-06-06.mdx
└── overview.mdx
├── chat
├── non-streaming.mdx
├── openai-compatibility.mdx
├── quickstart.mdx
├── session-management.mdx
└── streaming.mdx
├── community
├── appointment-scheduling.mdx
├── comparisons.mdx
├── conferences.mdx
├── demos.mdx
├── expert-directory.mdx
├── ghl.mdx
├── guide.mdx
├── inbound.mdx
├── knowledgebase.mdx
├── myvapi.mdx
├── outbound.mdx
├── podcast.mdx
├── snippets-sdks-tutorials.mdx
├── special-mentions.mdx
├── squads.mdx
├── television.mdx
└── usecase.mdx
├── custom.js
├── customization
├── custom-keywords.mdx
├── custom-llm
│ ├── fine-tuned-openai-models.mdx
│ ├── tool-calling-integration.mdx
│ └── using-your-server.mdx
├── custom-transcriber.mdx
├── custom-voices
│ ├── custom-voice.mdx
│ ├── elevenlabs.mdx
│ ├── playht.mdx
│ └── tavus.mdx
├── jwt-authentication.mdx
├── multilingual.mdx
├── provider-keys.mdx
└── speech-configuration.mdx
├── debugging.mdx
├── docs-agent-prompt.txt
├── docs.yml
├── enterprise
├── onprem.mdx
└── plans.mdx
├── examples.mdx
├── faq.mdx
├── fern.config.json
├── glossary.mdx
├── how-vapi-works.mdx
├── info-hierarchy.mdx
├── issue-reporting.mdx
├── knowledge-base
├── integrating-with-trieve.mdx
├── knowledge-base.mdx
└── using-query-tool.mdx
├── knowledgebase.mdx
├── openai-realtime.mdx
├── overview.mdx
├── phone-calling.mdx
├── phone-numbers.mdx
├── phone-numbers
├── free-telephony.mdx
├── import-twillio.mdx
├── phone-number-hooks.mdx
└── telnyx.mdx
├── pricing.mdx
├── prompting-guide.mdx
├── providers
├── chat-dash.mdx
├── cloud
│ ├── cloudflare.mdx
│ ├── gcp.mdx
│ ├── s3.mdx
│ └── supabase.mdx
├── klen-ai.mdx
├── model
│ ├── deepinfra.mdx
│ ├── gemini.mdx
│ ├── groq.mdx
│ ├── openai.mdx
│ ├── openrouter.mdx
│ ├── perplexity.mdx
│ └── togetherai.mdx
├── observability
│ └── langfuse.mdx
├── transcriber
│ ├── assembly-ai.mdx
│ ├── deepgram.mdx
│ ├── gladia.mdx
│ ├── google.mdx
│ └── talkscriber.mdx
├── vapify.mdx
├── video
│ └── tavus.mdx
├── voice
│ ├── azure.mdx
│ ├── cartesia.mdx
│ ├── deepgram.mdx
│ ├── elevenlabs.mdx
│ ├── imnt.mdx
│ ├── neets.mdx
│ ├── openai.mdx
│ ├── playht.mdx
│ ├── rimeai.mdx
│ ├── sesame.mdx
│ └── vapi-voices.mdx
├── voiceflow.mdx
└── voicerr.mdx
├── quickstart.mdx
├── quickstart
├── introduction.mdx
├── phone.mdx
└── web.mdx
├── resources.mdx
├── rss-feed.mdx
├── sdk
├── mcp-server.mdx
└── web.mdx
├── sdks.mdx
├── security-and-privacy
├── GDPR.mdx
├── PCI.mdx
├── hipaa.mdx
├── privacy-policy.mdx
├── soc.mdx
└── tos.mdx
├── server-sdks.mdx
├── server-url.mdx
├── server-url
├── developing-locally.mdx
├── events.mdx
├── server-authentication.mdx
└── setting-server-urls.mdx
├── snippets
├── faq-snippet.mdx
├── quickstart
│ ├── dashboard
│ │ ├── assistant-setup-inbound.mdx
│ │ └── provision-phone-number-with-vapi.mdx
│ ├── phone
│ │ └── get-a-phone-number.mdx
│ ├── platform-specific
│ │ └── no-code-prerequisites.mdx
│ └── web
│ │ └── links.tsx
├── sdk.mdx
├── sdks
│ └── web
│ │ ├── import-web-sdk.mdx
│ │ ├── install-web-sdk.mdx
│ │ └── pass-api-keys.mdx
└── video
│ ├── video.css
│ └── videos.tsx
├── squads-example.mdx
├── squads.mdx
├── squads
└── silent-transfers.mdx
├── static
├── audio
│ ├── cole-sample.wav
│ ├── elliot-sample.wav
│ ├── hana-sample.wav
│ ├── harry-sample.wav
│ ├── lily-sample.wav
│ ├── neha-sample.wav
│ ├── paige-sample.wav
│ ├── rohan-sample.wav
│ ├── savannah-sample.wav
│ └── spencer-sample.wav
├── gifs
│ ├── create-assistant.gif
│ ├── create-number.gif
│ ├── dashboard-call.gif
│ └── outbound-call.gif
├── images
│ ├── advanced-tab
│ │ ├── livekit-smart-endpointing.png
│ │ └── vapi-voicemail-detection.png
│ ├── blocks
│ │ └── food-order-steps.png
│ ├── changelog
│ │ ├── additional-vapi-voices.png
│ │ ├── ai-edge-condition-prompt.png
│ │ ├── anthropic-model.png
│ │ ├── assembly-ai.png
│ │ ├── assistant-models.png
│ │ ├── auto-reload.png
│ │ ├── azure-openai-credentials.png
│ │ ├── billing-addon-concurrent-calls.png
│ │ ├── byosiptrunkcredential.png
│ │ ├── call-artifact-recording.png
│ │ ├── cartesia-languages.png
│ │ ├── credits.png
│ │ ├── custom-llm-credential.png
│ │ ├── deepgram-voices.png
│ │ ├── fallback-plan.png
│ │ ├── first-message.png
│ │ ├── gemini-2.0-flash-lite.png
│ │ ├── google-calendar-tool.png
│ │ ├── google-model.png
│ │ ├── gpt-4.1-models.png
│ │ ├── gpt-4o-2024-08-06-ptu.png
│ │ ├── groq-new-model.png
│ │ ├── hume-voice-configuration.png
│ │ ├── inflection-ai.png
│ │ ├── knowledge-base-endpoints.png
│ │ ├── langfuse.png
│ │ ├── neuphonic.png
│ │ ├── new-groq-models.png
│ │ ├── send-text-tool.png
│ │ ├── slack-tool.png
│ │ ├── subscription-concurrency.png
│ │ ├── tavus-credentials.png
│ │ ├── test-suite-management.png
│ │ ├── vapi-voice-configuration.png
│ │ ├── vapi-voice-kylie.png
│ │ ├── webhook-credential.png
│ │ └── xai-model.png
│ ├── credentials
│ │ └── provider
│ │ │ ├── cloud-provider-aws-s3.png
│ │ │ ├── cloud-provider-cloudflare-r2.png
│ │ │ ├── cloud-provider-gcp-hmac.png
│ │ │ └── cloud-provider-supabase-s3.png
│ ├── favicon.ico
│ ├── intro
│ │ └── custom-vs-vapi.png
│ ├── knowledge-base
│ │ ├── assistant.png
│ │ ├── crawl.png
│ │ ├── create-dataset.png
│ │ ├── edit-chunk.png
│ │ ├── files.png
│ │ ├── query-tool.png
│ │ ├── search-playground.png
│ │ ├── trieve-credential.png
│ │ ├── upload-files-advanced.png
│ │ └── upload-files.png
│ ├── learn
│ │ ├── billing
│ │ │ ├── billing-example-template.png
│ │ │ ├── billing-limits-exceeded.png
│ │ │ ├── billing-limits.png
│ │ │ ├── call-pricing-breakdown.png
│ │ │ ├── cost-estimate.gif
│ │ │ ├── cost-routing.png
│ │ │ ├── custom-model-inbound-phone-example.png
│ │ │ ├── outbound-phone-example.png
│ │ │ └── web-interviews-example.png
│ │ └── platform
│ │ │ └── vapi-orchestration.png
│ ├── logo
│ │ ├── logo-dark.svg
│ │ └── logo-light.svg
│ ├── pricing
│ │ └── voice-pipeline-cost-breakdown.png
│ ├── providers
│ │ └── langfuse-example.png
│ ├── quickstart
│ │ ├── assistant-id-dashboard.png
│ │ ├── billing
│ │ │ ├── auto-reload.png
│ │ │ ├── download-invoice.png
│ │ │ ├── invoice-detail-form.png
│ │ │ └── sample-invoice.png
│ │ ├── dashboard
│ │ │ ├── assistant-created.png
│ │ │ ├── assistant-model-set-up.png
│ │ │ ├── assistant-transcriber-config.png
│ │ │ ├── assistant-voice-config.png
│ │ │ ├── auth-ui.png
│ │ │ ├── buy-phone-number-modal.png
│ │ │ ├── buy-vapi-phone-number-modal.png
│ │ │ ├── call-assistant-web-dashboard.png
│ │ │ ├── choose-blank-template.png
│ │ │ ├── create-new-assistant-button.png
│ │ │ ├── create-vapi-phone-number-empty-view.png
│ │ │ ├── create-vapi-phone-number.png
│ │ │ ├── inbound-assistant-set.png
│ │ │ ├── model-provider-keys.png
│ │ │ ├── name-your-assistant.png
│ │ │ ├── set-assistant-number.png
│ │ │ ├── set-llm.png
│ │ │ ├── set-transcriber.png
│ │ │ ├── set-voice.png
│ │ │ ├── telnyx-modal.png
│ │ │ ├── transcriber-providers-keys.png
│ │ │ ├── vapi-api-keys-tab.png
│ │ │ ├── vapi-assistant-transcriber-provider-dropdown.png
│ │ │ ├── vapi-assistant-transcriber-publish.png
│ │ │ ├── vapi-assistant-transcriber-tab.png
│ │ │ ├── vapi-assistants-sidebar-selection.png
│ │ │ ├── vapi-dashboard-post-signup.png
│ │ │ ├── vapi-phone-number-config.png
│ │ │ ├── vapi-phone-numbers-sidebar-selection.png
│ │ │ └── voice-provider-keys.png
│ │ ├── phone
│ │ │ ├── buy-phone-number-twilio.png
│ │ │ ├── dashboard-import-phone-number.png
│ │ │ ├── import-twilio-number-dashboard.png
│ │ │ ├── outbound
│ │ │ │ ├── assistant-model-setup.png
│ │ │ │ └── dial-outbound-call-dashboard.png
│ │ │ ├── phone-number-import-complete.png
│ │ │ ├── set-billing-information.png
│ │ │ ├── twilio-api-key-nav.png
│ │ │ └── twilio-credentials.png
│ │ ├── quickstart-banner.png
│ │ └── web
│ │ │ └── microphone-permissions.png
│ ├── server-url
│ │ ├── authentication
│ │ │ ├── custom-llm.png
│ │ │ └── webhook.png
│ │ ├── developing-locally
│ │ │ ├── logging-events-locally.png
│ │ │ ├── ngrok-cli-ui.png
│ │ │ └── reverse-proxy-developing-locally.png
│ │ ├── overview-graphic.png
│ │ └── settings-server-urls
│ │ │ ├── assistant-server-url-dashboard.png
│ │ │ ├── function-call-server-url-dashboard.png
│ │ │ ├── org-settings-server-urls.png
│ │ │ ├── server-url-priority.png
│ │ │ └── setting-account-server-url.png
│ ├── sip
│ │ ├── .DS_Store
│ │ ├── sip-plivo-attach-number-to-inbound-trunk.png
│ │ ├── sip-plivo-buy-phone-number.png
│ │ ├── sip-plivo-create-new-inbound-trunk.png
│ │ ├── sip-plivo-create-new-ip-uri.png
│ │ ├── sip-plivo-create-vapi-assistant.png
│ │ ├── sip-plivo-ip-acl.png
│ │ ├── sip-plivo-outbound-call-response.png
│ │ ├── sip-plivo-outbound-trunk.png
│ │ ├── sip-plivo-phone-number-response.png
│ │ ├── sip-plivo-sip-trunk-credential-response.png
│ │ ├── sip-plivo-termination-sip-domain.png
│ │ ├── sip-plivo-vapi-dashboard-call.png
│ │ ├── sip-plivo-vapi-dashboard.png
│ │ ├── sip-twilio-ip-1.png
│ │ ├── sip-twilio-ip-2.png
│ │ ├── sip-twilio-ip-authentication.png
│ │ ├── sip-twilio-number-attach.png
│ │ ├── sip-twilio-origination-creation.png
│ │ ├── sip-twilio-origination.png
│ │ ├── sip-twilio-termination-uri.png
│ │ ├── sip-twilio-trunk.png
│ │ ├── telynx-inbound.png
│ │ ├── telynx-outbound-auth.png
│ │ └── telynx-outbound-settings.png
│ ├── tests
│ │ └── voice-testing-page.png
│ ├── tools
│ │ ├── assistant-select-google-calendar-tool.png
│ │ ├── gohighlevel-connect.png
│ │ ├── gohighlevel-create.png
│ │ ├── gohighlevel-select.png
│ │ ├── google-calendar-connect.png
│ │ └── google-calendar-create.png
│ ├── voice-tab
│ │ └── sesame
│ │ │ └── cloning.png
│ └── workflows
│ │ ├── examples
│ │ ├── appointment-scheduling.png
│ │ ├── clinic-triage-scheduling.png
│ │ ├── ecommerce-order-management.png
│ │ └── lead-qualification.png
│ │ ├── logic-condition.png
│ │ ├── workflow-builder-example.png
│ │ ├── workflows-add-node.png
│ │ ├── workflows-ai-edge.png
│ │ ├── workflows-api-node.png
│ │ ├── workflows-complete-basic-flow.png
│ │ ├── workflows-configure-first-node.png
│ │ ├── workflows-conversation-node.png
│ │ ├── workflows-create-workflow.png
│ │ ├── workflows-edit-edge-condition.png
│ │ ├── workflows-end-call-node.png
│ │ ├── workflows-end-node.png
│ │ ├── workflows-extract-node.png
│ │ ├── workflows-extract-variables.png
│ │ ├── workflows-global-node-configuration.png
│ │ ├── workflows-global-node-toggle.png
│ │ ├── workflows-overview.png
│ │ ├── workflows-quickstart-flow.png
│ │ ├── workflows-second-node-added.png
│ │ ├── workflows-second-node-prompt.png
│ │ ├── workflows-test-call-button.png
│ │ ├── workflows-tools-node.png
│ │ ├── workflows-transfer-call-node.png
│ │ └── workflows-transfer-node.png
├── spreadsheets
│ ├── appointment-scheduling
│ │ ├── appointments.csv
│ │ ├── customers.csv
│ │ └── services.csv
│ ├── clinic
│ │ ├── appointments.csv
│ │ ├── patients.csv
│ │ ├── providers.csv
│ │ └── triage_protocols.csv
│ ├── ecommerce
│ │ ├── customers.csv
│ │ ├── orders.csv
│ │ ├── products.csv
│ │ └── returns.csv
│ ├── inbound-support
│ │ ├── accounts.csv
│ │ └── transactions.csv
│ └── lead-qualification
│ │ ├── call_outcomes.csv
│ │ ├── leads.csv
│ │ └── products.csv
└── videos
│ ├── debugging
│ ├── api-logs.mp4
│ ├── call-logs.mp4
│ ├── tool-testing.mp4
│ ├── voice-test-suites.mp4
│ └── webhook-logs.mp4
│ ├── inbound-support
│ ├── assign-phone-number.mp4
│ ├── assistant-tools.mp4
│ ├── configure-assistant.mp4
│ ├── create-assistant.mp4
│ ├── create-test-suite.mp4
│ └── run-test-suite.mp4
│ ├── upload-files.mp4
│ └── workflows
│ ├── configure-start-node.mp4
│ ├── create-and-connect-nodes.mp4
│ ├── create-conversation-node.mp4
│ └── create-workflow.mp4
├── status.mdx
├── support.mdx
├── tcpa-consent.mdx
├── test
├── chat-testing.mdx
├── test-suites.mdx
└── voice-testing.mdx
├── tools
├── custom-tools.mdx
├── default-tools.mdx
├── go-high-level.mdx
├── google-calendar.mdx
├── google-sheets.mdx
├── introduction.mdx
├── mcp.mdx
└── slack.mdx
├── voice-fallback-plan.mdx
├── widget
├── index.tsx
├── package-lock.json
├── package.json
├── vite.config.ts
└── voice-widget.tsx
├── workflows.mdx
└── workflows
├── examples
├── appointment-scheduling.mdx
├── clinic-triage-scheduling.mdx
├── ecommerce-order-management.mdx
└── lead-qualification.mdx
├── overview.mdx
└── quickstart.mdx
/.cursorignore:
--------------------------------------------------------------------------------
1 | **/.definition
2 | **/.preview/**
3 | node_modules/
4 | dist/
5 | .env
6 | .DS_Store
7 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 |
4 | -
5 |
6 | ## Testing Steps
7 |
8 | - [ ] Run the app locally using `fern docs dev` or navigate to preview deployment
9 | - [ ] Ensure that the changed pages and code snippets work
10 |
--------------------------------------------------------------------------------
/.github/workflows/check.yml:
--------------------------------------------------------------------------------
1 | name: Fern Check
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches:
7 | - main
8 |
9 | jobs:
10 | fern-check:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout repository
14 | uses: actions/checkout@v4
15 |
16 | - name: Install Fern
17 | run: npm install -g fern-api
18 |
19 | - name: Check API is valid
20 | run: fern check
21 |
--------------------------------------------------------------------------------
/.github/workflows/preview-docs.yml:
--------------------------------------------------------------------------------
1 | name: Preview Docs
2 |
3 | on: pull_request
4 |
5 | jobs:
6 | run:
7 | runs-on: ubuntu-latest
8 | permissions: write-all
9 | steps:
10 | - name: Checkout repository
11 | uses: actions/checkout@v4
12 |
13 | - name: Install Fern
14 | run: npm install -g fern-api
15 |
16 | - name: Generate preview URL
17 | id: generate-docs
18 | env:
19 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
20 | POSTHOG_PROJECT_API_KEY: ${{ secrets.POSTHOG_PROJECT_API_KEY }}
21 | run: |
22 | OUTPUT=$(fern generate --docs --preview --log-level debug 2>&1) || true
23 | echo "$OUTPUT"
24 | URL=$(echo "$OUTPUT" | grep -oP 'Published docs to \K.*(?= \()')
25 | echo "Preview URL: $URL"
26 | echo "🌿 Preview your docs: $URL" > preview_url.txt
27 |
28 | - name: Comment URL in PR
29 | uses: thollander/actions-comment-pull-request@v2.4.3
30 | with:
31 | filePath: preview_url.txt
--------------------------------------------------------------------------------
/.github/workflows/publish-docs.yml:
--------------------------------------------------------------------------------
1 | name: Publish Docs
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | run:
10 | runs-on: ubuntu-latest
11 | if: ${{ github.event_name == 'push' && contains(github.ref, 'refs/heads/main') && github.run_number > 1 }}
12 | steps:
13 | - name: Checkout repository
14 | uses: actions/checkout@v4
15 |
16 | - name: Install Fern
17 | run: npm install -g fern-api
18 |
19 | - name: Publish Docs
20 | env:
21 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
22 | POSTHOG_PROJECT_API_KEY: ${{ secrets.POSTHOG_PROJECT_API_KEY }}
23 | run: fern generate --docs --log-level debug
--------------------------------------------------------------------------------
/.github/workflows/release-csharp-sdk.yml:
--------------------------------------------------------------------------------
1 | name: Release C# SDK
2 |
3 | on:
4 | workflow_call:
5 | inputs:
6 | makePR:
7 | description: Make Pull Request
8 | default: false
9 | type: boolean
10 | version:
11 | description: "The version of the C# SDK that you would like to release"
12 | required: true
13 | type: string
14 | workflow_dispatch:
15 | inputs:
16 | version:
17 | description: "The version of the C# SDK that you would like to release"
18 | required: true
19 | type: string
20 | makePR:
21 | description: Make Pull Request
22 | required: true
23 | default: false
24 | type: boolean
25 |
26 | jobs:
27 | release:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout repo
31 | uses: actions/checkout@v3
32 |
33 | - name: Setup node
34 | uses: actions/setup-node@v3
35 |
36 | - name: Download Fern
37 | run: npm install -g fern-api
38 |
39 | - name: Release C# SDK
40 | env:
41 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
42 | NUGET_API_KEY: ${{ secrets.NUGET_API_KEY }}
43 | run: |
44 | if [ "${{ github.event.inputs.makePR }}" = "true" ]; then
45 | fern generate --api api --group csharp-sdk --version ${{ inputs.version }} --mode pull-request --log-level debug
46 | else
47 | fern generate --api api --group csharp-sdk --version ${{ inputs.version }} --log-level debug
48 | fi
--------------------------------------------------------------------------------
/.github/workflows/release-go-sdk.yml:
--------------------------------------------------------------------------------
1 | name: Release Go SDK
2 |
3 | on:
4 | workflow_call:
5 | inputs:
6 | makePR:
7 | description: Make Pull Request
8 | default: false
9 | type: boolean
10 | version:
11 |         description: "The version of the Go SDK that you would like to release"
12 | required: true
13 | type: string
14 | workflow_dispatch:
15 | inputs:
16 | version:
17 | description: "The version of the Go SDK that you would like to release"
18 | required: true
19 | type: string
20 | makePR:
21 | description: Make Pull Request
22 | required: true
23 | default: false
24 | type: boolean
25 |
26 | jobs:
27 | release:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout repo
31 | uses: actions/checkout@v3
32 |
33 | - name: Setup node
34 | uses: actions/setup-node@v3
35 |
36 | - name: Download Fern
37 | run: npm install -g fern-api
38 |
39 | - name: Release Go SDK
40 | env:
41 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
42 | run: |
43 | if [ "${{ github.event.inputs.makePR }}" = "true" ]; then
44 | fern generate --api api --group go-sdk --version ${{ inputs.version }} --mode pull-request --log-level debug
45 | else
46 | fern generate --api api --group go-sdk --version ${{ inputs.version }} --log-level debug
47 | fi
--------------------------------------------------------------------------------
/.github/workflows/release-java-sdk.yml:
--------------------------------------------------------------------------------
1 | name: Release Java SDK
2 |
3 | on:
4 | workflow_call:
5 | inputs:
6 | makePR:
7 | description: Make Pull Request
8 | default: false
9 | type: boolean
10 | version:
11 |         description: "The version of the Java SDK that you would like to release"
12 | required: true
13 | type: string
14 | workflow_dispatch:
15 | inputs:
16 | version:
17 | description: "The version of the Java SDK that you would like to release"
18 | required: true
19 | type: string
20 | makePR:
21 | description: Make Pull Request
22 | required: true
23 | default: false
24 | type: boolean
25 |
26 | jobs:
27 | release:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout repo
31 | uses: actions/checkout@v3
32 |
33 | - name: Setup node
34 | uses: actions/setup-node@v3
35 |
36 | - name: Download Fern
37 | run: npm install -g fern-api
38 |
39 | - name: Release Java SDK
40 | env:
41 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
42 | MAVEN_USERNAME: ${{ secrets.MAVEN_USERNAME }}
43 | MAVEN_PASSWORD: ${{ secrets.MAVEN_PASSWORD }}
44 | run: |
45 | if [ "${{ github.event.inputs.makePR }}" = "true" ]; then
46 | fern generate --api api --group java-sdk --version ${{ inputs.version }} --mode pull-request --log-level debug
47 | else
48 | fern generate --api api --group java-sdk --version ${{ inputs.version }} --log-level debug
49 | fi
--------------------------------------------------------------------------------
/.github/workflows/release-python-sdk.yml:
--------------------------------------------------------------------------------
1 | name: Release Python SDK
2 |
3 | on:
4 | workflow_call:
5 | inputs:
6 | makePR:
7 | description: Make Pull Request
8 | default: false
9 | type: boolean
10 | version:
11 |         description: "The version of the Python SDK that you would like to release"
12 | required: true
13 | type: string
14 | workflow_dispatch:
15 | inputs:
16 | version:
17 | description: "The version of the Python SDK that you would like to release"
18 | required: true
19 | type: string
20 | makePR:
21 | description: Make Pull Request
22 | required: true
23 | default: false
24 | type: boolean
25 |
26 | jobs:
27 | release:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout repo
31 | uses: actions/checkout@v3
32 |
33 | - name: Setup node
34 | uses: actions/setup-node@v3
35 |
36 | - name: Download Fern
37 | run: npm install -g fern-api
38 |
39 | - name: Release Python SDK
40 | env:
41 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
42 | PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
43 | run: |
44 | if [ "${{ github.event.inputs.makePR }}" = "true" ]; then
45 | fern generate --api api --group python-sdk --version ${{ inputs.version }} --mode pull-request --log-level debug
46 | else
47 | fern generate --api api --group python-sdk --version ${{ inputs.version }} --log-level debug
48 | fi
49 |
--------------------------------------------------------------------------------
/.github/workflows/release-ruby-sdk.yml:
--------------------------------------------------------------------------------
1 | name: Release Ruby SDK
2 |
3 | on:
4 | workflow_call:
5 | inputs:
6 | makePR:
7 | description: Make Pull Request
8 | default: false
9 | type: boolean
10 | version:
11 |         description: "The version of the Ruby SDK that you would like to release"
12 | required: true
13 | type: string
14 | workflow_dispatch:
15 | inputs:
16 | version:
17 | description: "The version of the Ruby SDK that you would like to release"
18 | required: true
19 | type: string
20 | makePR:
21 | description: Make Pull Request
22 | required: true
23 | default: false
24 | type: boolean
25 |
26 | jobs:
27 | release:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout repo
31 | uses: actions/checkout@v3
32 |
33 | - name: Setup node
34 | uses: actions/setup-node@v3
35 |
36 | - name: Download Fern
37 | run: npm install -g fern-api
38 |
39 | - name: Release Ruby SDK
40 | env:
41 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
42 | RUBYGEMS_API_KEY: ${{ secrets.RUBYGEMS_API_KEY }}
43 | run: |
44 | if [ "${{ github.event.inputs.makePR }}" = "true" ]; then
45 | fern generate --api api --group ruby-sdk --version ${{ inputs.version }} --mode pull-request --log-level debug
46 | else
47 | fern generate --api api --group ruby-sdk --version ${{ inputs.version }} --log-level debug
48 | fi
--------------------------------------------------------------------------------
/.github/workflows/release-ts-sdk.yml:
--------------------------------------------------------------------------------
1 | name: Release TypeScript SDK
2 |
3 | on:
4 | workflow_call:
5 | inputs:
6 | makePR:
7 | description: Make Pull Request
8 | default: false
9 | type: boolean
10 | version:
11 |         description: "The version of the TypeScript SDK that you would like to release"
12 | required: true
13 | type: string
14 | workflow_dispatch:
15 | inputs:
16 | version:
17 | description: "The version of the TypeScript SDK that you would like to release"
18 | required: true
19 | type: string
20 | makePR:
21 | description: Make Pull Request
22 | required: true
23 | default: false
24 | type: boolean
25 |
26 | jobs:
27 | release:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout repo
31 | uses: actions/checkout@v3
32 |
33 | - name: Setup node
34 | uses: actions/setup-node@v3
35 |
36 | - name: Download Fern
37 | run: npm install -g fern-api
38 |
39 | - name: Release TypeScript SDK
40 | env:
41 | FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
42 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
43 | run: |
44 | if [ "${{ github.event.inputs.makePR }}" = "true" ]; then
45 | fern generate --api api --group ts-sdk --version ${{ inputs.version }} --mode pull-request --log-level debug
46 | else
47 | fern generate --api api --group ts-sdk --version ${{ inputs.version }} --log-level debug
48 | fi
--------------------------------------------------------------------------------
/.github/workflows/update-openapi.yml:
--------------------------------------------------------------------------------
1 | name: Update OpenAPI Specification
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: "0 0 * * *"
7 |
8 | jobs:
9 | update-openapi:
10 | runs-on: ubuntu-latest
11 | permissions:
12 | contents: write
13 | pull-requests: write
14 | steps:
15 | - uses: actions/checkout@v4
16 | with:
17 | token: ${{ secrets.GITHUB_TOKEN }}
18 | - name: Update OpenAPI Spec
19 | uses: fern-api/sync-openapi@v2
20 | with:
21 | token: ${{ secrets.GITHUB_TOKEN }}
22 | branch: 'update-openapi-spec'
23 | update_from_source: true
24 | add_timestamp: true
--------------------------------------------------------------------------------
/.github/workflows/update-plain.yml:
--------------------------------------------------------------------------------
1 | name: Index docs
2 |
3 | on:
4 | schedule:
5 | - cron: '0 */3 * * *'
6 |
7 | jobs:
8 | index:
9 | name: Index Documents
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 | - uses: actions/setup-node@v4
14 | with:
15 | node-version: '22'
16 |
17 | - name: Install CLI
18 | run: npm install -g @team-plain/cli@latest
19 |
20 | - name: Index Documents
21 | run: plain index-sitemap https://docs.vapi.ai/sitemap.xml
22 | env:
23 | PLAIN_API_KEY: ${{ secrets.PLAIN_API_KEY }}
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/.definition
2 | **/.preview/**
3 | node_modules/
4 | dist/
5 | .env
6 | .DS_Store
--------------------------------------------------------------------------------
/dev-docs.json:
--------------------------------------------------------------------------------
1 | {
2 | "gitHubApp": {
3 | "approvalWorkflow": true,
4 | "userDocsWorkflows": [
5 | "generateUserDocs"
6 | ],
7 | "issues": true
8 | }
9 | }
--------------------------------------------------------------------------------
/fern/api-reference/openapi.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: OpenAPI
3 | slug: api-reference/openapi
4 | ---
5 |
6 |
7 |
8 | Our OpenAPI is hosted at
9 | [https://api.vapi.ai/api-json](https://api.vapi.ai/api-json)
10 |
11 |
--------------------------------------------------------------------------------
/fern/api-reference/swagger.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Swagger
3 | slug: api-reference/swagger
4 | ---
5 |
6 |
7 |
8 | Our Swagger is hosted at [https://api.vapi.ai/api](https://api.vapi.ai/api)
9 |
10 |
--------------------------------------------------------------------------------
/fern/apis/api/patches/2022-10-23-offset-to-integer.patch:
--------------------------------------------------------------------------------
1 | diff --git a/fern/apis/api/openapi.json b/fern/apis/api/openapi.json
2 | index 0377888..b5c73a4 100644
3 | --- a/fern/apis/api/openapi.json
4 | +++ b/fern/apis/api/openapi.json
5 | @@ -2470,7 +2470,7 @@
6 | "description": "This is the page number to return. Defaults to 1.",
7 | "schema": {
8 | "minimum": 1,
9 | - "type": "number"
10 | + "type": "integer"
11 | }
12 | },
13 | {
14 |
--------------------------------------------------------------------------------
/fern/apis/webhooks/generators.yml:
--------------------------------------------------------------------------------
1 | api:
2 | specs:
3 | - openapi: ./openapi.yml
4 | overrides: ./openapi-overrides.yml
5 |
--------------------------------------------------------------------------------
/fern/apis/webhooks/openapi.yml:
--------------------------------------------------------------------------------
1 | openapi: 3.0.0
2 | info:
3 | title: Vapi Webhooks API
4 | servers:
5 | - url: https://{yourserver}.com
6 | paths:
7 | /server:
8 | post:
9 | summary: Server Message
10 | description: ""
11 | requestBody:
12 | content:
13 | application/json:
14 | schema:
15 | $ref: '../api/openapi.json#/components/schemas/ServerMessage'
16 | responses:
17 | '200':
18 | description: |
19 | This is the response that is expected from the server to the message.
20 |
21 | Note: Most messages don't expect a response. Only "assistant-request", "tool-calls" and "transfer-destination-request" do.
22 | content:
23 | application/json:
24 | schema:
25 | $ref: '../api/openapi.json#/components/schemas/ServerMessageResponse'
26 | /client:
27 | post:
28 | summary: Client Message
29 | description: |
30 | These are all the webhook messages that will be sent to the client-side SDKs during the call.
31 | Configure the messages you'd like to receive in `assistant.clientMessages`.
32 |
33 | requestBody:
34 | content:
35 | application/json:
36 | schema:
37 |             $ref: '../api/openapi.json#/components/schemas/ClientMessage'
38 | responses:
39 | '200':
40 | description: These are the messages that can be sent from client-side SDKs to control the call.
41 | content:
42 | application/json:
43 | schema:
44 | $ref: '../api/openapi.json#/components/schemas/ClientInboundMessage'
45 |
--------------------------------------------------------------------------------
/fern/assets/batch-sample.csv:
--------------------------------------------------------------------------------
1 | number,name
2 | 1234567890,Test test
--------------------------------------------------------------------------------
/fern/assets/close-playground.js:
--------------------------------------------------------------------------------
// Closes the open Fern API playground pane when the user navigates away via a
// header link or header tab, so the playground overlay does not remain open on
// top of the next page.
//
// Uses event delegation: a single listener on `document` inspects each click's
// target. The original implementation attached new click listeners to every
// header link/tab on EVERY document click, accumulating duplicate handlers
// over the life of the page; delegation avoids that entirely.
document.addEventListener('click', function (event) {
  // The playground's close/toggle button; present only while the playground
  // endpoint pane is open.
  function getPlaygroundCloseButton() {
    return document.querySelector('.playground-endpoint .fern-button.outlined');
  }

  // Guard against non-element targets (e.g. text nodes in some browsers).
  const target = event.target instanceof Element ? event.target : null;
  if (!target) {
    return;
  }

  // Did this click land on (or inside) a header link or header tab button?
  const headerControl = target.closest(
    '.fern-header a, .fern-header-container .fern-header-tab-button'
  );
  if (!headerControl) {
    return;
  }

  const closeButton = getPlaygroundCloseButton();
  if (closeButton) {
    closeButton.click();
  }
});
32 |
--------------------------------------------------------------------------------
/fern/assistants/persistent-assistants.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Persistent assistants
3 | subtitle: When to use persistent assistants in your workflow
4 | slug: assistants/persistent-assistants
5 | ---
6 |
7 |
8 | You might be wondering whether or not you should create an assistant using the `/assistant` endpoint with its `assistantId`. Or, can you just specify the assistant configuration when starting a call?
9 |
10 | The `/assistant` endpoint is there for convenience to save you creating your own assistants table.
11 |
12 |
13 | - You won't be adding more assistant properties on top of ours.
14 | - You want to use the same assistant across multiple calls.
15 |
16 |
17 | Otherwise, you can just specify the assistant configuration when starting a call.
18 |
--------------------------------------------------------------------------------
/fern/changelog/2024-10-08.mdx:
--------------------------------------------------------------------------------
1 | 1. **New GPT-4o Model Support for Azure OpenAI**: You can now specify the `gpt-4o-2024-08-06` model in the `models` field when configuring Azure OpenAI credentials. Use this model to access the latest GPT-4 operational capabilities in your applications.
2 |
3 | 2. **Specify Timestamps as Strings in `/logs`**: We now expect timestamps as strings when working with logs. Please make sure to handle this accordingly in your applications.
--------------------------------------------------------------------------------
/fern/changelog/2024-10-09.mdx:
--------------------------------------------------------------------------------
1 | 1. **Call Cost Information**: You can now use `call.costs[type=vapi].subType` to determine if a Vapi cost is `normal` or an `overage`.
2 |
3 | 2. **Updated Billing Page**: Your payments are now returned inside a table with pages on the [billing page](https://dashboard.vapi.ai/org/billing).
--------------------------------------------------------------------------------
/fern/changelog/2024-10-10.mdx:
--------------------------------------------------------------------------------
1 | 1. **Purchase Reserved Concurrency and Scale Infinitely**: You can now reserve more concurrent calls with Vapi and scale infinitely by switching to our new top up payment system on the [billing page](https://dashboard.vapi.ai/org/billing). To migrate, click "Switch to Credit Based Billing" and make a payment. Advantages include:
2 |
3 | - **Support More Users Without Limits**: You don't need to worry about getting throttled or staying under usage limits on the conversations you can have with Vapi.
4 | - **Predictable Budgets**: You know exactly how much you will spend on Vapi each month, and you can top up at any time as your needs grow.
5 | - **Select Add-Ons You Need**: The credit based billing page allows you to select HIPAA compliance, dedicated Slack support, and the maximum number of concurrent calls you expect.
6 |
7 | This will require human input to login and migrate your account. You will not be able to revert back to the old billing system.
--------------------------------------------------------------------------------
/fern/changelog/2024-10-13.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Call Transfer Modes Added**: you can now wait for an operator to speak first before providing a transfer message or summary when transferring calls to a new destination with `TransferPlan`. Configure this through *transferPlan.mode=`'warm-transfer-wait-for-operator-to-speak-first-and-then-say-message'`* or *transferPlan.mode=`'warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary'`* inside the request body of `POST /assistant` or `PATCH /assistant`.
2 |
3 | 2. **Unified Server Configuration in Assistants**: You can now use the `server` property in `Assistant.server`, `AssistantOverrides.server`, and when creating or updating assistants to specify webhook settings, including URL, secret, custom headers, and timeout. This replaces the old `serverUrl` and `serverUrlSecret` properties of `Assistant`.
4 |
5 | Include custom headers in your webhook requests by using the `headers` property within the `server` object when creating or updating assistants.
6 |
7 | 3. **Configure PlayHT Voice Engines**: You can now configure which PlayHT voice `model` generates voices for your application between `PlayHT2.0`, `PlayHT2.0-turbo`, and `Play3.0-mini`.
--------------------------------------------------------------------------------
/fern/changelog/2024-10-16.mdx:
--------------------------------------------------------------------------------
1 | 1. **Apply Coupons to Subscriptions**: You can now apply coupons by specifying a `couponId` to add to a subscription.
2 |
3 | 2. **Detect Custom Transcriber Failures in Call End Reasons**: You can now handle cases where a custom transcriber fails during a call with `'pipeline-error-custom-transcriber-failed'`, a new `endedReason` option. This is now accessible in `Call`, `ServerMessageStatusUpdate`, and `ServerMessageEndOfCallReport`.
4 |
5 | 3. **Corrected Typo in Example Custom Voice Request**: We fixed a typo in `CustomVoice.server`, where the example request now shows how to use the `"message"` parameter instead of the misspelled `"messsage"`.
--------------------------------------------------------------------------------
/fern/changelog/2024-10-19.mdx:
--------------------------------------------------------------------------------
1 | 1. **Custom Transcriber Support**: You can now integrate your own transcription service by using `CustomTranscriber` at `assistant.transcriber`, `call.squad.members.assistant.transcriber`, and `call.squad.members.assistantOverrides.transcriber`. Provide your custom transcription server details via `server.url` to receive real-time transcriptions during calls.
2 |
3 | 2. **Increased Maximum Call Duration**: The maximum allowed value for `maxDurationSeconds` has increased from 21,600 to 43,200 seconds when creating or updating `Assistant` or `AssistantOverrides`. You can now configure your assistant to handle calls lasting up to 12 hours.
4 |
5 | 3. **New Voice Provider 'tavus'**: You can now specify `tavus` as a voice provider in `Assistant.voice`, `AssistantOverrides.voice`, `Call.voice` and in the Voice Library.
6 |
7 | 4. **Subscription Status 'frozen' Added**: A new status `frozen` has been added to `Subscription.status`, indicating when a subscription is temporarily inactive.
8 |
9 | 5. **Added Subscription Coupon Codes**: You can now apply coupon codes to your subscription. Visit the [billing page](https://dashboard.vapi.ai/org/billing) to apply coupons to specific organizations within a subscription.
--------------------------------------------------------------------------------
/fern/changelog/2024-10-22.mdx:
--------------------------------------------------------------------------------
1 | 1. **Invite Multiple Users via Email**: You can now invite up to 100 users at once by providing a list of email addresses inside your [org users page](https://dashboard.vapi.ai/org/users). Click `'+'` after entering an email address, select the role as *Editor* or *Admin*, and click `'Invite'`.
2 |
3 | 2. **Simplified Subscription Status Handling**: Your subscription status no longer includes the `past-due` status, so you can streamline your subscription management without handling 'past-due' scenarios.
--------------------------------------------------------------------------------
/fern/changelog/2024-10-25.mdx:
--------------------------------------------------------------------------------
1 | 1. **Specify API Traffic Channel for Organizations**: You can now configure which `channel` (cluster) your API traffic will be routed to. Select between *daily* or *weekly* in your [organization settings page](https://dashboard.vapi.ai/org/settings).
2 |
3 | 2. **Customize Tavus Voice Properties**: You can now use Tavus as a voice provider under `assistant.voice`. Configure additional properties like language, recording options, and transcriptions via `assistant.voice.properties`.
4 |
5 | 3. **Multilingual Support in Tool Messages**: You can now use the `contents` property in `ToolMessageStart`, `ToolMessageFailed`, `ToolMessageDelayed`, and `ToolMessageComplete` to provide message variants for different languages. If you don't provide content for a language, the first item will be automatically translated to the active language during the conversation.
6 |
7 | 4. **Automatic Translation of Message Contents**: For `CustomMessage`, `BlockStartMessage`, and `BlockCompleteMessage`, if specific content isn't provided for a language in `contents`, Vapi automatically translates the first item to the active language by default.
8 |
9 | 5. **Removed Backchanneling Configuration**: The `backchannelingEnabled` property has been removed from when creating or updating `Assistant` or `AssistantOverrides`. Backchanneling is no longer configurable in assistant settings.
--------------------------------------------------------------------------------
/fern/changelog/2024-10-30.mdx:
--------------------------------------------------------------------------------
1 | 1. **Auto-reload Credits in Billing Page**: You can now auto-reload credits and check credits remaining for subscriptions within the [updated billing page](https://dashboard.vapi.ai/org/billing).
2 |
3 |
4 |
5 |
6 |
7 | 2. **Expanded Language Options in `CartesiaVoice`**: You can now specify additional languages in `CartesiaVoice.language` (optional), including 'hi' (Hindi), 'it' (Italian), 'ko' (Korean), 'nl' (Dutch), 'pl' (Polish), 'ru' (Russian), 'sv' (Swedish), and 'tr' (Turkish). Refer to the [CartesiaVoice](https://api.vapi.ai/api) schema for more details.
8 |
9 |
10 |
11 |
12 |
13 | 3. **Enhanced Template Variables in `AssistantOverrides`**: The `AssistantOverrides.variableValues` now supports LiquidJS syntax for replacing template variables. You can customize assistant messages using expressions like `{{ name }}` for dynamic content, or format dates with `{{"now" | date: "%b %d, %Y, %I:%M %p", "America/New_York"}}`.
--------------------------------------------------------------------------------
/fern/changelog/2024-11-03.mdx:
--------------------------------------------------------------------------------
1 | 1. **Access Transport Details and Costs**: You can now use `call.transport` to access details about the provider used for a call (`twilio`, `vonage`, `vapi`, or `daily`), and whether the assistant's video is enabled for web calls (`assistantVideoEnabled`). Additionally, transport costs in `call.costs[type=transport]` now include a `provider` field, allowing you to see which provider contributed to the transport cost.
2 |
3 | 2. **Manage Tavus Credentials**: You can now create and update Tavus credentials in the [updated Provider Credentials page](https://dashboard.vapi.ai/keys).
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/fern/changelog/2024-11-04.mdx:
--------------------------------------------------------------------------------
1 | 1. **XAi Model Support**: You can now use xAI's `grok-beta` model when creating or updating an assistant, and specify your API credentials from the [xAI console](https://console.x.ai/) in the [updated Provider Credentials page](https://dashboard.vapi.ai/keys). The list of call ended reasons has been updated to include xAI-specific errors.
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/fern/changelog/2024-11-06.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Anthropic model `claude-3-5-haiku-20241022` added**: You can now use `claude-3-5-haiku-20241022` in your assistants. Specify `anthropic` in `Assistant.model.provider` and `claude-3-5-haiku-20241022` in `Assistant.model`.
2 |
3 | 2. **Payment `cost`, Subscription `credits` and `couponUsageLeft` are now strings**: These properties are now strings to avoid floating point precision errors. Please update your applications to handle these values as strings.
4 |
5 | 3. **Advanced call logging improvements**: You can now access detailed call logs through the [updated call logs page](https://dashboard.vapi.ai/calls) or [`GET /logs?type=Call`](https://api.vapi.ai/api#/Logs/LoggingController_queryLogs) endpoint. Refer to `CallLogPrivileged` or `CallLogsPaginatedResponse` schemas in the [updated API reference](https://api.vapi.ai/api) to learn more.
--------------------------------------------------------------------------------
/fern/changelog/2024-11-11.mdx:
--------------------------------------------------------------------------------
1 | 1. **Subscription Updates**: You can now check the number of minutes used in a subscription with `Subscription.minutesUsed` (Enterprise only).
2 |
3 | 2. **Updates to Concurrency Limits in your Subscription**: `Subscription.concurrencyLimit` now shows both the included and purchased limits, which better represents the total concurrency limit. Refer to the [Subscription schema](https://api.vapi.ai/api/) for more details.
4 | - Use `Subscription.concurrencyLimitIncluded` to get the default concurrency limit provided with the subscription.
5 | - Use `Subscription.concurrencyLimitPurchased` to get any additional purchased concurrency limit.
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/fern/changelog/2024-11-14.mdx:
--------------------------------------------------------------------------------
1 | 1. **Langfuse Credential Management**: You can now send traces to Langfuse by providing your "Secret Key", "Public Key", and "Host URL" for better telemetry monitoring. Create and update these credentials in the [updated Provider Credentials page](https://dashboard.vapi.ai/keys), under `Observability Providers`.
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/fern/changelog/2024-11-15.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Voices for `gpt-4o-realtime-preview-2024-10-01`**: You can now use new voice IDs: `ash`, `ballad`, `coral`, `sage`, and `verse` with the `voiceId` parameter when configuring `OpenAIVoice`. Please note that these voices are only available with the `gpt-4o-realtime-preview-2024-10-01` model.
--------------------------------------------------------------------------------
/fern/changelog/2024-11-22.mdx:
--------------------------------------------------------------------------------
1 | 1. **Support for 'uaenorth' Region in Azure OpenAI Credentials**: When configuring Azure OpenAI credentials, you can now set `region` to use the UAE North region by specifying `'uaenorth'`.
--------------------------------------------------------------------------------
/fern/changelog/2024-11-24.mdx:
--------------------------------------------------------------------------------
1 | 1. **Voice Fallback Plan Introduced**: You can now enhance your assistant's reliability by defining fallback voice providers using `assistant.voice.fallbackPlan.voices`. This allows your assistant to switch to alternative voices or providers like `FallbackLMNTVoice`, `FallbackAzureVoice`, `FallbackNeetsVoice`, `FallbackTavusVoice`, `FallbackOpenAIVoice`, and others if the primary voice provider fails.
2 |
3 |
4 |
5 |
6 |
7 | 2. **Language Selection for PlayHTVoice**: The `language` property has been added to `PlayHTVoice`. You can now specify the desired language for speech synthesis using `assistant.voice.language`.
8 |
9 | 3. **AssemblyAI Transcriber Available**: You can now use AssemblyAI for transcribing by setting `Assistant.transcriber` to `AssemblyAITranscriber`. This provides a new option for converting speech to text in your assistant.
10 |
11 | 4. **Updated OpenAI Model Support**: The `gpt-4o-2024-11-20` model has been added to `OpenAIModel.model` and `OpenAIModel.fallbackModels`. You can now configure your assistant to use this latest OpenAI model.
12 |
13 | 5. **Removal of 'fillerInjectionEnabled' Property**: The `fillerInjectionEnabled` property has been removed from voice configurations like `LMNTVoice`, `AzureVoice`, etc. You no longer need to include this property when configuring these voices.
14 |
--------------------------------------------------------------------------------
/fern/changelog/2024-11-25.mdx:
--------------------------------------------------------------------------------
1 | 1. **No length limit for assistant's first message**: You can now set `assistant.firstMessage` or `call.assistant.firstMessage` to any length; the previous maximum length restriction has been removed. This allows you to provide longer initial messages for the assistant's greeting.
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/fern/changelog/2024-11-30.mdx:
--------------------------------------------------------------------------------
1 | 1. **Extended Silence Timeout for Assistants**: You can now set `silenceTimeoutSeconds` up to 3600 seconds (previously 600 seconds) when creating or updating assistants and assistant overrides. This allows for longer periods of silence before an assistant session times out.
2 |
3 | 2. **New Credits Purchase Option**: You can now purchase credits to your subscription by navigating to the [updated billing page](https://dashboard.vapi.ai/org/billing/credits). Specify the dollar amount of your credits in the `credits` field to complete the purchase.
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/fern/changelog/2024-12-05.mdx:
--------------------------------------------------------------------------------
1 | 1. **OAuth2 Support for Custom LLM Credentials and Webhooks**: You can now authorize access to your [custom LLMs](https://docs.vapi.ai/customization/custom-llm/using-your-server#step-2-configuring-vapi-with-custom-llm) and [server urls (aka webhooks)](https://docs.vapi.ai/server-url) using OAuth2 (RFC 6749).
2 |
3 | For example, create a webhook credential with `CreateWebhookCredentialDTO` with the following payload:
4 |
5 | ```json
6 | {
7 | "provider": "webhook",
8 | "authenticationPlan": {
9 | "type": "oauth2",
10 | "url": "https://your-url.com/your/path/token",
11 | "clientId": "your-client-id",
12 | "clientSecret": "your-client-secret"
13 | },
14 | "name": "your-credential-name-between-1-and-40-characters"
15 | }
16 | ```
17 |
18 | This returns a [`WebhookCredential`](https://api.vapi.ai/api) object as follows:
19 |
20 |
21 |
22 |
23 |
24 | 3. **Removal of Canonical Knowledge Base**: The ability to create, update, and use canonical knowledge bases in your assistant has been removed from the API (as custom knowledge bases and the Trieve integration support a superset of this functionality). Please update your implementations, as endpoints and models referencing canonical knowledge base schemas are no longer available.
--------------------------------------------------------------------------------
/fern/changelog/2024-12-06.mdx:
--------------------------------------------------------------------------------
1 | 1. **OAuth 2 Authentication for Custom LLM Models and Webhooks**: In addition to [AuthZ](https://www.okta.com/identity-101/authentication-vs-authorization/), you can now authenticate users accessing your [custom LLMs](https://docs.vapi.ai/customization/custom-llm/using-your-server#step-2-configuring-vapi-with-custom-llm) and [server urls (aka webhooks)](https://docs.vapi.ai/server-url) using OAuth2 (RFC 6749). Use the `authenticationSession` dictionary which contains an `accessToken` and `expiresAt` datetime to authenticate further requests to your custom LLM or server URL.
2 |
3 | For example, create a webhook credential with `CreateCustomLLMCredentialDTO` with the following payload:
4 | ```json
5 | {
6 | "provider": "custom-llm",
7 | "apiKey": "your-api-key-max-10000-characters",
8 | "authenticationPlan": {
9 | "type": "oauth2",
10 | "url": "https://your-url.com/your/path/token",
11 | "clientId": "your-client-id",
12 | "clientSecret": "your-client-secret"
13 | },
14 | "name": "your-credential-name-between-1-and-40-characters"
15 | }
16 | ```
17 |
18 | This returns a [`CustomLLMCredential`](https://api.vapi.ai/api) object as follows:
19 |
20 |
21 |
22 |
23 |
24 | This can be used to authenticate successive requests to your custom LLM or server URL.
25 |
--------------------------------------------------------------------------------
/fern/changelog/2024-12-09.mdx:
--------------------------------------------------------------------------------
1 | 1. **Improved Tavus Video Processing Error Messages**: Your call `endedReason` now includes detailed error messages for `pipeline-error-tavus-video-failed`. Use this to detect and manage scenarios where the Tavus video processing pipeline fails during a call.
--------------------------------------------------------------------------------
/fern/changelog/2024-12-10.mdx:
--------------------------------------------------------------------------------
1 | 1. **Claude Computer Use Tools Available**: You can now use [Claude computer use tools](https://www.anthropic.com/news/3-5-models-and-computer-use) like `BashTool`, `ComputerTool`, and `TextEditorTool` when building your Vapi assistant. Create these tools with `CreateBashToolDTO` (enables shell command execution), `CreateComputerToolDTO` (use desktop functionality with customizable display dimensions using `displayWidthPx`, `displayHeightPx`), and `CreateTextEditorToolDTO` (text editing operations), respectively.
2 |
3 | Refer to our [API docs](https://api.vapi.ai/api) to learn more about how to use Claude computer use tools.
--------------------------------------------------------------------------------
/fern/changelog/2024-12-11.mdx:
--------------------------------------------------------------------------------
1 | 1. **Use OpenAI Chat Completions in your Assistant**: you can now more easily integrate your Assistant with OpenAI's [chat completions sessions](https://platform.openai.com/docs/api-reference/chat) by specifying `messages` (an array of `OpenAIMessage` objects) and an `assistantId` (a string). Each `OpenAIMessage` in turn consists of a `content` (a string between 1 and 100,000,000 characters) and a role (between *assistant*, *function*, *user*, *system*, *tool*). This makes it easier to manage chat sessions associated with a specific assistant. Refer to the `ChatDTO`, `OpenAIMessage` schemas in [our API docs](https://api.vapi.ai/api) to learn more.
2 |
3 | 2. **Update Subscription Email on Billing Page**: you can now customize which email address appears on your Vapi invoices through the updated billing page > [under payment history](https://dashboard.vapi.ai/org/billing). You can specify an email address (in addition to a physical address and tax ID) - read more in [our docs](https://docs.vapi.ai/quickstart/billing#how-do-i-download-invoices-for-my-credit-purchases).
--------------------------------------------------------------------------------
/fern/changelog/2024-12-13.mdx:
--------------------------------------------------------------------------------
1 | 1. **Azure Speech Transcriber Support**: You can now use Azure's speech-to-text service by specifying `AzureSpeechTranscriber` as an option for `transcriber`. This allows you to leverage Azure's speech to text capabilities when creating or updating your assistant.
2 |
3 | Refer to our [api docs](https://api.vapi.ai/api) to learn more.
--------------------------------------------------------------------------------
/fern/changelog/2024-12-14.mdx:
--------------------------------------------------------------------------------
1 | 1. **Removal of `'gemma-7b-it'` from `GroqModel` Options:** The `'gemma-7b-it'` model is no longer available when selecting Groq as a model provider. Update your applications to use other valid options provided by the API.
2 |
3 | Refer to the [`GroqModel` schema](https://api.vapi.ai/api) or the [vapi dashboard](https://dashboard.vapi.ai/assistants) for Groq for a list of supported models.
--------------------------------------------------------------------------------
/fern/changelog/2024-12-19.mdx:
--------------------------------------------------------------------------------
1 | 1. **Azure Region Renamed to `swedencentral` (from *sweden*)**: Azure Speech Services customers using the Sweden data center should now specify `swedencentral` as your Azure Speech Services region instead of `sweden`. Update your region in your code and the updated [provider keys page](https://dashboard.vapi.ai/keys) > *Azure Speech*.
--------------------------------------------------------------------------------
/fern/changelog/2024-12-21.mdx:
--------------------------------------------------------------------------------
1 | **Expanded Voice Compatibility with Realtime Models**: You can use the voices ash, ballad, coral, sage, and verse with any realtime models, giving you more flexibility in voice synthesis options.
2 |
3 | **Access to New OpenAI Models**:
4 | You can now specify the new models `gpt-4o-realtime-preview-2024-12-17` and `gpt-4o-mini-realtime-preview-2024-12-17` when configuring `OpenAIModel.model` and `OpenAIModel.fallbackModels`.
5 |
6 | **New ElevenLabs Voice Models Available**:
7 | The new voice models `eleven_flash_v2` and `eleven_flash_v2_5` are now available for use in `ElevenLabsVoice` and `FallbackElevenLabsVoice`, offering potential improvements in voice performance.
--------------------------------------------------------------------------------
/fern/changelog/2025-01-05.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Transfer Plan Mode Added**: You can now include call summaries in the SIP header during blind transfers without assistant involvement with `blind-transfer-add-summary-to-sip-header` (a new `TransferPlan.mode` option). Doing so will make `ServerMessageStatusUpdate` include a `summary` when the call status is `forwarding` - which means you can access call summaries for real-time display or logging purposes in your SIP calls.
2 |
3 | 2. **Azure Speech Transcription Support**: You can now specify a new property called `AzureSpeechTranscriber.language` in Azure's Speech-to-Text service to improve the accuracy of processing spoken input.
4 |
5 | 3. **New Groq Model Available**: You can now use `'llama-3.3-70b-versatile'` in `GroqModel.model`.
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/fern/changelog/2025-01-07.mdx:
--------------------------------------------------------------------------------
1 | # New Gemini 2.0 Models, Realtime Updates, and Configuration Options
2 |
3 | 1. **New Gemini 2.0 Models**: You can now use two new models in `Assistant.model[model='GoogleModel']`: `gemini-2.0-flash-exp` and `gemini-2.0-flash-realtime-exp`, which give you access to the latest real-time capabilities and experimental features.
4 |
5 | 2. **Support for Real-time Configuration with Gemini 2.0 Models**: Developers can now fine-tune real-time settings for the Gemini 2.0 Multimodal Live API using `Assistant.model[model='GoogleModel'].realtimeConfig`, enabling more control over text generation and speech output.
6 |
7 | 3. **Customize Speech Output for Gemini Multimodal Live APIs**: You can now customize the assistant's voice using the `speechConfig` and `voiceConfig` properties, with options like `"Puck"`, `"Charon"`, and more.
8 |
9 | 4. **Advanced Gemini Text Generation Parameters**: You can also tune advanced hyperparameters such as `topK`, `topP`, `presencePenalty`, and `frequencyPenalty` to control how the assistant generates responses, leading to more natural and dynamic conversations.
--------------------------------------------------------------------------------
/fern/changelog/2025-01-14.mdx:
--------------------------------------------------------------------------------
1 | **End Call Message Support in ClientInboundMessage**: Developers can now programmatically end a call by sending an `end-call` message type within `ClientInboundMessage`. To use this feature, include a message with the `type` property set to `"end-call"` when sending inbound messages to the client.
--------------------------------------------------------------------------------
/fern/changelog/2025-01-15.mdx:
--------------------------------------------------------------------------------
1 | 1. **Updated Log Endpoints:**
2 | Both the `GET /logs` and `DELETE /logs` endpoints have been simplified by removing the `orgId` parameter.
3 |
4 | 2. **Updated Log Schema:**
5 | The following fields in the Log schema are no longer required: `requestDurationSeconds`, `requestStartedAt`, `requestFinishedAt`, `requestBody`, `requestHttpMethod`, `requestUrl`, `requestPath`, and `responseHttpCode`.
--------------------------------------------------------------------------------
/fern/changelog/2025-01-21.mdx:
--------------------------------------------------------------------------------
1 | # Updated Azure Regions for Credentials
2 |
3 | 1. **Updated Azure Regions for Credentials**: You can now specify `canadacentral`, `japaneast`, and `japanwest` as valid regions when specifying your Azure credentials. Additionally, the region `canada` has been renamed to `canadaeast`, and `japan` has been replaced with `japaneast` and `japanwest`; please update your configurations accordingly.
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/fern/changelog/2025-01-22.mdx:
--------------------------------------------------------------------------------
1 | # Tool Calling Updates, Final Transcripts, and DeepSeek Reasoner
2 | 1. **Migrate `ToolCallFunction` to `ToolCall`**: You should update your client and server tool calling code to use the [`ToolCall` schema](https://api.vapi.ai/api#:~:text=ToolCall) instead of `ToolCallFunction`, which includes properties like `name`, `tool`, and `toolBody` for more detailed tool call specifications. ToolCallFunction has been removed.
3 |
4 | 2. **Include `ToolCall` Nodes in Workflows**: You can now incorporate [`ToolCall` nodes](https://api.vapi.ai/api#:~:text=ToolCall) directly into workflow block steps, enabling tools to be invoked as part of the workflow execution.
5 |
6 | 3. **New Model Option `deepseek-reasoner`**: You can now select `deepseek-reasoner` as a model option inside your assistants with `Assistant.model["deep-seek"].model["deepseek-reasoner"]`, offering enhanced reasoning capabilities for your applications.
7 |
8 | 4. **Support for Final Transcripts in Server Messages**: The API now supports `'transcript[transcriptType="final"]'` in server messages, allowing your application to handle and process end of conversation transcripts.
--------------------------------------------------------------------------------
/fern/changelog/2025-02-04.md:
--------------------------------------------------------------------------------
1 | # Hooks, PCI Compliance, and Blocking Messages
2 |
3 | 1. **Introduction of `Hook`s in Workflows**: You can now use [`Hooks`](https://api.vapi.ai/api#:~:text=Hook) in your workflows to automatically execute actions when specific events occur, like task start or confirmation. Hooks are now available in [`ApiRequest`](https://api.vapi.ai/api#:~:text=ApiRequest) and [`Gather`](https://api.vapi.ai/api#:~:text=Gather) workflow nodes.
4 |
5 | 2. **Make your Assistant PCI Compliant**: You can now configure [`Assistant.pciEnabled`](https://api.vapi.ai/api#:~:text=UpdateCallDTO-,Assistant,-UpdateAssistantDTO) to indicate if your assistant deals with sensitive cardholder data that requires PCI compliance, helping you meet security standards for financial information.
6 |
7 | 3. **Blocking Messages before Tool Calls**: You can now configure your tool calls to wait until a message is fully spoken before starting with [`ToolMessageStart.blocking=true`](https://api.vapi.ai/api#:~:text=ToolMessageStart) (default is `false`).
8 |
9 |
--------------------------------------------------------------------------------
/fern/changelog/2025-03-14.mdx:
--------------------------------------------------------------------------------
1 | ## Blocks Schema Deprecations, Scheduling Enhancements, and New Voice Options for Vapi Voice
2 |
3 |
4 | 1. **'scheduled' Status Added to Calls and Messages**: You can now set the status of a call or message to `scheduled`, allowing it to be executed at a future time. This enables scheduling functionality within your application for calls and messages.
5 |
6 | 2. **New Voice Options for Text-to-Speech**: Four new voices—`Neha`, `Cole`, `Harry`, and `Paige`—have been added for text-to-speech services. You can enhance user experience by setting the `voiceId` to one of these options in your configurations.
7 |
8 | 3. **Removal of Step and Block Schemas**:
9 | Blocks and Steps are now officially deprecated. Developers should update their applications to adapt to these changes, possibly by using new or alternative schemas provided.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-15.mdx:
--------------------------------------------------------------------------------
1 | # Enhancements in Assistant Responses, New Gemini Model, and Call Handling
2 |
3 | 1. **Introduction of 'gemini-2.0-flash-lite' Model Option**: You can now use `gemini-2.0-flash-lite` in [`Assistant.model[provider="google"].model[model="gemini-2.0-flash-lite"]`](https://api.vapi.ai/api#:~:text=GoogleModel) for a reduced latency, lower cost Gemini model with a 1 million token context window.
4 |
5 |
6 |
7 |
8 |
9 | 2. **New Assistant Paginated Response**: All [`Assistant`](https://api.vapi.ai/api#:~:text=Assistants) endpoints now return paginated responses. Each response specifies `itemsPerPage`, `totalItems`, and `currentPage`, which you can use to navigate through a list of assistants.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-17.mdx:
--------------------------------------------------------------------------------
1 | # New `timeoutSeconds` Property in Custom LLM Model
2 |
3 | 1. **New `timeoutSeconds` Property in [`Custom LLM Model`](https://api.vapi.ai/api#:~:text=CustomLLMModel):** Developers can now specify a custom timeout duration (between 20 and 600 seconds) for connections to their [custom language model provider](https://api.vapi.ai/api#:~:text=CustomLLMModel) using the new `timeoutSeconds` property. This enhancement allows for better control over response waiting times, accommodating longer operations or varying network conditions.
4 |
--------------------------------------------------------------------------------
/fern/changelog/2025-03-19.mdx:
--------------------------------------------------------------------------------
1 |
2 | # Test Suite, Smart Endpointing, and Compliance Plans, Chat Completion Message Workflows, and Voicemail Detection
3 |
4 | 1. **Test Suite Enhancements**: Developers can now define `targetPlan` and `testerPlan` when creating or updating [test suites](https://api.vapi.ai/api#:~:text=TestSuite), allowing for customized testing configurations without importing phone numbers to Vapi.
5 |
6 | 2. **Smart Endpointing Updates**: You can now select between [`Vapi`](https://api.vapi.ai/api#:~:text=VapiSmartEndpointingPlan) and [`Livekit`](https://api.vapi.ai/api#:~:text=LivekitSmartEndpointingPlan) smart endpointing providers using the `Assistant.startSpeakingPlan.smartEndpointingPlan`; the `customEndpointingRules` property is deprecated and should no longer be used.
7 |
8 | 3. **Compliance Plan Enhancements**: Organizations can now specify compliance settings using the new `compliancePlan` property, enabling features like PCI compliance at the org level.
9 |
10 | 4. **Chat Completion Message Updates**: When working with OpenAI chat completions, you should now use [`ChatCompletionMessageWorkflows`](https://api.vapi.ai/api#:~:text=ChatCompletionMessageWorkflows) instead of the deprecated `ChatCompletionMessage`.
11 |
12 | 5. **Voicemail Detection Defaults Updated**: The default `voicemailExpectedDurationSeconds` for voicemail detection plans has increased from 15 to 25 seconds, affecting how voicemail detection timings are handled.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-20.mdx:
--------------------------------------------------------------------------------
1 | # Introducing Google Calendar Integration, and Chat Test Suite / Rime AI Voice Enhancements
2 |
3 | 1. **Integration with Google Calendar**: You can now create and manage Google Calendar events directly within your tools. Configure OAuth2 credentials through the [dashboard > Build > Provider Keys](https://dashboard.vapi.ai/keys#:~:text=Google%20Calendar) to authenticate and interact with Google Calendar APIs.
4 |
5 |
6 |
7 |
8 |
9 | 2. **Enhanced Voice Customization for RimeAIVoice**: Gain more control over [Rime AI voice](https://api.vapi.ai/api#:~:text=RimeAIVoice) properties with new options like `reduceLatency`, `inlineSpeedAlpha`, `pauseBetweenBrackets`, and `phonemizeBetweenBrackets`. These settings let you optimize voice streaming and adjust speech delivery to better suit your assistant's needs.
10 |
11 | 3. **Chat Test Suite Enhancements**: You can now create and run chat-based tests in your test suites using the new [`TestSuiteTestChat`](https://api.vapi.ai/api#:~:text=TestSuiteTestChat) to more comprehensively test conversational interactions in your assistant.
12 |
13 | 4. **Maximum Length for Test Suite Chat Scripts**: When creating or updating chat tests, note that the `script` property now has a maximum length of 10,000 characters. Ensure your test scripts conform to this limit to avoid any validation errors.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-21.mdx:
--------------------------------------------------------------------------------
1 |
2 | 1. **OpenAI Voice Enhancements**: When using [OpenAI Voice models in `Assistant.voice`](https://api.vapi.ai/api#:~:text=OpenAIVoice), you can now use specific text to speech models and add custom instructions to control your assistant's voice output
3 |
4 | 2. **Improved Call Error Reporting**: You can now use new [`Call.endedReason`](https://api.vapi.ai/api#:~:text=Call,-CallBatchError) codes when a call fails to start or ends unexpectedly due to failing to retrieve Vapi objects. Refer to [Call.endedReason](https://api.vapi.ai/api#:~:text=Call,-CallBatchError) for more details.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-22.mdx:
--------------------------------------------------------------------------------
1 |
2 | 1. **Customizable Background Sound**: You can now use a custom audio file as the background sound in calls by providing a URL in the `backgroundSound` property. This allows you to enhance the call experience with personalized ambient sounds or music.
3 |
4 | 2. **New Recording Format Options in `ArtifactPlan`**: You can specify the recording format as either `'wav;l16'` or `'mp3'` in `Assistant.artifactPlan` or `Call.artifactPlan`. This gives you control over the audio format of call recordings to suit your storage and playback preferences.
5 |
6 | 3. **Integrate with Langfuse for Enhanced Observability**: You can now integrate with Langfuse by setting `assistant.observabilityPlan` to `langfuse`. Add `tags` and `metadata` to your traces to improve monitoring, categorization, and debugging of your application's behavior.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-23.mdx:
--------------------------------------------------------------------------------
1 | 1. **Multi-Structured Data Extraction with `StructuredDataMultiPlan`:** You can now extract multiple sets of structured data from calls by configuring `assistant.analysisPlan.structuredDataMultiPlan`. This allows you to define various extraction plans, each producing structured outputs accessible via `call.analysis.structuredDataMulti`.
2 |
3 | 2. **Customizable Voice Speed and Language Settings:** You can now adjust the speech speed and language for your assistant's voice by using the new `speed` and `language` properties in `Assistant.voice`. This enables you to fine-tune the voice output to better match your user's preferences and localize the experience.
4 |
5 | 3. **Integration of OpenAI Transcriber:** The `transcriber` property in assistants now supports `OpenAITranscriber`, allowing you to utilize OpenAI's transcription services. A corresponding `Call.endedReason` value, `pipeline-error-openai-transcriber-failed`, has been added to help you identify when a call ends due to an OpenAI transcriber error.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-27.mdx:
--------------------------------------------------------------------------------
1 | 1. **Batch Call Operations**: You can now place multiple calls to different customers at once by providing a list of `customer`s as an array in [`POST /call`](https://api.vapi.ai/api#/Calls/CallController_create).
2 |
3 | 2. **Google Sheets Row Append Tool Added**: You can now append rows to Google Sheets directly from your assistant using [`GoogleSheetsRowAppendTool`](https://api.vapi.ai/api#/Tools/GoogleSheetsRowAppendTool). This allows integration with Google Sheets via the API for automating data entry tasks.
4 |
5 | 3. **Call Control and Scheduling**: You can now schedule calls using the new `SchedulePlan` feature, specifying earliest and latest times for calls to occur. This gives you more control over call timing and scheduling.
6 |
7 | 4. **New Transcriber Options and Fallback Plans**: New transcribers like `GoogleTranscriber` and `OpenAITranscriber` have been added, along with the ability to set `fallbackPlan` for transcribers. This provides more choices and reliability for speech recognition in your applications.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-28.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Slack and Google Calendar Tools Added**: You can now use the built-in [Slack tool](https://docs.vapi.ai/tools/slack) to send messages and use the [Google Calendar tool](https://docs.vapi.ai/tools/google-calendar) to check calendar availability directly from your assistant, with full CRUD operations available via the [`/tool` API endpoint](https://docs.vapi.ai/api-reference/tools/list). You can authenticate the [Slack tool](https://dashboard.vapi.ai/keys#:~:text=Slack) and the [Google Calendar tool](https://dashboard.vapi.ai/keys#:~:text=Google%20Calendar) using OAuth2 from the [Vapi provider keys page](https://dashboard.vapi.ai/keys).
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | 2. **Select LLM Model in Workflow Nodes**: You can now select and update which LLM model you want to use within workflow nodes, allowing more precise control over the assistant's behavior in different workflow nodes and easier configuration updates.
12 |
13 | 3. **Enhanced Call Monitoring and Reporting**: We've improved call monitoring with conversation turn tracking, millisecond-precision timestamps, and provided more detailed call end reasons. These enhancements make it easier to track conversation flow, perform precise time calculations, and diagnose specific call termination issues like server overloads or database errors.
14 |
15 | 4. **Enable Background Denoising**: You can now filter out background noise during calls by setting `Assistant.backgroundDenoisingEnabled` to `true`.
--------------------------------------------------------------------------------
/fern/changelog/2025-03-30.mdx:
--------------------------------------------------------------------------------
1 | 1. **TestSuiteRunTestAttempt now accepts `callId` and `metadata`**: You can now include a `callId` and `metadata` when creating a test suite run attempt, allowing you to reference calls by ID and attach session-related information.
2 |
3 | 2. **`call` property in [TestSuiteRunTestAttempt](https://api.vapi.ai/api#:~:text=TestSuiteRunTestAttemptMetadata) is no longer required**: It's now optional to include the full `call` object in a test attempt, providing flexibility for cases where call details are unnecessary or already known.
4 |
5 | 3. **Attach Metadata to Test Suite Run Attempts**: You can now attach [metadata](https://api.vapi.ai/api#:~:text=TestSuiteRunTestAttemptMetadata) like `sessionId` to test attempts for better tracking and analysis.
6 |
--------------------------------------------------------------------------------
/fern/changelog/2025-04-03.mdx:
--------------------------------------------------------------------------------
1 | 1. **Introducing `SmsSendTool` for SMS messaging support**: You can now create and manage tools of type `sms` using the new [SMS Send Tool](https://api.vapi.ai/api#:~:text=SmsSendTool), allowing you to send SMS messages via defined servers. The `sms` tool type is also now recognized in API endpoints, ensuring that SMS send tools are correctly processed during CRUD operations.
2 |
3 | 2. **New configuration options for voice and transcriber settings**: The `autoMode` property has been added to [Eleven Labs Voice Settings](https://api.vapi.ai/api#:~:text=ElevenLabsVoice), letting developers control automatic voice settings. Additionally, `confidenceThreshold` has been introduced in transcriber settings, allowing developers to set thresholds to discard low-confidence transcriptions and improve accuracy.
4 |
5 | 3. **Enhanced speed control in `CartesiaExperimentalControls`**: The `speed` property now accepts both predefined speeds (`'slowest'`, `'slow'`, `'normal'`, `'fast'`, `'fastest'`) and numeric values between -1 and 1. This gives you more precise control over speed settings for better customization.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-04.mdx:
--------------------------------------------------------------------------------
1 | 1. **Addition of `assistantId` to `TargetPlan` settings**: You can now specify an `assistantId` when testing [target plans](https://api.vapi.ai/api#:~:text=TargetPlan), allowing you to test scenarios involving specific assistants directly.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-05.mdx:
--------------------------------------------------------------------------------
1 | 1. **Introducing `SmsSendTool` for SMS messaging support**: You can now create and send `sms` text messages using the new [`Send Text`](https://api.vapi.ai/api#:~:text=SmsSendTool) tool, enabling assistants to send SMS messages via defined servers.
2 |
3 |
4 |
5 |
6 |
7 | 2. **[Eleven Labs Voice](https://api.vapi.ai/api#:~:text=ElevenLabsVoice) Auto Mode and Confidence Threshold configuration options**: When using [`Eleven Labs Voice`](https://api.vapi.ai/api#:~:text=ElevenLabsVoice) in your Assistant, you can now configure `autoMode` (default: false) to automatically manage chunking strategies for long texts; Eleven Labs automatically determines the best way to process and generate audio, optimizing for latency and efficiency. Additionally, `confidenceThreshold` has been introduced in transcriber schemas, allowing developers to set thresholds to discard low-confidence transcriptions and improve accuracy.
8 |
9 | 3. **Changes to `CartesiaExperimentalControls` Speed property**: The `speed` property now accepts both predefined speeds (`'slowest'`, `'slow'`, `'normal'`, `'fast'`, `'fastest'`) and numeric values between -1 and 1. This simplifies the process of controlling the speed of the generated audio with Cartesia.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-08.mdx:
--------------------------------------------------------------------------------
1 | 1. **Simplified `transport` property in `Call` configuration**: You should now configure the `transport` property in [`Call`](https://api.vapi.ai/api#:~:text=Call) as an object when creating or updating a [`Call`](https://api.vapi.ai/api#:~:text=Call), since the separate `Transport` schema has been deprecated. This simplification makes it easier to work with transport details without referencing a separate transport configuration.
2 |
3 |
4 | The `Transport` schema is now deprecated and will be removed in a future release.
5 |
6 |
7 | 2. **New call type `vapi.websocketCall`**: You can now make [phone calls over WebSockets](https://docs.vapi.ai/calls/websocket-transport) with Vapi. The `Call` schema now supports a new `type` value: `vapi.websocketCall`.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-11.mdx:
--------------------------------------------------------------------------------
1 | 1. **Updated AI Edge Condition with Prompt**: When defining an AI edge condition, the `matches` property has been renamed to `prompt`. The `prompt` allows you to provide a natural language condition (up to 1000 characters) that guides AI decision-making in workflows.
2 |
3 |
4 |
5 |
6 |
7 | 2. **Assistant Overrides per Customer**: You can now customize assistant settings for individual customers using `assistantOverrides` when [creating customers](https://api.vapi.ai/api#:~:text=CreateCustomerDTO). This enables personalized assistant interactions for each customer in batch calls.
8 |
9 | 3. **New Call Ended Reasons**: New error codes have been added to `endedReason` enums, providing more detailed insights into call terminations related to providers like Anthropic Bedrock and Vertex. This helps in better error handling and debugging of call issues.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-12.mdx:
--------------------------------------------------------------------------------
1 |
2 | 1. **Expanded Voice Selection for Assistant Voices**: You can now specify any valid `voiceId` for assistant voices without being limited to a predefined list. This provides greater flexibility to use different voices in `Assistant.voice`, and related configurations.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-15.mdx:
--------------------------------------------------------------------------------
1 | 1. **New GPT-4.1 Models Available**: You can now use `'gpt-4.1'`, `'gpt-4.1-mini'`, and `'gpt-4.1-nano'` as options for the `model` and `fallbackModels` with your [OpenAI models](https://api.vapi.ai/api#:~:text=OpenAIModel). These models may offer improved performance or features over previous versions.
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/fern/changelog/2025-04-16.mdx:
--------------------------------------------------------------------------------
1 | 1. **Assistant Overrides in Testing (`TargetPlan.assistantOverrides`)**: You can now apply `assistantOverrides` when testing an assistant with a [Target Plan](https://api.vapi.ai/api#:~:text=TargetPlan), allowing modifications to the assistant's configuration specifically for tests without changing the original assistant. This helps in testing different configurations or behaviors of an assistant without affecting the live version.
2 |
3 | 2. **Specify Voice Model with Deepgram**: You can now specify the `model` to be used by Deepgram voices by setting the `model` property to `"aura"` or `"aura-2"` (default: `"aura-2"`).
4 |
5 | 3. **Expanded Deepgram Voice Options (`voiceId` in `DeepgramVoice` and `FallbackDeepgramVoice`)**: The list of available Deepgram voice options has been greatly expanded, providing a wider selection of voices for assistants. This allows you to customize the assistant's voice to better match your desired persona with `Assistant.voice["DeepgramVoice"].voiceId`.
6 |
7 |
8 |
9 |
10 |
11 |
12 | 4. **Control Text Replacement Behavior (`replaceAllEnabled` in `ExactReplacement`)**: A new property `replaceAllEnabled` allows you to decide whether to replace all instances of a specified text (`key`) or just the first occurrence in [`ExactReplacement`](https://api.vapi.ai/api#:~:text=ExactReplacement) configurations. Setting `replaceAllEnabled` to `true` ensures that all instances are replaced.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-17.mdx:
--------------------------------------------------------------------------------
1 | 1. **Custom Hooks When a Call is Ringing**: You can now define custom hooks on your phone numbers to automatically perform actions when a call is ringing. This enables you to play messages or transfer calls without additional server-side code by using the new `hooks` property in `Call.phoneNumber.hooks["phoneNumberHookCallRinging"]`.
2 |
3 | 2. **Say and Transfer Actions in Hooks**: The new [phone number hook call ringing](https://api.vapi.ai/api#:~:text=PhoneNumberHookCallRinging) allows you to specify actions that trigger when a call is ringing (`on: 'call.ringing'`), like [redirecting calls](https://api.vapi.ai/api#:~:text=TransferPhoneNumberHookAction) or [playing a message](https://api.vapi.ai/api#:~:text=SayPhoneNumberHookAction). Include these actions in the `do` array of your hook.
4 |
5 | 3. **Enhanced Call Tracking with endedReason**: When implementing call analytics, you can now track calls that ended due to hook actions through new `endedReason` values:
6 | - `'call.ringing.hook-executed-say'`: Call ended after playing a message via hook
7 | - `'call.ringing.hook-executed-transfer'`: Call ended after being transferred via hook
8 | These values let you distinguish between different automated call handling outcomes in your reporting.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-18.mdx:
--------------------------------------------------------------------------------
1 | 1. **Idle Message Count Reset in `Assistant.messagePlan`**: You can now enable `Assistant.messagePlan.idleMessageResetCountOnUserSpeechEnabled` (default: false) to allow the idle message count to reset whenever the user speaks. This means the assistant can repeatedly remind an idle user throughout the conversation.
2 |
--------------------------------------------------------------------------------
/fern/changelog/2025-04-23.mdx:
--------------------------------------------------------------------------------
1 | 1. **Create Sesame Voices Programmatically**: You can now create and manage [Sesame Voices](https://api.vapi.ai/api#:~:text=CreateSesameVoiceDTO) via the API by specifying a `voiceName` and `transcription`.
2 |
3 | 2. **AWS STS Support in OAuth2 Authentication**: You can now use AWS Security Token Service for authentication by setting the `type` of `OAuth2AuthenticationPlan` to `'aws-sts'`, enabling integration with AWS's secure token services.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-24.mdx:
--------------------------------------------------------------------------------
1 | 1. **Per-Voice Caching Control Added**: Developers can now enable or disable voice caching for each assistant's voice using the new `cachingEnabled` property in voice configurations. This allows you to optimize performance or comply with data policies by controlling whether voice responses are cached.
2 |
3 | 2. **'Condition' Value Now Accepts Strings**: When specifying conditions, the `value` property should now be provided as a string instead of an object. This simplifies condition definitions and makes it easier to set and interpret condition values.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-25.mdx:
--------------------------------------------------------------------------------
1 | 1. **New OpenAI Models 'o3' and 'o4-mini' Added**: You can now use the '`o3`' and '`o4-mini`' models with OpenAI models in `Assistant.model["OpenAIModel"].model`.
2 |
3 | 2. **'whisper' Model Added to Deepgram Transcribers**: The '`whisper`' model is now available in [Deepgram transcriber](https://api.vapi.ai/api#:~:text=DeepgramTranscriber) models for audio transcription. Select '`whisper`' in the `Assistant.transcriber["DeepgramTranscriber"].model` property to utilize this advanced transcription model.
4 |
5 | 3. **Expanded Language Support in Deepgram Transcribers**: You can now transcribe audio in '`ar`' (Arabic), '`he`' (Hebrew), and '`ur`' (Urdu) when using Deepgram transcriber in your assistant.
--------------------------------------------------------------------------------
/fern/changelog/2025-04-26.mdx:
--------------------------------------------------------------------------------
1 | 1. **Adding metadata to ToolCallResult and ToolCallResultMessage**: You can now include optional metadata in tool call results and messages. This allows you to send additional context or information to clients alongside standard tool responses.
2 |
3 | 2. **Adding `tool.completed` client message type**: Assistants can now handle a new client message type, `tool.completed`. This enables you to notify clients when a tool has finished executing.
4 |
5 | 3. **Customizable assistant messages via `message` property in [ToolCallResult](https://api.vapi.ai/api#:~:text=ToolCallResult)**: You can now specify exact messages for the assistant to say upon tool completion or failure using the `message` property. This gives you greater control over user interactions by allowing custom, context-specific responses.
6 |
--------------------------------------------------------------------------------
/fern/changelog/2025-04-27.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Assistant Hook for Call Ending Events**: You can now define actions to execute when a call is ending using [`Assistant.hooks\["AssistantHookCallEnding"\]`](https://api.vapi.ai/api#:~:text=AssistantHookCallEnding). This allows you to specify actions like transferring the call, saying a message, or invoking a function at the end of a call.
2 |
3 | 2. **Enhanced Voicemail Detection Configuration**: Configure voicemail detection more precisely with new `Assistant.voicemailDetection.backoffPlan` and `Assistant.voicemailDetection.beepMaxAwaitSeconds` properties. This lets you control retry strategies and set maximum wait times for voicemail beeps.
4 |
5 | 3. **Twilio Authentication Using API Keys**: Authenticate with Twilio using `apiKey` and `apiSecret` when importing a [Twilio Phone Number](https://dashboard.vapi.ai/phone-numbers/). This replaces the need for `authToken`.
6 |
7 | 4. **Support for New Voicemail Detection Provider and Model**: Utilize the new `vapi` provider for voicemail detection by configuring `Assistant.voicemailDetection.provider`. Additionally, the `gemini-2.5-flash-preview-04-17` model is now supported in various schemas for advanced capabilities.
8 |
9 | 5. **Expanded Workflow Nodes**: Workflows now support `Start` and `Assistant` nodes, enabling more complex and customizable call flow designs. This allows for greater flexibility in defining how calls are handled.
10 |
--------------------------------------------------------------------------------
/fern/changelog/2025-04-30.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Voicemail Detection Configuration**: You can now configure voicemail detection for assistants with Vapi using the new [`VapiVoicemailDetectionPlan`](https://api.vapi.ai/api#:~:text=VapiVoicemailDetectionPlan). This feature allows you to control how Vapi handles voicemail detection, including specifying the provider, backoff strategy, and maximum wait time for a voicemail beep. Refer to [Voicemail Detection documentation](https://docs.vapi.ai/calls/voicemail-detection) for more information, and configure it on the [Assistants tab](https://dashboard.vapi.ai/assistants#:~:text=Voicemail%20Detection).
2 |
3 |
4 |
5 | 2. **Control SMS Capabilities on Twilio Numbers**: You can now enable or disable SMS functionality on your Twilio phone numbers with the new `smsEnabled` property. By setting `smsEnabled` to `false`, Vapi will not update the messaging webhook URL during phone number import or creation, allowing you to manage SMS settings independently.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-01.mdx:
--------------------------------------------------------------------------------
1 |
2 | 1. **Customize Server Messages with Flexible Array Input**: Before, [`serverMessages`](https://api.vapi.ai/api#:~:text=ServerMessage) could only be one of a set list of string values (enforced by enum). Now, `serverMessages` is an array of objects with no restrictions on what those objects are as long as they match the [`ServerMessage`](https://api.vapi.ai/api#:~:text=ServerMessage) schema, making the schema more open and future-proof, though less strict.
3 |
4 | We provide an example list that matches the previous values: `["conversation-update", "end-of-call-report", "function-call", "hang", "speech-update", "status-update", "tool-calls", "transfer-destination-request", "user-interrupted"]`.
5 |
6 |
7 | You now need to include the `serverMessages` property when creating or updating an assistant, ensuring you explicitly define which messages your assistant sends to your server.
8 |
9 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-03.mdx:
--------------------------------------------------------------------------------
1 | 1. **New `KnowledgeBaseCost` in Call Costs**: You can now access detailed costs related to knowledge base queries in a call through the new `KnowledgeBaseCost` type in `call.costs[type=knowledge-base]`. This helps in tracking expenses when using knowledge base features during calls.
2 |
3 | 2. **Deprecated `smartEndpointingEnabled` Property:** The `smartEndpointingEnabled` property in `StartSpeakingPlan` is now deprecated. Developers should update their applications to use the new `smartEndpointingPlan` or `customEndpointingRules` for controlling endpointing behavior.
4 |
5 | 3. **Advanced Endpointing with `smartEndpointingPlan` and `customEndpointingRules`:** The `StartSpeakingPlan` now includes `smartEndpointingPlan` and `customEndpointingRules` properties, providing enhanced control over speech endpointing. Developers can specify endpointing methods or define custom rules to improve conversational interactions.
6 |
7 |
8 | The `smartEndpointingEnabled` property in `StartSpeakingPlan` is now deprecated. Developers should update their applications to use the new `smartEndpointingPlan` or `customEndpointingRules` for controlling endpointing behavior.
9 |
10 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-06.mdx:
--------------------------------------------------------------------------------
1 | 1. **Use Workflows as Call Entry Points**: You can now start calls or configure phone numbers using a `workflow` or `workflowId`, just like you would with `assistant`, `assistantId`, `squad`, or `squadId`. This provides more flexibility in defining how calls are initiated and allows direct use of workflows. Refer to the [Workflows documentation](https://docs.vapi.ai/workflows) and [API documentation](https://docs.vapi.ai/api-reference/calls/list#:~:text=Workflow) for more information.
2 |
3 | 2. **New Warm Transfer Mode and Hold Music in `TransferPlan`**: There's a new transfer mode `warm-transfer-experimental` in `call.squad.members.assistant.hooks.do[type=transfer].destination.transferPlan` that enhances call transfer capabilities, including voicemail detection and customer hold experience. You can also customize the hold music by specifying a `holdAudioUrl`.
4 |
5 | 3. **Simplified `clientMessages` Configuration**: The `clientMessages` property has been updated and is now required in `AssistantOverrides`, `CreateAssistantDTO`, and `UpdateAssistantDTO`. This change simplifies how you specify which messages are sent to your Client SDKs.
6 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-07.mdx:
--------------------------------------------------------------------------------
1 | 1. **`ClientMessage` Additions**: Several new client message schemas have been added with additional information about `call`, `customer`, `assistant`, `timestamp`, and `phoneNumber`. This includes:
2 |
3 | - [`Client Message Tool Calls`](https://api.vapi.ai/api#:~:text=ClientMessageToolCalls)
4 | - [`Client Message Transcript`](https://api.vapi.ai/api#:~:text=ClientMessageTranscript)
5 | - [`Client Message Speech Update`](https://api.vapi.ai/api#:~:text=ClientMessageSpeechUpdate)
6 | - [`Client Message Transfer Update`](https://api.vapi.ai/api#:~:text=ClientMessageTransferUpdate)
7 |
8 | 2. **New Hooks for Speech Interruption Events**: Two new hooks, [`Speech Interrupted Assistant Hook`](https://api.vapi.ai/api#:~:text=AssistantHookAssistantSpeechInterrupted) and [`Speech Interrupted Customer Hook`](https://api.vapi.ai/api#:~:text=AssistantHookCustomerSpeechInterrupted), enable you to define actions when speech is interrupted during a call.
9 |
10 | 3. **Call Schema Updates**: There are several notable updates to how `Call` is structured:
11 |
12 | - `costs` array now includes a new cost type: [`KnowledgeBaseCost`](https://api.vapi.ai/api#:~:text=KnowledgeBaseCost)
13 | - `phoneCallProvider` and `phoneCallProviderId` are now deprecated.
14 | - `waitFunction` in `LivekitSmartEndpointingPlan` has been updated to improve how long the assistant waits before speaking, enhancing call flow responsiveness.
15 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-08.mdx:
--------------------------------------------------------------------------------
1 |
2 | 1. **New 'Conversation' Node in Workflows**: You can now use the **Conversation** node in your workflows to create conversation tasks, enhancing how assistants interact during calls.
3 |
4 | 2. **Integration with GoHighLevel via OAuth2 Credentials**: You can now connect with GoHighLevel services using new **GoHighLevelMCPCredential** credentials in the [Provider Keys](https://dashboard.vapi.ai/keys#:~:text=GoHighLevel) section of the Vapi Dashboard.
5 |
6 | 3. **Standardized Message Types for `clientMessages` and `serverMessages`**: When configuring assistants, you now specify [Client Messages](https://api.vapi.ai/api#:~:text=ClientMessage) and [Server Messages](https://api.vapi.ai/api#:~:text=ServerMessage) using predefined message types, ensuring consistency and preventing invalid message configurations.
7 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-09.mdx:
--------------------------------------------------------------------------------
1 | 1. **Workflows Now Marked as Beta Features**: The workflow endpoints and related properties have now moved to **[BETA]**, indicating they're slightly more stable but still in active development. Refer to the [Workflows documentation](https://docs.vapi.ai/workflows) and [API documentation](https://docs.vapi.ai/api-reference/calls/list#:~:text=Workflow) for more information.
2 |
3 | 2. **New `{{endedReason}}` Variable in Templates**: You can now include the `{{endedReason}}` variable in your post-call analysis templates to access why a call ended. This helps generate more insightful summaries and evaluations based on the call's outcome.
4 |
5 | 3. **Introduction of `SayAssistantHookAction` Schema**: A new action, [`SayAssistantHookAction`](https://api.vapi.ai/api#:~:text=SayAssistantHookAction), allows the assistant to say specific messages during calls. Use this by adding it to `call.squad.members.assistant.hooks.do[type=say]` to enhance call interactions.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-10.mdx:
--------------------------------------------------------------------------------
1 | 1. **Configure Conversation Nodes with OpenAI Models**: You can now set up your assistant's workflow conversation nodes to use OpenAI models by specifying [`WorkflowOpenAIModel`](https://api.vapi.ai/api#:~:text=WorkflowOpenAIModel). Choose from a range of OpenAI models and customize parameters like `maxTokens` and `temperature` to control responses.
2 |
3 | 2. **Configure Conversation Nodes with Anthropic Models, Including *Thinking* Feature**: Your assistant's conversation nodes can now use Anthropic models by specifying [`WorkflowAnthropicModel`](https://api.vapi.ai/api#:~:text=WorkflowAnthropicModel). Select from various Anthropic models and, for `claude-3-7-sonnet-20250219`, enable the optional `thinking` feature for advanced reasoning capabilities.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-13.mdx:
--------------------------------------------------------------------------------
1 | # GoHighLevel Tools for Calendar and Contact Management
2 |
3 | You can now use new [GoHighLevel tools](https://www.gohighlevel.com) in all models, templates, and workflows directly through the [`/tool`](https://api.vapi.ai/api#:~:text=/tool) and [`/tool/{id}`](https://api.vapi.ai/api#:~:text=/tool/%7Bid%7D) endpoints with the following capabilities:
4 | - **Contact Management**:
5 | - [GoHighLevelContactGetTool](https://api.vapi.ai/api#:~:text=GoHighLevelContactGetTool): Fetch contact information from GoHighLevel
6 | - [GoHighLevelContactCreateTool](https://api.vapi.ai/api#:~:text=GoHighLevelContactCreateTool): Create new contacts in GoHighLevel
7 |
8 | - **Calendar Management**:
9 | - [GoHighLevelCalendarEventCreateTool](https://api.vapi.ai/api#:~:text=GoHighLevelCalendarEventCreateTool): Schedule new calendar events programmatically
10 | - [GoHighLevelCalendarAvailabilityTool](https://api.vapi.ai/api#:~:text=GoHighLevelCalendarAvailabilityTool): Check calendar availability for scheduling
11 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-14.mdx:
--------------------------------------------------------------------------------
1 | 1. **Specify Start Node in Workflows with `isStart` Property**: You can now explicitly define the starting point of your workflow by setting the `isStart` property to `true` on any node like [`Say`](https://api.vapi.ai/api#:~:text=Say), [`Gather`](https://api.vapi.ai/api#:~:text=Gather), or [`Hangup`](https://api.vapi.ai/api#:~:text=Hangup).
2 |
3 | 2. **Updated Model Options in `GroqModel`**: You can now use the following new Assistant models with [Groq](https://api.vapi.ai/api#:~:text=GroqModel):
4 | - `meta-llama/llama-4-maverick-17b-128e-instruct`
5 | - `meta-llama/llama-4-scout-17b-16e-instruct`
6 | - `mistral-saba-24b`
7 | - `compound-beta`
8 | - `compound-beta-mini`
9 |
10 |
11 |
12 |
13 |
14 | Note that some older models have been removed, including `llama-3.1-70b-versatile` and `mixtral-8x7b-32768`.
15 |
16 | 3. **New `Kylie` Voice Available in Vapi**: You can now use the new `Kylie` voice when using [`Vapi` as your voice provider](https://dashboard.vapi.ai/assistants#:~:text=Voice%20Configuration). You can learn more in the [Vapi voices documentation](https://docs.vapi.ai/providers/voice/vapi-voices).
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-15.mdx:
--------------------------------------------------------------------------------
1 | # New Azure OpenAI GPT 4.1 Models
2 | 1. **Access to New Azure OpenAI Models**: You can now use new GPT 4.1 models in Azure OpenAI such as `gpt-4.1-2025-04-14`, `gpt-4.1-mini-2025-04-14`, and `gpt-4.1-nano-2025-04-14`.
3 |
4 | The above models will be available to configure through the console at a later date. For now, configure your assistant to use these models through [the API](https://docs.vapi.ai/api-reference/assistants/update).
--------------------------------------------------------------------------------
/fern/changelog/2025-05-16.mdx:
--------------------------------------------------------------------------------
1 | # Strip Asterisks from Transcribed Text with `stripAsterisk` Formatter
2 |
3 | 1. **New `stripAsterisk` Formatter in [FormatPlan](https://api.vapi.ai/api#:~:text=FormatPlan)**: You can now remove asterisks from transcribed text by adding it to your `Assistant.voice[VOICE_PROVIDER].chunkPlan.formatPlan.formattersEnabled` configuration.
4 |
5 |
6 | Ensure `Assistant.voice[VOICE_PROVIDER].chunkPlan.formatPlan.enabled` is set to `true` to use the `stripAsterisk` formatter.
7 |
8 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-17.mdx:
--------------------------------------------------------------------------------
1 |
2 | 1. **Introduction of `WorkflowAssistant` Schema in Workflows**: [`WorkflowAssistant`](https://api.vapi.ai/api#:~:text=WorkflowAssistant) now replaces `Assistant` in workflow definitions. Use `WorkflowAssistant` when defining assistant nodes in workflows moving forward.
3 |
4 | 2. **Adding `dataExtractionPlan` and `variableExtractionPlan` to Conversations**: You can now include [`dataExtractionPlan`](https://api.vapi.ai/api#:~:text=DataExtractionPlan) and [`variableExtractionPlan`](https://api.vapi.ai/api#:~:text=VariableExtractionPlan) to extract structured data or variables from user responses. Utilize these plans to define what data to extract during conversations in your workflows.
5 |
6 | 3. **New `McpTool` for Model Configuration**: You can now add `McpTool` to your assistant's `tools` array to use MCP (Model Control Protocol) tool calls. This tool allows your assistant to use any MCP-compatible server in your workflow.
7 |
8 | 4. **Changes to `ToolCallResult` Message Property**: The `message` property in `ToolCallResult` now accepts a single object instead of an array. Ensure that you return a single `ToolMessageComplete` or `ToolMessageFailed` object when providing messages in tool call results.
9 |
10 | 5. **Updated Required Properties in `Assistant` Schema**: [`Assistant`](https://api.vapi.ai/api#:~:text=Assistant) now requires `id`, `orgId`, `createdAt`, and `updatedAt` when creating or updating assistants. Make sure to provide these fields when creating or updating assistants.
11 |
12 |
13 | `TransferDestinationStep` is now deprecated. Update your code to use the new method for specifying transfer destinations in the `transferCall` tool.
14 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-19.mdx:
--------------------------------------------------------------------------------
1 | 1. **Renaming `SmsSendTool` to `SmsTool`:** The tool previously known as `SmsSendTool` is now `SmsTool`. You can now use the new `SmsTool` schema to send SMS messages with enhanced configuration options like `async`, `server`, `function`, and `messages`.
2 |
3 | 2. **Update Text Editor Tool Type to `'text-editor'`:** The `type` for Text Editor tools has changed from `'textEditor'` to `'text-editor'`. Make sure to update your configurations to use `type`: `'text-editor'` when specifying a Text Editor tool.
4 |
5 | 3. **Removal of `backgroundSound.maxLength` Property:** The `maxLength` constraint has been removed from the `backgroundSound` property in Assistant schemas. You no longer need to limit the length of `backgroundSound`; it can now be of any length.
6 |
7 | 4. **Deprecation of `MakeTool`:** The `MakeTool` has been removed from the available tools in various model schemas. Please update your models to remove any references to `MakeTool` and use alternative tools as needed.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-22.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Anthropic Models Available**: Two new models, `claude-opus-4-20250514` and `claude-sonnet-4-20250514`, have been added to the `model` options in `AnthropicModel` and `WorkflowAnthropicModel`. You can now specify these models in your requests to take advantage of their features.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-24.mdx:
--------------------------------------------------------------------------------
1 | 1. **New `minutesUsed` Property for Organizations**: Developers can now track the total call minutes used by their organization via the new `minutesUsed` property in the `Org` schema.
2 |
3 | 2. **Removed `server` Property from Certain Tools**: The `server` property has been removed from several tools, such as `SmsTool` and `DtmfTool`; developers should update their implementations accordingly.
4 |
5 | 3. **Updated `server` Property Description in Tools**: The `server` property's description has been updated in tools like `McpTool` and `BashTool` to clarify webhook behavior when tool calls are made.
6 |
7 | 4. **New Model `gemini-2.5-flash-preview-05-20` Available**: A new model `gemini-2.5-flash-preview-05-20` is now supported, allowing developers to utilize its features in their applications.
8 |
9 | 5. **Additional Subscription Types Added**: New subscription types—`agency`, `startup`, `growth`, and `scale`—are now available, providing more options to fit different organizational needs.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-25.mdx:
--------------------------------------------------------------------------------
1 | 1. **New `transferCompleteAudioUrl` Property in `TransferPlan`:** You can now specify a custom audio file URL using `transferCompleteAudioUrl` in `TransferPlan` when using `warm-transfer-experimental` mode to play a sound after the transfer is complete. This allows you to add a custom notification (like a beep) for the destination party after delivering the message or summary.
2 |
3 | 2. **`body` Parameter in `CreateApiRequestToolDTO` Is Now Optional:** The `body` property has been removed from the required fields in `CreateApiRequestToolDTO`, so you no longer need to include it when creating an API request tool. This means you can create API requests without a body, useful for HTTP methods like GET or DELETE.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-26.mdx:
--------------------------------------------------------------------------------
1 | **Removed `async` Property from Tool Schemas**: Developers no longer need to set the `async` property when using various tool schemas like `GhlTool`, `SmsTool`, `BashTool`, and others. These tools now operate synchronously by default; please update your code to remove any references to `async` in these schemas.
2 |
3 | **Default Roles in Message Schemas**: The `role` property in `ToolMessage`, `AssistantMessage`, and `DeveloperMessage` now has default values of `"tool"`, `"assistant"`, and `"developer"`, respectively. You can omit the `role` field when creating these messages, simplifying message construction.
4 |
5 | **Improved Descriptions for Chat Inputs and Messages**: The `input` and `messages` properties in the `Chat`, `CreateChatDTO`, and `OpenAIResponsesRequest` schemas now have clearer descriptions. This helps you understand that `input` can be a string or an array of chat messages, and `messages` provide context for multi-turn conversations.
6 |
7 | **Clarified `async` Behavior in `FunctionTool`**: The `async` property's description in `FunctionTool` and related schemas has been updated for clarity. It now better explains how setting `async` to `true` or `false` affects the assistant's behavior, facilitating more effective use of this feature.
8 |
9 | **Added Titles to Schema Definitions**: The `oneOf` definitions in the `input` property of `Chat`, `CreateChatDTO`, and `OpenAIResponsesRequest` now include `title` attributes like `"String"` and `"MessageArray"`. This improves schema documentation and assists tools in processing these definitions.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-27.mdx:
--------------------------------------------------------------------------------
1 | 1. **New Chat and Session API Endpoints**: You can now manage chats and sessions using the new API endpoints `/chat`, `/chat/{id}`, `/chat/responses`, `/session`, and `/session/{id}`. This enables you to programmatically create, retrieve, and manage chat conversations and sessions within your applications.
2 |
3 | 2. **Variable Extraction Feature Removed**: The variable extraction functionality has been removed from the API. You'll need to update your workflows if you previously used variable extraction, as it is no longer supported.
4 |
5 | 3. **Specify Regions for OpenAI Models**: You can now specify the region for OpenAI models in `OpenAIModel` and `WorkflowOpenAIModel` by including a region in the `model` property, like `gpt-4.1-2025-04-14:westus`. This helps you comply with data residency rules or regional requirements by ensuring data processing occurs in specified locations.
6 |
7 | 4. **New OpenAI Models Added**: A range of new OpenAI models, including regional variants, are now available for use. You can choose these new models to better align with your application's performance needs and regional compliance requirements.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-28.mdx:
--------------------------------------------------------------------------------
1 | 1. **Removal of `language` Property in Voice Settings**: The `language` property has been removed from the `VapiVoice` and `FallbackVapiVoice` configurations. You no longer need to set `language` when configuring voice settings; voice language may now be handled automatically or through a different configuration.
2 |
3 | 2. **Introduction of Detailed Node Artifacts**: A new `NodeArtifact` schema has been added, accessible via `call.artifact.nodes`, providing detailed information about each node in a call's workflow. You can now access messages, node names, and variables for each node to gain deeper insights into call executions.
4 |
5 | 3. **Addition of `nodes` and `variables` to Call Artifacts**:
6 | The `Artifact` schema now includes `nodes` and `variables` properties, enhancing the data available in `call.artifact`. This allows you to retrieve the history of executed workflow nodes and the final state of variables after a call.
7 |
8 | 4. **Removal of `Metrics` Schema**: The `Metrics` schema has been completely removed. If your application relies on `Metrics`, you will need to update your code to accommodate this change and explore alternative solutions.
9 |
10 | 5. **Update Voice Configuration Paths**: With the changes to voice configurations, paths like `assistant.voice` and `call.squad.members.assistant.voice` may require updates. Ensure your configurations align with the new schema definitions and remove any references to the deprecated `language` property.
11 |
12 | 6. **Enable Recording in Artifacts**: To access call recordings in your artifacts, set `assistant.artifactPlan.recordingEnabled` in your configuration. This enables the `recording` property in `call.artifact`, allowing you to review call recordings for analysis or debugging.
--------------------------------------------------------------------------------
/fern/changelog/2025-05-30.mdx:
--------------------------------------------------------------------------------
1 | # Session and Workflow Enhancements
2 |
3 | 1. **Addition of `expirationSeconds` to Session Schemas**: You can now set custom session expiration times using the `expirationSeconds` property when creating or updating sessions. This allows sessions to expire anywhere between 1 minute and 30 days, providing greater control over session lifecycles.
4 |
5 | 2. **Introduction of `globalPrompt` in Workflow Schemas**: A new `globalPrompt` property allows you to define a default prompt for entire workflows. By setting a `globalPrompt` up to 5,000 characters, you can streamline your workflow configurations without setting prompts for each individual node.
6 |
--------------------------------------------------------------------------------
/fern/changelog/2025-05-31.mdx:
--------------------------------------------------------------------------------
1 | # SIP Call Error Handling Updates
2 |
3 | The following specific SIP error codes have been added to help identify call failures:
4 |
5 |
6 | - `call.in-progress.error-sip-inbound-call-failed-to-connect`
7 | - `call.in-progress.error-providerfault-outbound-sip-403-forbidden`
8 | - `call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required`
9 | - `call.in-progress.error-providerfault-outbound-sip-503-service-unavailable`
10 | - `call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable`
11 | - `call.in-progress.error-sip-outbound-call-failed-to-connect`
12 | - `call.in-progress.error-vapifault-worker-died`
13 |
14 |
15 |
16 | The generic error code `call.in-progress.error-sip-telephony-provider-failed-to-connect-call` has been removed. Update your error handling to use the new specific error codes instead.
17 |
18 |
--------------------------------------------------------------------------------
/fern/changelog/2025-06-03.mdx:
--------------------------------------------------------------------------------
1 | # Azure OpenAI Compatibility Mode and JSON Schema Updates
2 |
3 | 1. **`toolStrictCompatibilityMode` for Azure OpenAI Models**: Added a new option to handle Azure OpenAI's validation limitations. Set `toolStrictCompatibilityMode` in your `OpenAIModel` config to either:
4 | - `strip-parameters-with-unsupported-validation`: Removes entire parameters that have unsupported validations
5 | - `strip-unsupported-validation`: Keeps parameters but removes unsupported validation aspects
6 | Default is `strip-unsupported-validation`.
7 |
--------------------------------------------------------------------------------
/fern/changelog/2025-06-04.mdx:
--------------------------------------------------------------------------------
1 | ## Assistant Configuration Updates
2 |
3 | 1. **Set Minimum Messages for Analysis**: Skip analysis for very short conversations by setting `Assistant.analysisPlan.minMessagesThreshold` (default: 2).
4 |
5 | 2. **Configure Transfer Timeout**: You can now set the timeout for warm transfer modes with `Assistant.hooks.do[type=transfer].destination.transferPlan.timeout` (default: 60). Warm transfer modes allow for a smooth handoff between agents by maintaining context and conversation history during the transfer process.
6 |
7 |
8 | This timeout setting determines how long the system will wait for the transfer to complete before timing out.
9 |
10 |
11 |
12 | 3. **Enable AssemblyAI Universal Streaming API**: You can now enable the new Universal Streaming API for AssemblyAI transcribers with `Assistant.transcriber.enableUniversalStreamingApi` and `Assistant.transcriber.fallbackPlan.transcribers.enableUniversalStreamingApi`.
13 |
14 |
15 | Set this to `true` to use AssemblyAI's new Universal Streaming API for improved transcription.
16 |
17 |
18 |
19 |
20 | **Removal of regex in JsonSchema**: You can no longer use regular expressions in your [JSON schema validations](https://api.vapi.ai/api#:~:text=JsonSchema).
21 |
22 | **Dot paths affected:**
23 | - `assistant.analysisPlan.structuredDataPlan.schema.regex`
24 | - `assistant.hooks.do[type=function].function.parameters.properties.regex`
25 | - `assistant.model.tools[type=apiRequest].body.regex`
26 | - `assistant.model.tools[type=apiRequest].headers.regex`
27 |
--------------------------------------------------------------------------------
/fern/changelog/2025-06-06.mdx:
--------------------------------------------------------------------------------
1 | # Workflows Out of Beta and Gladia Transcriptions
2 |
3 | 1. **Workflows Are Out of Beta**: You can now use workflows in production as we've removed all `[BETA]` labels from workflow-related properties and API endpoints. See [Workflow API Documentation](/docs/api/workflows) for complete details.
4 |
5 | 2. **Per-Call Workflow Customization with Overrides**: You can now customize workflows on a per-call basis using the new `Call.workflowOverrides` property. Override workflow settings and template variables using [LiquidJS syntax](https://liquidjs.com/tutorials/intro-to-liquid.html). See [Workflow Documentation](/docs/workflows) for details.
6 |
7 | 3. **Enhanced Gladia Transcriptions**: You can now transcribe audio in multiple languages using the new `languages` property in `GladiaTranscriber` and `FallbackGladiaTranscriber` (when `languageBehaviour` is `manual`). You can also use the new `solaria-1` transcription model for potentially improved results. Learn more in our [Transcription Documentation](/docs/transcription).
8 |
--------------------------------------------------------------------------------
/fern/community/conferences.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Conferences
3 | subtitle: Videos showcasing Vapi out in the wild.
4 | slug: community/conferences
5 | ---
6 |
7 |
8 | Here are some videos made by people in our community showcasing what Vapi can do:
9 |
10 |
11 |
19 |
20 |
21 | ## Send Us Your Video
22 |
23 | Have a video showcasing Vapi that you want us to feature? Let us know:
24 |
25 |
26 |
32 | Send us your video showcasing what Vapi can do, we'd like to feature it.
33 |
34 |
35 |
--------------------------------------------------------------------------------
/fern/community/demos.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Demos
3 | subtitle: Videos showcasing Vapi out in the wild.
4 | slug: community/demos
5 | ---
6 |
7 |
8 | Here are some videos made by people in our community showcasing what Vapi can do:
9 |
10 |
11 |
19 |
20 |
28 |
29 |
30 |
31 | ## Send Us Your Video
32 |
33 | Have a video showcasing Vapi that you want us to feature? Let us know:
34 |
35 |
36 |
42 | Send us your video showcasing what Vapi can do, we'd like to feature it.
43 |
44 |
45 |
--------------------------------------------------------------------------------
/fern/community/ghl.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: GoHighLevel
3 | subtitle: Videos showcasing Vapi out in the wild.
4 | slug: community/ghl
5 | ---
6 |
7 |
8 | Here are some videos made by people in our community showcasing what Vapi can do:
9 |
10 |
11 |
19 |
20 |
30 |
31 |
32 | ## Send Us Your Video
33 |
34 | Have a video showcasing Vapi that you want us to feature? Let us know:
35 |
36 |
37 |
43 | Send us your video showcasing what Vapi can do, we'd like to feature it.
44 |
45 |
46 |
--------------------------------------------------------------------------------
/fern/community/podcast.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Podcast
3 | subtitle: Videos showcasing Vapi out in the wild.
4 | slug: community/podcast
5 | ---
6 |
7 |
8 | Here are some videos made by people in our community showcasing what Vapi can do:
9 |
10 |
11 |
19 |
20 |
21 | ## Send Us Your Video
22 |
23 | Have a video showcasing Vapi that you want us to feature? Let us know:
24 |
25 |
26 |
32 | Send us your video showcasing what Vapi can do, we'd like to feature it.
33 |
34 |
35 |
--------------------------------------------------------------------------------
/fern/community/television.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Television
3 | subtitle: Videos showcasing Vapi out in the wild.
4 | slug: community/television
5 | ---
6 |
7 |
8 | Here are some videos made by people in our community showcasing what Vapi can do:
9 |
10 |
11 |
19 |
20 |
21 | ## Send Us Your Video
22 |
23 | Have a video showcasing Vapi that you want us to feature? Let us know:
24 |
25 |
26 |
32 | Send us your video showcasing what Vapi can do, we'd like to feature it.
33 |
34 |
35 |
--------------------------------------------------------------------------------
/fern/custom.js:
--------------------------------------------------------------------------------
// Injects the Vapi voice-agent widget (a custom element) into the docs page.
// The widget bundle is served from localhost during local development and
// from the production deployment otherwise; once loaded, the element is
// mounted fixed to the bottom-right corner of the viewport.
const WIDGET_TAG = 'vapi-voice-agent-widget';

const { hostname } = window.location;
const isLocalhost = hostname === 'localhost' || hostname === '127.0.0.1';
const WIDGET_SCRIPT_URL = isLocalhost
  ? 'http://localhost:9001/widget.js'
  : 'https://docs-widget.vercel.app/widget.js';

function injectVapiWidget() {
  console.log('[custom.js] injectVapiWidget called');

  // Guard against double-injection if this runs more than once.
  if (document.querySelector(WIDGET_TAG)) {
    console.log('[custom.js] Widget already present in DOM');
    return;
  }

  const loader = document.createElement('script');
  loader.src = WIDGET_SCRIPT_URL;
  loader.async = true;
  loader.onload = () => {
    console.log('[custom.js] Widget script loaded');
    // The bundle registers the custom element; mount it now that it exists.
    const widget = document.createElement(WIDGET_TAG);
    widget.setAttribute('apiKey', '6d46661c-2dce-4032-b62d-64c151a14e0d');
    Object.assign(widget.style, {
      position: 'fixed',
      bottom: '0',
      right: '0',
      zIndex: '9999',
    });
    document.body.appendChild(widget);
    console.log('[custom.js] Widget element appended to DOM');
  };
  document.body.appendChild(loader);
  console.log('[custom.js] Widget script appended to DOM');
}

// Defer injection until the DOM is parsed; inject immediately if it already is.
if (document.readyState === 'loading') {
  console.log('[custom.js] Waiting for DOMContentLoaded');
  document.addEventListener('DOMContentLoaded', injectVapiWidget);
} else {
  injectVapiWidget();
}
--------------------------------------------------------------------------------
/fern/customization/custom-voices/custom-voice.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Custom voices
3 | subtitle: Use a custom voice with your preferred provider
4 | slug: customization/custom-voices/custom-voice
5 | ---
6 |
7 | You can use your own custom voice with any supported provider by setting the `voice` property in your assistant configuration:
8 |
9 | ```json
10 | {
11 | "voice": {
12 | "provider": "deepgram",
13 | "voiceId": "your-voice-id"
14 | }
15 | }
16 | ```
17 |
--------------------------------------------------------------------------------
/fern/customization/custom-voices/elevenlabs.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ElevenLabs
3 | subtitle: Set up a custom ElevenLabs voice in Vapi
4 | slug: customization/custom-voices/elevenlabs
5 | ---
6 |
7 | This guide outlines the procedure for integrating your cloned voice with ElevenLabs through the Vapi platform.
8 |
9 | An API subscription is required for this process to work.
10 |
11 |
12 |
13 | Visit the [ElevenLabs pricing page](https://elevenlabs.io/pricing) and subscribe to an API plan that suits your needs.
14 |
15 |
16 | Go to the 'Profile + Keys' section on the ElevenLabs website to get your API key.
17 |
18 |
19 | Navigate to the [Vapi Provider Key section](https://dashboard.vapi.ai/keys) and input your ElevenLabs API key under the ElevenLabs section.
20 |
21 | Once you click save, your voice library will sync automatically.
22 |
23 |
24 | After syncing, you can search for your cloned voice in the "voices" tab on the assistants page and use it with your assistant.
25 |
26 |
27 |
28 | **Video Tutorial:**
29 |
39 |
--------------------------------------------------------------------------------
/fern/customization/custom-voices/playht.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: PlayHT
3 | subtitle: Set up a custom PlayHT voice in Vapi
4 | slug: customization/custom-voices/playht
5 | ---
6 |
7 | You can use your own custom PlayHT voice with Vapi by following these steps.
8 |
9 |
10 | An API subscription is required for this process.
11 |
12 |
13 |
14 |
15 | Visit the [PlayHT pricing page](https://play.ht/studio/pricing) and subscribe to an API plan.
16 |
17 |
18 | Go to the [API Access section](https://play.ht/studio/api-access) on PlayHT to get your User ID and Secret Key.
19 |
20 |
21 | Navigate to the [Vapi Provider Key section](https://dashboard.vapi.ai/keys) and add your PlayHT API keys under the PlayHT section.
22 |
23 |
24 | From the [Voice Library](https://dashboard.vapi.ai/voice-library) in Vapi, select PlayHT as your voice provider and click on "Sync with PlayHT."
25 |
26 |
27 | After syncing, you can search for your cloned voice within the voice library and use it with your assistant.
28 |
29 |
30 |
31 | **Video tutorial:**
32 |
--------------------------------------------------------------------------------
/fern/customization/custom-voices/tavus.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Tavus
3 | subtitle: Set up a custom Tavus replica in Vapi
4 | slug: customization/custom-voices/tavus
5 | ---
6 |
7 | You can use your own custom Tavus replica with Vapi by following these steps.
8 |
9 |
10 | An API subscription is required for this process. These steps are only needed for custom Tavus replicas, not for stock replicas on the Vapi platform.
11 |
12 |
13 |
14 |
15 | Visit the [Tavus pricing page](https://platform.tavus.io/billing) and subscribe to an API plan.
16 |
17 |
18 | Go to the [API Keys section](https://platform.tavus.io/api-keys) on Tavus to get your API key.
19 |
20 |
21 | Navigate to the [Vapi Provider Key section](https://dashboard.vapi.ai/keys) and add your Tavus API key under the Tavus section.
22 |
23 |
24 | After adding your API key, select Tavus as your assistant's voice provider and add your Custom Replica ID manually through the dashboard. Alternatively, use the API and specify the replica ID in the `voiceId` field.
25 |
26 |
27 |
28 | **Video tutorial:**
29 |
--------------------------------------------------------------------------------
/fern/customization/multilingual.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Multilingual
3 | subtitle: Set up multilingual support for your assistant
4 | slug: customization/multilingual
5 | ---
6 |
7 | ## Overview
8 |
9 | We support dozens of providers, giving you access to their available models for multilingual support.
10 |
11 | Certain providers, like Google and Deepgram, have multilingual transcriber models that can transcribe audio in many languages.
12 |
13 | ## Transcribers (Speech-to-Text)
14 |
15 | In the dashboard's assistant tab, click on "transcriber" to view all of the available providers, languages and models for each. Each model offers different language options.
16 |
17 | ## Voice (Text-to-Speech)
18 |
19 | Each provider includes a voice tag in the name of their voice. For example, Azure offers the `es-ES-ElviraNeural` voice for Spanish. Go to the voice tab on the assistants page to see all of the available models.
20 |
21 | ### Example: Setting Up a Spanish Voice Assistant
22 |
23 | ```json
24 | {
25 | "voice": {
26 | "provider": "azure",
27 | "voiceId": "es-ES-ElviraNeural"
28 | }
29 | }
30 | ```
31 |
32 | In this example, the voice `es-ES-ElviraNeural` from Azure supports Spanish. Replace `es-ES-ElviraNeural` with any other voice ID that supports your desired language.
33 |
--------------------------------------------------------------------------------
/fern/enterprise/plans.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Vapi Enterprise
3 | subtitle: Build and scale with Vapi.
4 | slug: enterprise/plans
5 | ---
6 |
7 |
8 | If you're building a production application on Vapi, we can help you every step of the way from idea to full-scale deployment.
9 |
10 | #### Enterprise Plans include:
11 |
12 | - Unlimited concurrency and higher rate limits
13 | - Reserved capacity on our weekly deployment cluster
14 | - Hands-on 24/7 support with dedicated solutions engineer
15 | - Shared Slack channel with our team
16 | - Regular check-in calls with our team
--------------------------------------------------------------------------------
/fern/faq.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Frequently Asked Questions
3 | subtitle: Frequently asked questions about Vapi.
4 | slug: faq
5 | ---
6 |
7 |
8 |
--------------------------------------------------------------------------------
/fern/fern.config.json:
--------------------------------------------------------------------------------
1 | {
2 | "organization": "vapi",
3 | "version": "0.63.35"
4 | }
--------------------------------------------------------------------------------
/fern/info-hierarchy.mdx:
--------------------------------------------------------------------------------
1 | ### Information Hierarchy
2 |
3 | #### Current
4 | * Overview
5 | * Platform
6 | * Assistants
7 | * Phone Numbers
8 | * Files
9 | * Tools
10 | * Blocks
11 | * Squads
12 | * Voice Library
13 | * Logs
14 | * Calls
15 | * API Requests
16 | * Webhooks
17 |
18 | #### Proposed Dashboard Hierarchy
19 | * Overview
20 | * Build
21 | * Assistants
22 | * Workflows
23 | * Phone Numbers
24 | * Tools
25 | * Files
26 | * Squads
27 | * Test
28 | * Voice Test Suites
29 | * Observe
30 | * Call Logs
31 | * API Logs
32 | * Webhook Logs
33 | * Community
34 | * Task Library
35 | * Workflow Library
36 | * Voice Library
37 | * Model Library
38 | * Profile
39 | * Organizations
40 | * LIST
41 | * Admin
42 | * Billing
43 | * Members
44 | * Settings
45 | * API Keys
46 | * Provider Credentials
47 | * Light/Dark Toggle
48 | * Log Out
49 |
50 | #### Docs Hierarchy
51 | * Getting Started
52 | * Build
53 | * Assistants
54 | * Workflows <--
55 | * Tools
56 | * Knowledge Base
57 | * Squads
58 | * Test
59 | * Voice Testing <--
60 | * Deploy
61 | * Phone Numbers
62 | * Calls
63 | * Community
64 | * Tasks
65 | * Workflows
66 | * Voices
67 | * Models
68 | * Transcribers
69 | * Admin
70 | * Billing
71 | * Org -- Enterprise
72 | * Org management
73 | * Provider Keys
--------------------------------------------------------------------------------
/fern/openai-realtime.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: OpenAI Realtime
3 | subtitle: You can use OpenAI's newest speech-to-speech model with your Vapi assistants.
4 | slug: openai-realtime
5 | ---
6 |
7 |
8 | The Realtime API is currently in beta, and not recommended for production use by OpenAI. We're excited to have you try this new feature and welcome your [feedback](https://discord.com/invite/pUFNcf2WmH) as we continue to refine and improve the experience.
9 |
10 |
11 | OpenAI’s Realtime API enables developers to use a native speech-to-speech model. Unlike other Vapi configurations which orchestrate a transcriber, model and voice API to simulate speech-to-speech, OpenAI’s Realtime API natively processes audio in and audio out.
12 |
13 | To start using it with your Vapi assistants, select `gpt-4o-realtime-preview-2024-12-17` as your model.
14 | - Please note that only OpenAI voices may be selected while using this model. The voice selection will not act as a TTS (text-to-speech) model, but rather as the voice used within the speech-to-speech model.
15 | - Also note that we don’t currently support Knowledge Bases with the Realtime API.
16 | - Lastly, note that our Realtime integration still retains the rest of Vapi's orchestration layer such as Endpointing and Interruption models to enable a reliable conversational flow.
--------------------------------------------------------------------------------
/fern/phone-numbers.mdx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs/01375f457235b8d7ef065ac4f898a10c846c6fb4/fern/phone-numbers.mdx
--------------------------------------------------------------------------------
/fern/providers/cloud/s3.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: AWS S3
3 | subtitle: Store recordings of chat conversations in AWS S3
4 | slug: providers/cloud/s3
5 | ---
6 |
7 | Your assistants can be configured to record chat conversations and upload
8 | the recordings to a bucket in AWS S3 when the conversation ends. You will
9 | need to configure the credential and bucket settings in the "Cloud Providers"
10 | section of the "Provider Credentials" page in the Vapi dashboard.
11 |
12 | See these [instructions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-keys-admin-managed.html) for generating AWS access keys.
13 |
14 | ## Credential Settings
15 |
16 | Setting | Description
17 | ------------------------ | -------------------------------------------------------
18 | AWS Access Key ID | The access key id for AWS
19 | AWS Secret Access Key | The secret access key for AWS
20 | S3 Bucket Name | The name of the bucket to upload recordings to
21 | S3 Path Prefix | An optional path prefix for recordings uploaded to the bucket
22 |
23 | ## Example
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/fern/providers/cloud/supabase.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Supabase S3 Storage
3 | subtitle: Store recordings of chat conversations in Supabase Storage
4 | slug: providers/cloud/supabase
5 | ---
6 |
7 | Your assistants can be configured to record chat conversations and upload
8 | the recordings to a bucket in Supabase Storage when the conversation ends. You will
9 | need to configure the credential and bucket settings in the "Cloud Providers"
10 | section of the "Provider Credentials" page in the Vapi dashboard.
11 |
12 | See these [instructions](https://supabase.com/docs/guides/storage/s3/authentication) for generating Supabase tokens and access keys, and finding your endpoint and region.
13 |
14 | ## Credential Settings
15 |
16 | Setting | Description
17 | ------------------------- | -------------------------------------------------------
18 | Bucket Name | The name of the bucket in Supabase Storage to upload recordings to
19 | Storage Region | The region of the Supabase project
20 | Storage Endpoint | The endpoint of the Supabase Storage to upload recordings to
21 | Bucket Path Prefix | An optional path prefix for recordings uploaded to the bucket
22 | Storage Access Key ID | The access key id for Supabase Storage
23 | Storage Secret Access Key | The secret access key for Supabase Storage, associated with the access key id
24 |
25 | ## Example
26 |
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/fern/providers/voice/sesame.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Sesame
3 | subtitle: What is Sesame CSM-1B?
4 | slug: providers/voice/sesame
5 | ---
6 |
7 | **What is Sesame CSM-1B?**
8 |
9 | Sesame CSM-1B is an open source text-to-speech (TTS) model that Vapi hosts for seamless integration into your voice applications. This model delivers natural-sounding speech synthesis with a default voice option and voice cloning capabilities.
10 |
11 | **Key Features:**
12 |
13 | - **Vapi-Hosted Solution**: Access this open source model directly through Vapi without managing your own infrastructure
14 | - **Voice Options**: Offers a default voice and voice cloning capabilities
15 |
16 | **Integration Benefits:**
17 |
18 | - Simplified setup with no need to self-host the model
19 | - Consistent performance through Vapi's optimized infrastructure
20 | - Seamless compatibility with all Vapi voice applications
21 |
22 | **Use Cases:**
23 |
24 | - Virtual assistants and conversational AI
25 | - Content narration and audio generation
26 | - Interactive voice applications
27 | - Prototyping voice-driven experiences
28 |
29 | **Voice Cloning:**
30 |
31 | 
32 |
33 | Sesame supports voice cloning. To clone a voice:
34 | 1. Navigate to the additional configuration tab (below the voice tab) on the assistants page
35 | 2. Upload a WAV file containing your voice sample
36 | 3. Provide the transcript of the audio file
37 | 4. Name your custom voice
38 |
39 | **Current Limitations:**
40 |
41 | The model currently has some limitations. Additional features may be introduced in future updates.
--------------------------------------------------------------------------------
/fern/sdks.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Client SDKs
3 | subtitle: Put Vapi assistants on every platform.
4 | slug: sdks
5 | ---
6 |
7 | The Vapi Client SDKs automatically configure audio streaming to and from the client, and provide a simple interface for starting calls. The interface is equivalent across all the SDKs.
8 |
9 | The SDKs are open source, and available on GitHub:
10 |
11 |
12 |
13 | Add a Vapi assistant to your web application.
14 |
15 |
20 | Add a Vapi assistant to your iOS app.
21 |
22 |
27 | Add a Vapi assistant to your Flutter app.
28 |
29 |
34 | Add a Vapi assistant to your React Native app.
35 |
36 |
41 | Multi-platform. Mac, Windows, and Linux.
42 |
43 |
44 |
45 | ---
46 |
47 |
48 | - `speech-start`, `speech-end`, and `volume-level` for creating animations.
49 | - `message` for receiving messages sent to the [Server URL](/server-url) locally
50 |   on the client, so you can show live transcriptions and use function calls to
51 |   perform actions on the client.
52 |
53 |
--------------------------------------------------------------------------------
/fern/security-and-privacy/privacy-policy.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Privacy Policy
3 | slug: security-and-privacy/privacy-policy
4 | ---
5 |
6 |
7 | Our Privacy Policy is hosted at [https://vapi.ai/privacy](https://vapi.ai/privacy)
8 |
--------------------------------------------------------------------------------
/fern/security-and-privacy/soc.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: SOC-2 Compliance
3 | slug: security-and-privacy/soc
4 | ---
5 |
6 |
--------------------------------------------------------------------------------
/fern/security-and-privacy/tos.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Terms of Service
3 | slug: security-and-privacy/tos
4 | ---
5 |
6 |
7 |
8 | Our Terms of Service is hosted at
9 | [https://vapi.ai/terms-of-service](https://vapi.ai/terms-of-service)
10 |
11 |
--------------------------------------------------------------------------------
/fern/server-sdks.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Server SDKs
3 | subtitle: Put Vapi assistants on every platform.
4 | slug: server-sdks
5 | ---
6 |
7 | Vapi provides server-side SDKs to help developers quickly integrate and manage voice AI capabilities into their applications. Our SDKs allow seamless interaction with Vapi's API across a wide range of programming languages, ensuring you can choose the stack that best suits your needs.
8 |
9 | The SDKs are open source, and available on GitHub:
10 |
11 |
12 |
13 | Add a Vapi assistant to your Python application.
14 |
15 |
16 | Add a Vapi assistant to your TypeScript application.
17 |
18 |
19 | Add a Vapi assistant to your Java application.
20 |
21 |
22 | Add a Vapi assistant to your Ruby application.
23 |
24 |
25 | Add a Vapi assistant to your C#/.NET application.
26 |
27 |
28 | Add a Vapi assistant to your Go application.
29 |
30 |
--------------------------------------------------------------------------------
/fern/snippets/quickstart/dashboard/provision-phone-number-with-vapi.mdx:
--------------------------------------------------------------------------------
1 | The quickest way to secure a phone number for your assistant is to create a phone number directly through Vapi.
2 |
3 | Navigate to the "Phone Numbers" section & click the "Create Phone Number" button:
4 |
5 |
6 |
7 |
8 |
9 | We will use the area code `415` for our phone number (area codes must be domestic to the US).
10 |
11 |
12 |
13 |
14 |
15 |
16 | Currently, only US phone numbers can be directly created through Vapi. Phone numbers in
17 | other regions must be imported, see our [phone calling](/phone-calling) guide.
18 |
19 |
20 | Click "Create", after creating a phone number you should see something like this:
21 |
22 |
23 |
24 |
25 |
26 |
27 | It takes a couple of minutes for the phone number to be fully activated. During this period, calls will not be functional.
28 |
29 |
30 | Once activated, the phone number will be ready for use (either for inbound or outbound calling).
31 |
--------------------------------------------------------------------------------
/fern/snippets/quickstart/platform-specific/no-code-prerequisites.mdx:
--------------------------------------------------------------------------------
1 |
2 | The following quickstart guides **require no code** & will give you a good framework for understanding
3 | how Vapi works.
4 |
5 | They may be helpful to go through before following this guide:
6 |
7 |
8 |
9 | The easiest way to start with Vapi. Build a voice agent in 5 minutes.
10 |
11 |
17 | Integrate voice calls into your web application.
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/fern/snippets/quickstart/web/links.tsx:
--------------------------------------------------------------------------------
1 | export const quickstartDemoLink = "https://stackblitz.com/~/github.com/VapiAI/quickstart-react";
2 |
--------------------------------------------------------------------------------
/fern/snippets/sdk.mdx:
--------------------------------------------------------------------------------
1 | export const SdkCards = ({ iconColor }) => (
2 |
3 |
4 | Add a Vapi assistant to your web application.
5 |
6 |
12 | Add a Vapi assistant to your iOS app.
13 |
14 |
20 | Add a Vapi assistant to your Flutter app.
21 |
22 |
28 | Add a Vapi assistant to your React Native app.
29 |
30 |
36 | Multi-platform. Mac, Windows, and Linux.
37 |
38 |
39 | );
40 |
--------------------------------------------------------------------------------
/fern/snippets/sdks/web/import-web-sdk.mdx:
--------------------------------------------------------------------------------
1 | Import the Vapi Web SDK package.
2 |
3 | ```javascript
4 | import Vapi from "@vapi-ai/web";
5 | ```
6 |
--------------------------------------------------------------------------------
/fern/snippets/sdks/web/install-web-sdk.mdx:
--------------------------------------------------------------------------------
1 | Install the package with your preferred package manager.
2 |
3 |
4 | ```bash title="npm"
5 | npm install @vapi-ai/web
6 | ```
7 |
8 | ```bash title="yarn"
9 | yarn add @vapi-ai/web
10 | ```
11 |
12 | ```bash title="pnpm"
13 | pnpm add @vapi-ai/web
14 | ```
15 |
16 | ```bash title="bun"
17 | bun add @vapi-ai/web
18 | ```
19 |
20 |
--------------------------------------------------------------------------------
/fern/snippets/sdks/web/pass-api-keys.mdx:
--------------------------------------------------------------------------------
1 | Create a new instance of the Vapi class, passing one of the following as a parameter to the constructor:
2 | - Your *public key* — find it in the [Vapi Dashboard](https://dashboard.vapi.ai/account)
3 | - A generated **JWT**
4 |
5 | ```javascript
6 | const vapi = new Vapi("your-public-key-or-jwt");
7 | ```
8 |
9 | You can find your public key in the [Vapi Dashboard](https://dashboard.vapi.ai/account).
10 | You can generate a JWT on the backend following [JWT Authentication](/customization/jwt-authentication) instructions.
--------------------------------------------------------------------------------
/fern/snippets/video/video.css:
--------------------------------------------------------------------------------
1 | /* for a header video */
2 | 
3 | /* Responsive 16:9 container: percentage padding-top is computed from the
   element's width, so this reserves height proportional to width and the
   embed scales with the page. */
.video-embed-wrapper {
  position: relative;
  width: 100%;
  padding-top: 56.25%; /* 16:9 Aspect Ratio (divide 9 by 16 = 0.5625) */
}

/* Stretch the iframe to fill the reserved 16:9 box. */
.video-embed-wrapper iframe {
  position: absolute;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
}
16 |
--------------------------------------------------------------------------------
/fern/snippets/video/videos.tsx:
--------------------------------------------------------------------------------
1 | export const YouTubeEmbed = ({ videoUrl, altTitle }) => {
2 |
3 | return
4 |
5 |