├── .DS_Store
├── .vscode
└── settings.json
├── GHL.mdx
├── README.md
├── advanced
└── calls
│ └── sip.mdx
├── api-reference
├── analytics
│ └── create-analytics-queries.mdx
├── assistants
│ ├── create-assistant.mdx
│ ├── delete-assistant.mdx
│ ├── get-assistant.mdx
│ ├── list-assistants.mdx
│ └── update-assistant.mdx
├── call-logs
│ └── list-call-logs.mdx
├── calls
│ ├── create-call.mdx
│ ├── create-phone-call.mdx
│ ├── delete-call-data.mdx
│ ├── delete-call.mdx
│ ├── get-call.mdx
│ ├── list-calls.mdx
│ └── update-call.mdx
├── credentials
│ ├── create-credential.mdx
│ ├── delete-credential.mdx
│ ├── get-credential.mdx
│ ├── list-credentials.mdx
│ └── update-credential.mdx
├── files
│ ├── delete-file.mdx
│ ├── get-file.mdx
│ ├── list-files.mdx
│ ├── update-file.mdx
│ └── upload-file.mdx
├── logs
│ └── list-logs.mdx
├── messages
│ ├── client-inbound-message.mdx
│ ├── client-message.mdx
│ ├── server-message-response.mdx
│ └── server-message.mdx
├── metrics
│ └── list-metrics.mdx
├── openapi.mdx
├── orgs
│ ├── create-org.mdx
│ ├── get-org.mdx
│ ├── invite-user.mdx
│ ├── leave-org.mdx
│ ├── list-orgs.mdx
│ ├── list-users.mdx
│ └── update-org.mdx
├── phone-numbers
│ ├── buy-phone-number.mdx
│ ├── create-phone-number.mdx
│ ├── delete-phone-number.mdx
│ ├── get-phone-number.mdx
│ ├── import-twilio-number.mdx
│ ├── import-vonage-number.mdx
│ ├── list-phone-numbers.mdx
│ └── update-phone-number.mdx
├── providers
│ ├── add-shared-voice-to-your-provider-account.mdx
│ ├── get--locations.mdx
│ ├── get--voicessearch.mdx
│ ├── get--workflows-hooks.mdx
│ └── get--workflows.mdx
├── squads
│ ├── create-squad.mdx
│ ├── delete-squad.mdx
│ ├── get-squad.mdx
│ ├── list-squads.mdx
│ └── update-squad.mdx
├── swagger.mdx
├── tools
│ ├── create-tool.mdx
│ ├── delete-tool.mdx
│ ├── get-tool.mdx
│ ├── list-tools.mdx
│ └── update-tool.mdx
└── voice-library
│ ├── get-voices-in-voice-library-by-providers.mdx
│ ├── sync-global-voices-in-voice-library-by-providers.mdx
│ └── sync-voices-in-voice-library-by-providers.mdx
├── api.json
├── assistants.mdx
├── assistants
├── background-messages.mdx
├── call-analysis.mdx
├── dynamic-variables.mdx
├── function-calling.mdx
└── persistent-assistants.mdx
├── audio-interface.mdx
├── billing
├── billing-limits.mdx
├── cost-routing.mdx
├── estimating-costs.mdx
└── examples.mdx
├── blocks.mdx
├── blocks
├── block-types.mdx
├── create-blocks-tutorial.mdx
└── steps.mdx
├── call-forwarding.mdx
├── calls
├── call-ended-reason.mdx
└── call-features.mdx
├── challenges-of-realtime-conversation.mdx
├── changelog.mdx
├── community
├── appointment-scheduling.mdx
├── assistant-request.mdx
├── community.css
├── comparisons.mdx
├── conferences.mdx
├── demos.mdx
├── expert-directory.mdx
├── ghl.mdx
├── guide.mdx
├── inbound.mdx
├── knowledgebase.mdx
├── myvapi.mdx
├── outbound.mdx
├── podcast.mdx
├── snippets-sdks-tutorials.mdx
├── special-mentions.mdx
├── squads.mdx
├── television.mdx
├── tools-calling.mdx
└── usecase.mdx
├── customization
├── custom-keywords.mdx
├── custom-llm
│ ├── fine-tuned-openai-models.mdx
│ └── using-your-server.mdx
├── custom-voices
│ ├── custom-voice.mdx
│ ├── elevenlabs.mdx
│ └── playht.mdx
├── jwt-authentication.mdx
├── knowledgebase.mdx
├── multilingual.mdx
├── provider-keys.mdx
└── speech-configuration.mdx
├── enterprise
├── onprem.mdx
└── plans.mdx
├── examples
├── inbound-support.mdx
├── outbound-call-python.mdx
├── outbound-sales.mdx
├── pizza-website.mdx
└── voice-widget.mdx
├── faq.mdx
├── glossary.mdx
├── how-vapi-works.mdx
├── introduction.mdx
├── knowledgebase.mdx
├── mint.json
├── phone-calling.mdx
├── pricing.mdx
├── prompting-guide.mdx
├── providers
├── model
│ ├── anthropic.mdx
│ ├── deepinfra.mdx
│ ├── groq.mdx
│ ├── openai.mdx
│ ├── openrouter.mdx
│ ├── perplexity.mdx
│ └── togetherai.mdx
├── transcriber
│ ├── deepgram.mdx
│ ├── gladia.mdx
│ └── talkscriber.mdx
├── voice
│ ├── azure.mdx
│ ├── cartesia.mdx
│ ├── deepgram.mdx
│ ├── elevenlabs.mdx
│ ├── imnt.mdx
│ ├── neets.mdx
│ ├── openai.mdx
│ ├── playht.mdx
│ └── rimeai.mdx
└── voiceflow.mdx
├── quickstart.mdx
├── quickstart
├── dashboard.mdx
├── phone
│ ├── inbound.mdx
│ └── outbound.mdx
└── web.mdx
├── resources.mdx
├── script.js
├── sdk
└── web.mdx
├── sdks.mdx
├── security-and-privacy
├── hipaa.mdx
├── privacy-policy.mdx
├── soc.mdx
└── tos.mdx
├── server-url.mdx
├── server-url
├── developing-locally.mdx
├── events.mdx
├── securing-endpoints.mdx
├── setting-server-urls.mdx
└── url-priority.mdx
├── snippets
├── faq-snippet.mdx
├── quickstart
│ ├── dashboard
│ │ ├── assistant-setup-inbound.mdx
│ │ └── provision-phone-number-with-vapi.mdx
│ ├── phone
│ │ └── get-a-phone-number.mdx
│ ├── platform-specific
│ │ └── no-code-prerequisites.mdx
│ └── web
│ │ └── links.mdx
├── sdk.mdx
├── sdks
│ └── web
│ │ ├── import-web-sdk.mdx
│ │ └── install-web-sdk.mdx
└── video
│ ├── video.css
│ └── videos.mdx
├── squads-example.mdx
├── squads.mdx
├── static
└── images
│ ├── .DS_Store
│ ├── blocks
│ ├── food-order-steps.png
│ ├── recording_20241018_191621.webm
│ └── screenshots_run_20241018_191537
│ │ ├── click_flow_controls_button.png
│ │ ├── click_flow_controls_button_again.png
│ │ ├── click_save_inside_step_config.png
│ │ ├── click_save_inside_tool_call_block_config.png
│ │ ├── clicking_on_block_config.png
│ │ ├── clicking_on_blocks.png
│ │ ├── clicking_on_create_workflow.png
│ │ ├── clicking_on_new_blocks_workflow.png
│ │ ├── clicking_on_platform.png
│ │ ├── clicking_on_save_inside_conversation_block_config.png
│ │ ├── clicking_on_save_inside_step_config_start_block.png
│ │ ├── clicking_on_schedule_demo_quantstruct.png
│ │ ├── clicking_on_step_config_inside_start_block.png
│ │ ├── enter_prompt_inside_schedule_demo_time_block_config.png
│ │ ├── entering_workflow_name_schedule_demo_quantstruct.png
│ │ ├── insert_date_time_schedule_variable.png
│ │ ├── navigate_to_vapi_dashboard.png
│ │ ├── replace_text_inside_start_block_with_prompt_for_good_time_to_talk.png
│ │ └── replace_text_inside_textarea_with_new_text.png
│ ├── changelog
│ ├── credit-based-billing-oct-10.png
│ ├── invite-multiple-users.png
│ ├── organization-settings-page.png
│ ├── subscription-coupon-codes.png
│ └── tavus-voice.png
│ ├── favicon.png
│ ├── intro
│ └── custom-vs-vapi.png
│ ├── learn
│ ├── billing
│ │ ├── billing-example-template.png
│ │ ├── billing-limits-exceeded.png
│ │ ├── billing-limits.png
│ │ ├── call-pricing-breakdown.png
│ │ ├── cost-estimate.gif
│ │ ├── cost-routing.png
│ │ ├── custom-model-inbound-phone-example.png
│ │ ├── outbound-phone-example.png
│ │ └── web-interviews-example.png
│ └── platform
│ │ └── vapi-orchestration.png
│ ├── logo
│ ├── logo-dark.png
│ └── logo-light.png
│ ├── pricing
│ └── voice-pipeline-cost-breakdown.png
│ ├── quickstart
│ ├── assistant-id-dashboard.png
│ ├── dashboard
│ │ ├── assistant-created.png
│ │ ├── assistant-model-set-up.png
│ │ ├── assistant-transcriber-config.png
│ │ ├── assistant-voice-config.png
│ │ ├── auth-ui.png
│ │ ├── buy-a-phone-number.png
│ │ ├── buy-phone-number-modal.png
│ │ ├── call-assistant-web-dashboard.png
│ │ ├── choose-blank-template.png
│ │ ├── create-new-assistant-button.png
│ │ ├── inbound-assistant-set.png
│ │ ├── model-provider-keys.png
│ │ ├── name-your-assistant.png
│ │ ├── phone-number-config.png
│ │ ├── transcriber-providers-keys.png
│ │ ├── vapi-dashboard-post-signup.png
│ │ └── voice-provider-keys.png
│ ├── phone
│ │ ├── buy-phone-number-twilio.png
│ │ ├── dashboard-import-phone-number.png
│ │ ├── import-twilio-number-dashboard.png
│ │ ├── outbound
│ │ │ ├── assistant-model-setup.png
│ │ │ └── dial-outbound-call-dashboard.png
│ │ ├── phone-number-import-complete.png
│ │ ├── set-billing-information.png
│ │ ├── twilio-api-key-nav.png
│ │ └── twilio-credentials.png
│ ├── quickstart-banner.png
│ ├── vapis-pizzeria.png
│ └── web
│ │ └── microphone-permissions.png
│ └── server-url
│ ├── developing-locally
│ ├── logging-events-locally.png
│ ├── ngrok-cli-ui.png
│ └── reverse-proxy-developing-locally.png
│ ├── overview-graphic.png
│ └── settings-server-urls
│ ├── assistant-server-url-dashboard.png
│ ├── function-call-server-url-dashboard.png
│ ├── org-settings-server-urls.png
│ ├── server-url-priority.png
│ └── setting-account-server-url.png
├── status.mdx
├── support.mdx
└── tools-calling.mdx
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/.DS_Store
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "cSpell.words": ["openapi", "vonage"]
3 | }
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Vapi Docs
2 |
3 | ### Setup
4 |
5 | `npm install -g mintlify`
6 |
7 | ### Development
8 |
9 | `mintlify dev`
10 |
11 |
12 | ### Errors
13 |
14 | If you encounter an issue, try setting the "openapi" value from "https://api.vapi.ai/api-json" to "./api.json" in mint.json.
--------------------------------------------------------------------------------
/advanced/calls/sip.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "SIP"
3 | sidebarTitle: "SIP"
4 | description: "You can make SIP calls to Vapi Assistants."
5 | ---
6 |
7 |
8 |
9 | ## 1. Create an Assistant
10 |
11 | We'll create an assistant with `POST /assistant` endpoint. This is no different than creating an assistant for other transports.
12 |
13 | ```json
14 | {
15 | "name": "My SIP Assistant",
16 | "firstMessage": "Hello {{first_name}}, you've reached me over SIP. How can I help you today?"
17 | }
18 |
19 | ```
20 |
21 |
22 |
23 |
24 |
25 | ## 2. Create A SIP Phone Number
26 |
27 | We'll create a SIP phone number with `POST /phone-number` endpoint.
28 |
29 | ```json
30 | {
31 | "provider": "vapi",
32 | "sipUri": "sip:your_unique_user_name@sip.vapi.ai",
33 | "assistantId": "your_assistant_id"
34 | }
35 |
36 | ```
37 |
38 | `sipUri` is the SIP URI of the phone number. It must be in the format `sip:username@sip.vapi.ai`. You are free to choose any username you like.
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 | ## 3. Start a SIP call.
47 |
48 | You can use any SIP softphone to test the Assistant. Examples include [Zoiper](https://www.zoiper.com/) or [Linphone](https://www.linphone.org/).
49 |
50 | You just need to dial `sip:your_unique_user_name@sip.vapi.ai` and the Assistant will answer your call.
51 |
52 | There is no authentication or SIP registration required.
53 |
54 |
55 |
56 |
57 |
58 | ## 4. Send SIP Headers to Fill Template Variables.
59 |
60 | To fill your template variables, you can send custom SIP headers.
61 |
62 | For example, to fill the `first_name` variable, you can send a SIP header `x-first_name: John`.
63 |
64 | The header name is case-insensitive, so `X-First_Name`, `x-first_name`, and `X-FIRST_NAME` are all equivalent.
65 |
66 |
67 |
68 |
69 |
70 | ## 5. Use a Custom Assistant For Each Call.
71 |
72 | You can use a custom assistant for SIP calls, the same as for phone calls.
73 |
74 | Set the `assistantId` to `null` and the `serverUrl` to the URL of your server which will respond to the `assistant-request`.
75 |
76 | `PATCH /phone-number/:id`
77 | ```json
78 | {
79 | "assistantId": null,
80 | "serverUrl": "https://your_server_url"
81 | }
82 | ```
83 |
84 | Now, every time you make a call to this phone number, the server will receive an `assistant-request` event.
85 |
86 |
87 |
88 |
--------------------------------------------------------------------------------
/api-reference/analytics/create-analytics-queries.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /analytics
3 | ---
--------------------------------------------------------------------------------
/api-reference/assistants/create-assistant.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /assistant
3 | ---
--------------------------------------------------------------------------------
/api-reference/assistants/delete-assistant.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /assistant/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/assistants/get-assistant.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /assistant/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/assistants/list-assistants.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /assistant
3 | ---
--------------------------------------------------------------------------------
/api-reference/assistants/update-assistant.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: patch /assistant/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/call-logs/list-call-logs.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /log
3 | ---
--------------------------------------------------------------------------------
/api-reference/calls/create-call.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /call
3 | ---
--------------------------------------------------------------------------------
/api-reference/calls/create-phone-call.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /call/phone
3 | ---
--------------------------------------------------------------------------------
/api-reference/calls/delete-call-data.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /call/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/calls/delete-call.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /call/phone
3 | ---
--------------------------------------------------------------------------------
/api-reference/calls/get-call.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /call/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/calls/list-calls.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /call
3 | ---
--------------------------------------------------------------------------------
/api-reference/calls/update-call.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: patch /call/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/credentials/create-credential.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /credential
3 | ---
--------------------------------------------------------------------------------
/api-reference/credentials/delete-credential.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /credential/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/credentials/get-credential.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /credential/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/credentials/list-credentials.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /credential
3 | ---
--------------------------------------------------------------------------------
/api-reference/credentials/update-credential.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: put /credential/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/files/delete-file.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /file/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/files/get-file.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /file/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/files/list-files.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /file
3 | ---
--------------------------------------------------------------------------------
/api-reference/files/update-file.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: patch /file/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/files/upload-file.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /file
3 | ---
--------------------------------------------------------------------------------
/api-reference/logs/list-logs.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /log
3 | ---
--------------------------------------------------------------------------------
/api-reference/messages/client-inbound-message.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ClientInboundMessage
3 | openapi-schema: ClientInboundMessage
4 | ---
--------------------------------------------------------------------------------
/api-reference/messages/client-message.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ClientMessage
3 | openapi-schema: ClientMessage
4 | ---
--------------------------------------------------------------------------------
/api-reference/messages/server-message-response.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ServerMessageResponse
3 | openapi-schema: ServerMessageResponse
4 | ---
--------------------------------------------------------------------------------
/api-reference/messages/server-message.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ServerMessage
3 | openapi-schema: ServerMessage
4 | ---
--------------------------------------------------------------------------------
/api-reference/metrics/list-metrics.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /metrics
3 | ---
--------------------------------------------------------------------------------
/api-reference/openapi.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "OpenAPI"
3 | url: "https://api.vapi.ai/api-json"
4 | ---
5 |
6 |
7 | Our OpenAPI is hosted at
8 | [https://api.vapi.ai/api-json](https://api.vapi.ai/api-json)
9 |
10 |
--------------------------------------------------------------------------------
/api-reference/orgs/create-org.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /org
3 | ---
--------------------------------------------------------------------------------
/api-reference/orgs/get-org.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /org/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/orgs/invite-user.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /org/{id}/invite
3 | ---
--------------------------------------------------------------------------------
/api-reference/orgs/leave-org.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /org/{id}/leave
3 | ---
--------------------------------------------------------------------------------
/api-reference/orgs/list-orgs.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /org
3 | ---
--------------------------------------------------------------------------------
/api-reference/orgs/list-users.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /org/{id}/user
3 | ---
--------------------------------------------------------------------------------
/api-reference/orgs/update-org.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: patch /org/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/buy-phone-number.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /phone-number/buy
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/create-phone-number.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /phone-number
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/delete-phone-number.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /phone-number/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/get-phone-number.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /phone-number/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/import-twilio-number.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /phone-number/import/twilio
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/import-vonage-number.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /phone-number/import/vonage
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/list-phone-numbers.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /phone-number
3 | ---
--------------------------------------------------------------------------------
/api-reference/phone-numbers/update-phone-number.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: patch /phone-number/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/providers/add-shared-voice-to-your-provider-account.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /{provider}/voices/add
3 | ---
--------------------------------------------------------------------------------
/api-reference/providers/get--locations.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /{provider}/locations
3 | ---
--------------------------------------------------------------------------------
/api-reference/providers/get--voicessearch.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /{provider}/voices/search
3 | ---
--------------------------------------------------------------------------------
/api-reference/providers/get--workflows-hooks.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /{provider}/workflows/{workflowId}/hooks
3 | ---
--------------------------------------------------------------------------------
/api-reference/providers/get--workflows.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /{provider}/workflows
3 | ---
--------------------------------------------------------------------------------
/api-reference/squads/create-squad.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /squad
3 | ---
--------------------------------------------------------------------------------
/api-reference/squads/delete-squad.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /squad/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/squads/get-squad.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /squad/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/squads/list-squads.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /squad
3 | ---
--------------------------------------------------------------------------------
/api-reference/squads/update-squad.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: patch /squad/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/swagger.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Swagger"
3 | url: "https://api.vapi.ai/api"
4 | ---
5 |
6 |
7 | Our Swagger is hosted at [https://api.vapi.ai/api](https://api.vapi.ai/api)
8 |
9 |
--------------------------------------------------------------------------------
/api-reference/tools/create-tool.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /tool
3 | ---
--------------------------------------------------------------------------------
/api-reference/tools/delete-tool.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: delete /tool/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/tools/get-tool.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /tool/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/tools/list-tools.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /tool
3 | ---
--------------------------------------------------------------------------------
/api-reference/tools/update-tool.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: patch /tool/{id}
3 | ---
--------------------------------------------------------------------------------
/api-reference/voice-library/get-voices-in-voice-library-by-providers.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: get /voice-library/{provider}
3 | ---
--------------------------------------------------------------------------------
/api-reference/voice-library/sync-global-voices-in-voice-library-by-providers.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /voice-library/sync
3 | ---
--------------------------------------------------------------------------------
/api-reference/voice-library/sync-voices-in-voice-library-by-providers.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | openapi: post /voice-library/sync/{provider}
3 | ---
--------------------------------------------------------------------------------
/assistants.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Introduction"
3 | sidebarTitle: "Introduction"
4 | description: "The core building-block of voice agents on Vapi."
5 | ---
6 |
7 | **Assistant** is a fancy word for an AI configuration that can be used across phone calls and Vapi clients. Your voice assistant can augment your customer support and experience for call centers, business websites, mobile apps, and much more.
8 |
9 | There are three core components: **Transcriber**, **Model**, and **Voice**. These can be configured, mixed, and matched for your use case. There are also various other configurable properties you can find [here](/api-reference/assistants/create-assistant). Below, check out some ways you can layer in powerful customizations and features to meet any use case.
10 |
11 | ## Advanced Concepts
12 |
13 |
14 |
15 | Add your API keys for other providers
16 |
17 |
18 | Plug in your own LLM
19 |
20 |
21 | Forward and hang up with function calls
22 |
23 |
28 | Which setup is best for you?
29 |
30 |
31 |
--------------------------------------------------------------------------------
/assistants/background-messages.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Background Messaging"
3 | sidebarTitle: "Background Messages"
4 | description: "Vapi SDK lets you silently update the chat history through efficient text message integration. This is particularly useful for background tasks or discreetly logging user interactions."
5 | ---
6 |
7 | ## Scenario Overview
8 |
9 | As a developer, you may run into scenarios where a user action, such as pressing a button, needs to be logged in the chat history without overt user involvement. This could be crucial for maintaining conversation context or for system logging purposes.
10 |
11 |
12 |
13 | Add a button to your interface with an `onClick` event handler that will call a function to send the system message:
14 | ```html
15 |
16 | ```
17 |
18 |
19 |
20 | When the button is clicked, the `logUserAction` function will silently insert a system message into the chat history:
21 | ```js
22 | function logUserAction() {
23 | // Function to log the user action
24 | vapi.send({
25 | type: "add-message",
26 | message: {
27 | role: "system",
28 | content: "The user has pressed the button, say peanuts",
29 | },
30 | });
31 | }
32 | ```
33 | - `vapi.send`: The primary function to interact with your assistant, handling various requests or commands.
34 | - `type: "add-message"`: Specifies the command to add a new message.
35 | - `message`: This is the actual message that you want to add to the message history.
36 | - `role: "system"`: designates the message origin as 'system', ensuring the addition is unobtrusive. Other possible values of `role` are 'user', 'assistant', 'tool', and 'function'.
37 | - `content`: The actual message text to be added.
38 |
39 |
40 |
41 |
42 | - Silent logging of user activities. - Contextual updates in conversations triggered by background
43 | processes. - Non-intrusive user experience enhancements through additional information provision.
44 |
45 |
--------------------------------------------------------------------------------
/assistants/call-analysis.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Call Analysis"
3 | sidebarTitle: "Call Analysis"
4 | description: "At the end of the call, you can summarize and evaluate how it went."
5 | ---
6 |
7 | The Call Analysis feature allows you to summarize and evaluate calls, providing valuable insights into their effectiveness. This feature uses a combination of prompts and schemas to generate structured data and success evaluations based on the call's content.
8 |
9 | You can customize the below in the assistant's `assistant.analysisPlan`.
10 |
11 | ## Summary Prompt
12 |
13 | The summary prompt is used to create a concise summary of the call. This summary is stored in `call.analysis.summary`.
14 |
15 | ### Default Summary Prompt
16 |
17 | The default summary prompt is:
18 |
19 | ```text
20 | You are an expert note-taker. You will be given a transcript of a call. Summarize the call in 2-3 sentences, if applicable.
21 | ```
22 |
23 | ### Customizing the Summary Prompt
24 |
25 | You can customize the summary prompt by setting the `summaryPrompt` property in the API or SDK:
26 |
27 | ```json
28 | {
29 | "summaryPrompt": "Custom summary prompt text"
30 | }
31 | ```
32 |
33 | To disable the summary prompt, set it to an empty string `""` or `"off"`:
34 |
35 | ```json
36 | {
37 | "summaryPrompt": ""
38 | }
39 | ```
40 |
41 | ## Structured Data Prompt
42 |
43 | The structured data prompt extracts specific pieces of data from the call. This data is stored in `call.analysis.structuredData`.
44 |
45 | ### Default Structured Data Prompt
46 |
47 | The default structured data prompt is:
48 |
49 | ```text
50 | You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema.
51 | ```
52 |
53 | ### Customizing the Structured Data Prompt
54 |
55 | You can set a custom structured data prompt using the `structuredDataPrompt` property:
56 |
57 | ```json
58 | {
59 | "structuredDataPrompt": "Custom structured data prompt text"
60 | }
61 | ```
62 |
63 | ## Structured Data Schema
64 |
65 | The structured data schema enforces the format of the extracted data. It is defined using JSON Schema standards.
66 |
67 | ### Customizing the Structured Data Schema
68 |
69 | You can set a custom structured data schema using the `structuredDataSchema` property:
70 |
71 | ```json
72 | {
73 | "structuredDataSchema": {
74 | "type": "object",
75 | "properties": {
76 | "field1": { "type": "string" },
77 | "field2": { "type": "number" }
78 | },
79 | "required": ["field1", "field2"]
80 | }
81 | }
82 | ```
83 |
84 | ## Success Evaluation Prompt
85 |
86 | The success evaluation prompt is used to determine if the call was successful. This evaluation is stored in `call.analysis.successEvaluation`.
87 |
88 | ### Default Success Evaluation Prompt
89 |
90 | The default success evaluation prompt is:
91 |
92 | ```text
93 | You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt.
94 | ```
95 |
96 | ### Customizing the Success Evaluation Prompt
97 |
98 | You can set a custom success evaluation prompt using the `successEvaluationPrompt` property:
99 |
100 | ```json
101 | {
102 | "successEvaluationPrompt": "Custom success evaluation prompt text"
103 | }
104 | ```
105 |
106 | To disable the success evaluation prompt, set it to an empty string `""` or `"off"`:
107 |
108 | ```json
109 | {
110 | "successEvaluationPrompt": ""
111 | }
112 | ```
113 |
114 | ## Success Evaluation Rubric
115 |
116 | The success evaluation rubric defines the criteria used to evaluate the call's success. The available rubrics are:
117 |
118 | - `NumericScale`: A scale of 1 to 10.
119 | - `DescriptiveScale`: A scale of Excellent, Good, Fair, Poor.
120 | - `Checklist`: A checklist of criteria and their status.
121 | - `Matrix`: A grid that evaluates multiple criteria across different performance levels.
122 | - `PercentageScale`: A scale of 0% to 100%.
123 | - `LikertScale`: A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree.
124 | - `AutomaticRubric`: Automatically break down evaluation into several criteria, each with its own score.
125 | - `PassFail`: A simple 'true' if the call passed, 'false' if not.
126 |
127 | ### Customizing the Success Evaluation Rubric
128 |
129 | You can set a custom success evaluation rubric using the `successEvaluationRubric` property:
130 |
131 | ```json
132 | {
133 | "successEvaluationRubric": "NumericScale"
134 | }
135 | ```
136 |
137 | ## Combining Prompts and Rubrics
138 |
139 | You can use prompts and rubrics in combination to create detailed instructions for the call analysis:
140 |
141 | ```json
142 | {
143 | "successEvaluationPrompt": "Evaluate the call based on these criteria:...",
144 | "successEvaluationRubric": "Checklist"
145 | }
146 | ```
147 |
148 | By customizing these properties, you can tailor the call analysis to meet your specific needs and gain valuable insights from your calls.
--------------------------------------------------------------------------------
/assistants/dynamic-variables.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Dynamic Variables"
3 | sidebarTitle: "Dynamic Variables"
4 | description: "Vapi makes it easy to personalize an assistant's messages and prompts using variables, allowing each call to be customized."
5 | ---
6 |
7 | Prompts, messages, and other assistant properties can be dynamically set when starting a call based on templates.
8 | These templates are defined using double curly braces `{{variableName}}`.
9 | This is useful when you want to customize the assistant for a specific call.
10 |
11 | For example, you could set the assistant's first message to "Hello, `{{name}}`!" and then set `name` to `John` when starting the call
12 | by passing `assistantOverrides` with `variableValues` to the API or SDK:
13 |
14 | ```json
15 | {
16 | "variableValues": {
17 | "name": "John"
18 | }
19 | }
20 | ```
21 |
22 | ## Utilizing Dynamic Variables in Phone Calls
23 |
24 | To leverage dynamic variables during phone calls, follow these steps:
25 |
26 | 1. **Prepare Your Request:** Construct a JSON payload containing the following key-value pairs:
27 |
28 | * `assistantId`: Replace `"your-assistant-id"` with the actual ID of your assistant.
29 | * `assistantOverrides`: This object is used to customize your assistant's behavior.
30 | * `variableValues`: An object containing the dynamic variables you want to use, in the format `{ "variableName": "variableValue" }`. For example, `{ "name": "John" }`.
31 | * `customer`: An object representing the call recipient.
32 | * `number`: Replace `"+1xxxxxxxxxx"` with the phone number you wish to call (in E.164 format).
33 | * `phoneNumberId`: Replace `"your-phone-id"` with the ID of your registered phone number. You can get it from the [Phone number](https://dashboard.vapi.ai/phone-numbers) in the dashboard.
34 |
35 | 2. **Send the Request:** Dispatch the JSON payload to the `/call/phone` endpoint using your preferred method (e.g., HTTP POST request).
36 |
37 | ```json
38 | {
39 | "assistantId": "your-assistant-id",
40 | "assistantOverrides": {
41 | "variableValues": {
42 | "name": "John"
43 | }
44 | },
45 | "customer": {
46 | "number": "+1xxxxxxxxxx"
47 | },
48 | "phoneNumberId": "your-phone-id"
49 | }
50 | ```
51 |
52 | ## Default Variables
53 |
54 | By default, the following variables are automatically filled based on the current (UTC) time,
55 | meaning that you don't need to set them manually in `variableValues`:
56 |
57 | | Variable | Description | Example |
58 | | ----------- | --------------------------- | -------------------- |
59 | | `{{now}}` | Current date and time (UTC) | Jan 1, 2024 12:00 PM |
60 | | `{{date}}` | Current date (UTC) | Jan 1, 2024 |
61 | | `{{time}}` | Current time (UTC) | 12:00 PM |
62 | | `{{month}}` | Current month (UTC) | January |
63 | | `{{day}}` | Current day of month (UTC) | 1 |
64 | | `{{year}}` | Current year (UTC) | 2024 |
65 |
66 | **Note:** You will need to add the `{{variableName}}` in this format in all your prompts, whether it is the first message or anywhere else you want to use it.
67 |
68 | ## Advanced Date and Time Usage
69 |
70 | We use [LiquidJS](https://liquidjs.com/) for dynamic variables. You can use the `date` filter to format the date and time in the timezone you want.
71 |
72 | ```liquid
73 | {{"now" | date: "%b %d, %Y, %I:%M %p", "America/New_York"}}
74 | ```
75 |
76 | This should return the current date and time in New York.
77 |
--------------------------------------------------------------------------------
/assistants/function-calling.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Function Calling"
3 | sidebarTitle: "Function Calling"
4 | description: "Additional Capabilities for Your Assistants"
5 | ---
6 |
7 | Vapi voice assistants are given three additional functions: `transferCall`, `endCall`, and `dialKeypad`. These functions can be used to transfer calls, hang up calls, and enter digits on the keypad.
8 |
9 | You **do not** need to add these functions to your model's `functions` array.
10 |
11 | #### Transfer Call
12 |
13 | When a `forwardingPhoneNumber` is present on an assistant, the assistant will be given a `transferCall` function. This function can be used to transfer the call to the `forwardingPhoneNumber`.
14 |
15 | ```json
16 | {
17 | "model": {
18 | "provider": "openai",
19 | "model": "gpt-3.5-turbo",
20 | "messages": [
21 | {
22 | "role": "system",
23 | "content": "You are an assistant at a law firm. When the user asks to be transferred, use the transferCall function."
24 | }
25 | ]
26 | },
27 | "forwardingPhoneNumber": "+16054440129"
28 | }
29 | ```
30 |
31 | #### End Call
32 |
33 | This function is provided when `endCallFunctionEnabled` is enabled on the assistant. The assistant can use this function to end the call.
34 |
35 | ```json
36 | {
37 | "model": {
38 | "provider": "openai",
39 | "model": "gpt-3.5-turbo",
40 | "messages": [
41 | {
42 | "role": "system",
43 | "content": "You are an assistant at a law firm. If the user is being mean, use the endCall function."
44 | }
45 | ]
46 | },
47 | "endCallFunctionEnabled": true
48 | }
49 | ```
50 |
51 | #### Dial Keypad
52 |
53 | This function is provided when `dialKeypadFunctionEnabled` is enabled on the assistant. The assistant will be able to enter digits on the keypad.
54 |
55 | ```json
56 | {
57 | "model": {
58 | "provider": "openai",
59 | "model": "gpt-3.5-turbo",
60 | "messages": [
61 | {
62 | "role": "system",
63 | "content": "You are an assistant at a law firm. When you hit a menu, use the dialKeypad function to enter the digits."
64 | }
65 | ]
66 | },
67 | "dialKeypadFunctionEnabled": true
68 | }
69 | ```
70 |
71 | ### Custom Functions
72 |
73 | In addition to the predefined functions, you can also define custom functions. These functions are similar to OpenAI functions and your chosen LLM will trigger them as needed based on your instructions.
74 |
75 | The functions array in the assistant definition allows you to define custom functions that the assistant can call during a conversation. Each function is an object with the following properties:
76 |
77 | - `name`: The name of the function. It must be a string containing a-z, A-Z, 0-9, underscores, or dashes, with a maximum length of 64.
78 | - `description`: A brief description of what the function does. This is used by the AI to decide when and how to call the function.
79 | - `parameters`: An object that describes the parameters the function accepts. The type property should be "object", and the properties property should be an object where each key is a parameter name and each value is an object describing the type and purpose of the parameter.
80 |
81 | Here's an example of a function definition:
82 |
83 | ```json
84 | {
85 | "functions": [
86 | {
87 | "name": "bookAppointment",
88 | "description": "Used to book the appointment.",
89 | "parameters": {
90 | "type": "object",
91 | "properties": {
92 | "datetime": {
93 | "type": "string",
94 | "description": "The date and time of the appointment in ISO format."
95 | }
96 | }
97 | }
98 | }
99 | ]
100 | }
101 | ```
102 |
103 | In this example, the bookAppointment function accepts one parameter, `datetime`, which is a string representing the date and time of the appointment in ISO format.
104 |
105 | In addition to defining custom functions, you can specify a `serverUrl` where Vapi will send the function call information. This URL can be configured at the account level or at the assistant level.
106 | At the account level, the `serverUrl` is set in the Vapi Dashboard. All assistants under the account will use this URL by default for function calls.
107 | At the assistant level, the `serverUrl` can be specified in the assistant configuration when creating or updating an assistant. This allows different assistants to use different URLs for function calls. If a `serverUrl` is specified at the assistant level, it will override the account-level Server URL.
108 |
109 | If the `serverUrl` is not defined either at the account level or the assistant level, the function call will simply be added to the chat history. This can be particularly useful when you want a function call to trigger an action on the frontend.
110 |
111 | For instance, the frontend can listen for specific function calls in the chat history and respond by updating the user interface or performing other actions. This allows for a dynamic and interactive user experience, where the frontend can react to changes in the conversation in real time.
112 |
--------------------------------------------------------------------------------
/assistants/persistent-assistants.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Persistent Assistants"
3 | sidebarTitle: "Persistent Assistants"
4 | description: "Should I use persistent assistants?"
5 | ---
6 |
7 | You might be wondering whether or not you should create an assistant using the `/assistant` endpoint with its `assistantId`. Or, can you just specify the assistant configuration when starting a call?
8 |
9 | The `/assistant` endpoint is there for convenience, to save you from creating your own assistants table.
10 |
11 |
12 | - You won't be adding more assistant properties on top of ours.
13 | - You want to use the same assistant across multiple calls.
14 |
15 |
16 | Otherwise, you can just specify the assistant configuration when starting a call.
17 |
--------------------------------------------------------------------------------
/audio-interface.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Orchestration Models"
3 | sidebarTitle: "Orchestration Models"
4 | description: "All the fancy stuff Vapi does on top of the core models."
5 | ---
6 |
7 | Vapi also runs a suite of audio and text models that make its latency-optimized Speech-to-Text (STT), Large Language Model (LLM), & Text-to-Speech (TTS) pipeline feel human.
8 |
9 | Here's a high-level overview of the Vapi architecture:
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | To provide you and your customers with a superior conversational experience,
18 | we have various latency optimizations like end-to-end streaming and
19 | colocating servers that shave off every possible millisecond of latency. We
20 | also manage the coordination of interruptions, turn-taking, and other
21 | conversational dynamics.
22 |
23 |
24 | We built in many smaller features to give developers a lot of room to
25 | customize and integrate. For example, there’s no need for you to hook up
26 | Twilio websockets or build bidirectional audio streaming. Instead, you can
27 | connect to the WebRTC stream through our
28 | [Web](https://github.com/VapiAI/web), [iOS](https://github.com/VapiAI/ios),
29 | or [Python](https://github.com/VapiAI/python) clients…and then get right
30 | back to what you were doing.
31 |
32 |
33 | Finally, we designed Vapi to be highly scalable. We accommodate everything
34 | from small businesses and companies all the way up to enterprise-level
35 | clients.
36 |
37 |
38 |
--------------------------------------------------------------------------------
/billing/billing-limits.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Billing Limits"
3 | sidebarTitle: "Billing Limits"
4 | description: "Set billing limits on your Vapi account."
5 | ---
6 |
7 | You can set billing limits in the billing section of your dashboard.
8 |
9 |
10 | You can access your billing settings at
11 | [dashboard.vapi.ai/billing](https://dashboard.vapi.ai/billing)
12 |
13 |
14 | ### Setting a Monthly Billing Limit
15 |
16 | In your billing settings you can set a monthly billing limit:
17 |
18 |
19 |
20 |
21 |
22 | ### Exceeding Billing Limits
23 |
24 | Once you have used all of your starter credits, or exceeded your set monthly usage limit, you will start seeing errors in your dashboard & via the API mentioning `Billing Limits Exceeded`.
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/billing/cost-routing.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Cost Routing"
3 | sidebarTitle: "Cost Routing"
4 | description: "Learn more about how your Vapi account is billed for provider expenses."
5 | ---
6 |
7 |
8 |
9 |
10 |
11 | During calls, requests will be made to different providers in the voice pipeline:
12 |
13 | - **transcription providers:** providers conducting speech-to-text
14 | - **model providers:** LLM providers
15 | - **voice providers:** providers conducting text-to-speech
16 | - **telephony providers:** providers like [Twilio](https://www.twilio.com)/[Vonage](https://www.vonage.com) that facilitate phone calls
17 |
18 |
19 | Per-minute telephony costs only occur during inbound/outbound phone calling. Web calls do not
20 | incur this cost.
21 |
22 |
23 | ## Where Provider Costs End Up
24 |
25 | There are 2 places these charges can end up:
26 |
27 | 1. **Provider-side:** in the account you have with the provider.
28 | 2. **With Vapi:** in your Vapi account.
29 |
30 |
31 |
32 | If we have [provider keys](customization/provider-keys) on file for a provider, the cost will be seen directly
33 | in your account with the provider. Vapi will have made the request on your behalf with your provider key.
34 |
35 | No charge will be made to your Vapi account.
36 |
37 | Charges for inbound/outbound phone calling (telephony) will always end up where the phone number
38 | was provisioned. If you import a phone number from Twilio or Vonage, per-minute charges for calling
39 | those numbers will appear with them.
40 |
41 |
42 |
43 | If no key is found on-file for the provider, Vapi will make the API request itself (with Vapi's own keys, at Vapi's expense). This expense is then passed on [**at-cost**](/glossary#at-cost) to be billed directly to your Vapi account.
44 |
45 | No charge will show up provider-side.
46 |
47 |
48 |
49 |
50 | ## Billing That "Just Works"
51 |
52 | The central idea is that everything is designed to "just work".
53 |
54 | Whether you are billed provider-side, or on Vapi's side, you will never be charged with any margin for provider fees incurred during calls.
55 |
--------------------------------------------------------------------------------
/blocks.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Introduction"
3 | sidebarTitle: "Introduction"
4 | description: "Breaking down bot conversations into smaller, more manageable prompts"
5 | ---
6 |
7 |
8 | We're currently running a beta for **Blocks**, an upcoming feature from [Vapi.ai](http://vapi.ai/) aimed at improving bot conversations. The problem we've noticed is that single LLM prompts are prone to hallucinations, unreliable tool calls, and can’t handle many-step complex instructions.
9 |
10 | **By breaking the conversation into smaller, more manageable prompts**, we can guarantee the bot will do this, then that, or if this happens, then that happens. It’s like having a checklist for conversations — less room for error, more room for getting things right.
11 |
12 |
13 | Here’s an example: For food ordering, this is what a prompt would look like.
14 |
15 |
16 |
17 | Example Prompt
18 |
19 | ```jsx
20 | [Identity]
21 | You are a friendly and efficient assistant for a food truck that serves burgers, fries, and drinks.
22 |
23 | [Task]
24 | 1. Greet the customer warmly and inquire about their main order.
25 | 2. Offer suggestions for the main order if needed.
26 | 3. If they choose a burger, suggest upgrading to a combo with fries and a drink, offering clear options (e.g., regular or special fries, different drink choices).
27 | 4. Confirm the entire order to ensure accuracy.
28 | 5. Suggest any additional items like desserts or sauces.
29 | 6. Thank the customer and let them know when their order will be ready.
30 | ```
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 | There are three core types of Blocks: [Conversation](https://api.vapi.ai/api#:~:text=ConversationBlock), [Tool-call](https://api.vapi.ai/api#:~:text=ToolCallBlock), and [Workflow](https://api.vapi.ai/api#:~:text=WorkflowBlock). Each type serves a different role in shaping how your assistant engages with users.
43 |
44 |
45 |
46 | Blocks is currently in beta. We're excited to have you try this new feature and welcome your [feedback](https://discord.com/invite/pUFNcf2WmH) as we continue to refine and improve the experience.
47 |
48 |
49 | ## Advanced Concepts
50 |
51 |
52 |
53 | Learn how to structure the flow of your conversation
54 |
55 |
56 | Explore the different block types and how to use them
57 |
58 |
--------------------------------------------------------------------------------
/blocks/block-types.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Block Types"
3 | sidebarTitle: "Block Types"
4 | description: "Building the Logic and Actions for Each Step in Your Conversation "
5 | ---
6 |
7 | [**Blocks**](https://api.vapi.ai/api#/Blocks/BlockController_create) are the functional units within a Step, defining what action happens at each stage of a conversation. Each Step can contain only one Block, and there are three main types of Blocks, each designed to handle different aspects of conversation flow.
8 |
9 |
10 | Blocks is currently in beta. We're excited to have you try this new feature and welcome your [feedback](https://discord.com/invite/pUFNcf2WmH) as we continue to refine and improve the experience.
11 |
12 |
13 | #### Types
14 |
15 | - [**Conversation:**](https://api.vapi.ai/api#:~:text=ConversationBlock) This block type manages interactions between the assistant and the user. A conversation block is used when the assistant needs to ask the user for specific information, such as contact details or preferences.
16 | - [**Tool-call:**](https://api.vapi.ai/api#:~:text=ToolCallBlock) This block allows the assistant to make external tool calls.
17 | - [**Workflow:**](https://api.vapi.ai/api#:~:text=WorkflowBlock) This block type enables the creation of subflows, which are smaller sets of steps executed within a Block. It can contain an array of steps (`steps[]`) and uses an `inputSchema` to define the data needed to initiate the workflow, along with an `outputSchema` to handle the data returned after completing the subflow. Workflow blocks are ideal for organizing complex processes or reusing workflows across different parts of the conversation.
--------------------------------------------------------------------------------
/blocks/steps.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steps"
3 | sidebarTitle: "Steps"
4 | description: "Building and Controlling Conversation Flow for Your Assistants"
5 | ---
6 |
7 | [**Steps**](https://api.vapi.ai/api#:~:text=HandoffStep) are the core building blocks that dictate how conversations progress in a bot interaction. Each Step represents a distinct point in the conversation where the bot performs an action, gathers information, or decides where to go next. Think of Steps as checkpoints in a conversation that guide the flow, manage user inputs, and determine outcomes.
8 |
9 |
10 | Blocks is currently in beta. We're excited to have you try this new feature and welcome your [feedback](https://discord.com/invite/pUFNcf2WmH) as we continue to refine and improve the experience.
11 |
12 |
13 | #### Features
14 |
15 | - **Output:** The data or response expected from the step, as outlined in the block's `outputSchema`.
16 | - **Input:** The data necessary for the step to execute, defined in the block's `inputSchema`.
17 | - [**Destinations:**](https://api.vapi.ai/api#:~:text=StepDestination) This can be determined by a simple linear progression or based on specific criteria, like conditions or rules set within the Step. This enables dynamic decision-making, allowing the assistant to choose the next Step depending on what happens during the conversation (e.g., user input, a specific value, or a condition being met).
18 |
19 | #### Example
20 |
21 | ```json
22 | {
23 | "type": "handoff",
24 | "name": "get_user_order",
25 | "input": {
26 | "name": "John Doe",
27 | "email": "johndoe@example.com"
28 | },
29 | "destinations": [
30 | {
31 | "type": "step",
32 | "stepName": "confirm_order",
33 | "conditions": [
34 | {
35 | "type": "model-based",
36 | "instruction": "If the user has provided an order"
37 | }
38 | ]
39 | }
40 | ],
41 | "block": {
42 | "name": "ask_for_order",
43 | "type": "conversation",
44 | "inputSchema": {
45 | "type": "object",
46 | "required": ["name", "email"],
47 | "properties": {
48 | "name": { "type": "string", "description": "The customer's name" },
49 | "email": { "type": "string", "description": "The customer's email" }
50 | }
51 | },
52 | "instruction": "Greet the customer and ask for their name and email. Then ask them what they'd like to order.",
53 | "outputSchema": {
54 | "type": "object",
55 | "required": ["orders", "name"],
56 | "properties": {
57 | "orders": {
58 | "type": "string",
59 | "description": "The customer's order, e.g., 'burger with fries'"
60 | },
61 | "name": {
62 | "type": "string",
63 | "description": "The customer's name"
64 | }
65 | }
66 | }
67 | }
68 | }
69 | ```
--------------------------------------------------------------------------------
/calls/call-ended-reason.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Call Ended Reason"
3 | sidebarTitle: "Ended Reason"
4 | description: 'A guide to understanding all call "Ended Reason" types & errors.'
5 | ---
6 |
7 | This guide will discuss all possible `endedReason`s for a call.
8 |
9 | You can find these under the **"Ended Reason"** section of your [call
10 | logs](https://dashboard.vapi.ai/calls) (or under the `endedReason` field on the [Call
11 | Object](/api-reference/calls/get-call)).
12 |
13 | #### **Assistant-Related**
14 |
15 | - **assistant-ended-call**: The assistant intentionally ended the call based on the user's response.
16 | - **assistant-error**: This general error occurs within the assistant's logic or processing due to bugs, misconfigurations, or unexpected inputs.
17 | - **assistant-forwarded-call**: The assistant successfully transferred the call to another number or service.
18 | - **assistant-join-timed-out**: The assistant failed to join the call within the expected timeframe.
19 | - **assistant-not-found**: The specified assistant cannot be located or accessed, possibly due to an incorrect assistant ID or configuration issue.
20 | - **assistant-not-invalid**: The assistant ID provided is not valid or recognized by the system.
21 | - **assistant-not-provided**: No assistant ID was specified in the request, causing the system to fail.
22 | - **assistant-request-returned-error**: Communicating with the assistant resulted in an error, possibly due to network issues or problems with the assistant itself.
23 | - **assistant-request-returned-forwarding-phone-number**: The assistant triggered a call forwarding action, ending the current call.
24 | - **assistant-request-returned-invalid-assistant**: The assistant returned an invalid response or failed to fulfill the request properly.
25 | - **assistant-request-returned-no-assistant**: The assistant didn't provide any response or action to the request.
26 | - **assistant-said-end-call-phrase**: The assistant recognized a phrase or keyword triggering call termination.
27 |
28 | #### **Pipeline and LLM**
29 |
30 | These relate to issues within the AI processing pipeline or the Large Language Models (LLMs) used for understanding and generating text:
31 |
32 | - **pipeline-error-\***: Various error codes indicate specific failures within the processing pipeline, such as function execution, LLM responses, or external service integration. Examples include OpenAI, Azure OpenAI, Together AI, and several other LLMs or voice providers.
33 | - **pipeline-error-first-message-failed:** The system failed to deliver the first message. This issue usually occurs when you add your own provider key in the voice section. It may be due to exceeding your subscription or quota limit.
34 | - **pipeline-no-available-llm-model**: No suitable LLM was available to process the request.
35 |
36 | #### **Phone Calls and Connectivity**
37 |
38 | - **customer-busy**: The customer's line was busy.
39 | - **customer-ended-call**: The customer (the end human user) ended the call, for both inbound and outbound calls.
40 | - **customer-did-not-answer**: The customer didn't answer the call. If you're looking to build a use case where you need the bot to talk to automated IVRs, set `assistant.voicemailDetectionEnabled=false`.
41 | - **customer-did-not-give-microphone-permission**: The user didn't grant the necessary microphone access for the call.
42 | - **phone-call-provider-closed-websocket**: The connection with the call provider was unexpectedly closed.
43 | - **twilio-failed-to-connect-call**: The Twilio service, responsible for managing calls, failed to establish a connection.
44 | - **vonage-disconnected**: The call was disconnected by Vonage, another call management service.
45 | - **vonage-failed-to-connect-call**: Vonage failed to establish the call connection.
46 | - **vonage-rejected**: The call was rejected by Vonage due to an issue or configuration problem.
47 |
48 | #### **Other Reasons**
49 |
50 | - **exceeded-max-duration**: The call reached its maximum allowed duration and was automatically terminated.
51 | - **silence-timed-out**: The call was ended due to prolonged silence, indicating inactivity.
52 | - **voicemail**: The call was diverted to voicemail.
53 |
54 | #### **Unknown**
55 |
56 | - **unknown-error**: An unexpected error occurred, and the cause is unknown. For this, please [contact support](/support) with your `call_id` and account email address, & we will investigate.
57 |
--------------------------------------------------------------------------------
/calls/call-features.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Live Call Control"
3 | sidebarTitle: "Live Call Control"
4 | ---
5 | Vapi offers two main features that provide enhanced control over live calls:
6 |
7 | 1. **Call Control**: This feature allows you to inject conversation elements dynamically during an ongoing call.
8 | 2. **Call Listen**: This feature enables real-time audio data streaming using WebSocket connections.
9 |
10 | To use these features, you first need to obtain the URLs specific to the live call. These URLs can be retrieved by triggering a `/call` endpoint, which returns the `listenUrl` and `controlUrl` within the `monitor` object.
11 |
12 | ## Obtaining URLs for Call Control and Listen
13 |
14 | To initiate a call and retrieve the `listenUrl` and `controlUrl`, send a POST request to the `/call` endpoint.
15 |
16 | ### Sample Request
17 |
18 | ```bash
19 | curl 'https://api.vapi.ai/call/phone' \
20 | -H 'authorization: Bearer YOUR_API_KEY' \
21 | -H 'content-type: application/json' \
22 | --data-raw '{
23 | "assistantId": "5b0a4a08-133c-4146-9315-0984f8c6be80",
24 | "customer": {
25 | "number": "+12345678913"
26 | },
27 | "phoneNumberId": "42b4b25d-031e-4786-857f-63b346c9580f"
28 | }'
29 |
30 | ```
31 |
32 | ### Sample Response
33 |
34 | ```json
35 | {
36 | "id": "7420f27a-30fd-4f49-a995-5549ae7cc00d",
37 | "assistantId": "5b0a4a08-133c-4146-9315-0984f8c6be80",
38 | "phoneNumberId": "42b4b25d-031e-4786-857f-63b346c9580f",
39 | "type": "outboundPhoneCall",
40 | "createdAt": "2024-09-10T11:14:12.339Z",
41 | "updatedAt": "2024-09-10T11:14:12.339Z",
42 | "orgId": "eb166faa-7145-46ef-8044-589b47ae3b56",
43 | "cost": 0,
44 | "customer": {
45 | "number": "+12345678913"
46 | },
47 | "status": "queued",
48 | "phoneCallProvider": "twilio",
49 | "phoneCallProviderId": "CA4c6793d069ef42f4ccad69a0957451ec",
50 | "phoneCallTransport": "pstn",
51 | "monitor": {
52 | "listenUrl": "wss://aws-us-west-2-production1-phone-call-websocket.vapi.ai/7420f27a-30fd-4f49-a995-5549ae7cc00d/transport",
53 |     "controlUrl": "https://aws-us-west-2-production1-phone-call-websocket.vapi.ai/7420f27a-30fd-4f49-a995-5549ae7cc00d/control"
54 | }
55 | }
56 |
57 | ```
58 |
59 | ## Call Control Feature
60 |
61 | Once you have the `controlUrl`, you can inject a message into the live call using a POST request. This can be done by sending a JSON payload to the `controlUrl`.
62 |
63 | ### Example: Injecting a Message
64 |
65 | ```bash
66 | curl -X POST 'https://aws-us-west-2-production1-phone-call-websocket.vapi.ai/7420f27a-30fd-4f49-a995-5549ae7cc00d/control' \
67 | -H 'content-type: application/json' \
68 | --data-raw '{
69 | "type": "say",
70 | "message": "Welcome to Vapi, this message was injected during the call."
71 | }'
72 |
73 | ```
74 |
75 | The message will be spoken in real-time during the ongoing call.
76 |
77 | ## Call Listen Feature
78 |
79 | The `listenUrl` allows you to connect to a WebSocket and stream the audio data in real-time. You can either process the audio directly or save the binary data to analyze or replay later.
80 |
81 | ### Example: Saving Audio Data from a Live Call
82 |
83 | Here is a simple implementation for saving the audio buffer from a live call using Node.js:
84 |
85 | ```javascript
86 | const WebSocket = require('ws');
87 | const fs = require('fs');
88 |
89 | let pcmBuffer = Buffer.alloc(0);
90 |
91 | const ws = new WebSocket("wss://aws-us-west-2-production1-phone-call-websocket.vapi.ai/7420f27a-30fd-4f49-a995-5549ae7cc00d/transport");
92 |
93 | ws.on('open', () => console.log('WebSocket connection established'));
94 |
95 | ws.on('message', (data, isBinary) => {
96 | if (isBinary) {
97 | pcmBuffer = Buffer.concat([pcmBuffer, data]);
98 | console.log(`Received PCM data, buffer size: ${pcmBuffer.length}`);
99 | } else {
100 | console.log('Received message:', JSON.parse(data.toString()));
101 | }
102 | });
103 |
104 | ws.on('close', () => {
105 | if (pcmBuffer.length > 0) {
106 | fs.writeFileSync('audio.pcm', pcmBuffer);
107 | console.log('Audio data saved to audio.pcm');
108 | }
109 | });
110 |
111 | ws.on('error', (error) => console.error('WebSocket error:', error));
112 |
113 | ```
114 |
--------------------------------------------------------------------------------
/challenges-of-realtime-conversation.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Implementing Realtime Conversation"
3 | sidebarTitle: "Implementing Realtime Conversation"
4 | description: "A background on the challenges of implementing realtime LLM-based human conversation."
5 | ---
6 |
7 | ### The Challenges of LLM Conversation
8 |
9 | #### Introduction
10 |
11 | There are different scopes of voice AI application complexity. Some applications are very turn-based (tolerant of delays & less sensitive to interruption), while others require a rigorous, realtime conversational agent (one that nears being indistinguishable from a human in conversation).
12 |
13 | ### Framing the Problem
14 |
15 | Writing a voice AI application from scratch is a difficult problem.
16 |
17 | In software development, there are certain problems you can & _will_ want to solve yourself, & another class of problems that are so wide-ranging & complex that it is best to cut off complexity by using a third-party service.
18 |
19 | Replicating the nuances & complexities of human conversation is a deep rabbit-hole, & ...
20 |
21 | `under construction`
22 |
--------------------------------------------------------------------------------
/community/assistant-request.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Assistant Request"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
10 | ## Send Us Your Video
11 |
12 | Have a video showcasing Vapi that you want us to feature? Let us know:
13 |
14 |
15 |
21 | Send us your video showcasing what Vapi can do, we'd like to feature it.
22 |
23 |
24 |
--------------------------------------------------------------------------------
/community/community.css:
--------------------------------------------------------------------------------
1 | /* for a grid of videos */
2 |
3 | .video-grid {
4 | display: flex;
5 | flex-wrap: wrap;
6 | gap: 20px; /* Spacing between videos */
7 | }
8 |
9 | .video-grid iframe,
10 | .video-grid a {
11 | flex: 0 0 calc(50% - 20px); /* Flex grow is 0, basis is 50% minus the gap */
12 | aspect-ratio: 560 / 315; /* Maintain the aspect ratio of 16:9 */
13 | max-width: calc(50% - 20px); /* Max width is also set to 50% minus the gap */
14 | height: auto; /* Allow height to auto adjust based on aspect ratio */
15 | }
16 |
17 | .video-grid a {
18 | aspect-ratio: 1;
19 | }
20 |
21 | @media (max-width: 600px) {
22 | .video-grid iframe {
23 | flex: 0 0 100%; /* Flex grow is 0, basis is 100% */
24 | max-width: 100%; /* Allow max-width to be full width on mobile */
25 | }
26 | }
27 |
28 | .card-img {
29 | height: 200px;
30 | object-fit: contain;
31 | margin: auto;
32 | background: white; /*TODO: change color as per theme*/
33 | }
34 |
35 | .card-content {
36 | display: flex;
37 | flex-direction: column;
38 | align-items: center;
39 | margin-top: auto;
40 | text-align: center;
41 | }
42 |
43 | .card-content > h3 {
44 | margin: 16px 0 8px 0;
45 | font-size: 1.5em;
46 | text-align: center;
47 | }
48 |
49 | .card-content > p {
50 | font-size: 1em;
51 | text-align: center;
52 | }
53 |
--------------------------------------------------------------------------------
/community/comparisons.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Comparisons"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
93 |
94 | ## Send Us Your Video
95 |
96 | Have a video showcasing Vapi that you want us to feature? Let us know:
97 |
98 |
99 |
105 | Send us your video showcasing what Vapi can do, we'd like to feature it.
106 |
107 |
108 |
--------------------------------------------------------------------------------
/community/conferences.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Conferences"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
18 |
19 | ## Send Us Your Video
20 |
21 | Have a video showcasing Vapi that you want us to feature? Let us know:
22 |
23 |
24 |
30 | Send us your video showcasing what Vapi can do, we'd like to feature it.
31 |
32 |
33 |
--------------------------------------------------------------------------------
/community/demos.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Demos"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
18 |
26 |
27 |
28 |
29 | ## Send Us Your Video
30 |
31 | Have a video showcasing Vapi that you want us to feature? Let us know:
32 |
33 |
34 |
40 | Send us your video showcasing what Vapi can do, we'd like to feature it.
41 |
42 |
43 |
--------------------------------------------------------------------------------
/community/ghl.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "GoHighLevel"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
18 |
28 |
29 |
30 | ## Send Us Your Video
31 |
32 | Have a video showcasing Vapi that you want us to feature? Let us know:
33 |
34 |
35 |
41 | Send us your video showcasing what Vapi can do, we'd like to feature it.
42 |
43 |
44 |
--------------------------------------------------------------------------------
/community/inbound.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Inbound"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
64 |
65 | ## Send Us Your Video
66 |
67 | Have a video showcasing Vapi that you want us to feature? Let us know:
68 |
69 |
70 |
76 | Send us your video showcasing what Vapi can do, we'd like to feature it.
77 |
78 |
79 |
--------------------------------------------------------------------------------
/community/knowledgebase.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Knowledgebase"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
25 |
35 |
36 |
46 |
47 |
48 | ## Send Us Your Video
49 |
50 | Have a video showcasing Vapi that you want us to feature? Let us know:
51 |
52 |
53 |
59 | Send us your video showcasing what Vapi can do, we'd like to feature it.
60 |
61 |
62 |
--------------------------------------------------------------------------------
/community/outbound.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Outbound"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
82 |
83 | ## Send Us Your Video
84 |
85 | Have a video showcasing Vapi that you want us to feature? Let us know:
86 |
87 |
88 |
94 | Send us your video showcasing what Vapi can do, we'd like to feature it.
95 |
96 |
97 |
--------------------------------------------------------------------------------
/community/podcast.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Podcast"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
18 |
19 | ## Send Us Your Video
20 |
21 | Have a video showcasing Vapi that you want us to feature? Let us know:
22 |
23 |
24 |
30 | Send us your video showcasing what Vapi can do, we'd like to feature it.
31 |
32 |
33 |
--------------------------------------------------------------------------------
/community/snippets-sdks-tutorials.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Snippets & SDKs Tutorials"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
25 |
33 |
41 |
42 |
52 |
53 |
54 | ## Send Us Your Video
55 |
56 | Have a video showcasing Vapi that you want us to feature? Let us know:
57 |
58 |
59 |
65 | Send us your video showcasing what Vapi can do, we'd like to feature it.
66 |
67 |
68 |
--------------------------------------------------------------------------------
/community/special-mentions.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Special Mentions"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
27 |
28 |
38 |
39 |
49 |
50 |
51 | ## Send Us Your Video
52 |
53 | Have a video showcasing Vapi that you want us to feature? Let us know:
54 |
55 |
56 |
62 | Send us your video showcasing what Vapi can do, we'd like to feature it.
63 |
64 |
65 |
--------------------------------------------------------------------------------
/community/squads.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Squads"
3 | ---
4 |
5 | Here are some videos made by people in our community showcasing what Vapi can do:
6 |
7 |
8 |
16 |
26 |
36 |
46 |
47 |
57 |
58 |
59 | ## Send Us Your Video
60 |
61 | Have a video showcasing Vapi that you want us to feature? Let us know:
62 |
63 |
64 |
70 | Send us your video showcasing what Vapi can do, we'd like to feature it.
71 |
72 |
73 |
--------------------------------------------------------------------------------
/community/television.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Television"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
18 |
19 | ## Send Us Your Video
20 |
21 | Have a video showcasing Vapi that you want us to feature? Let us know:
22 |
23 |
24 |
30 | Send us your video showcasing what Vapi can do, we'd like to feature it.
31 |
32 |
33 |
--------------------------------------------------------------------------------
/community/tools-calling.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Appointment Scheduling"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
10 |
11 |
12 | ## Send Us Your Video
13 |
14 | Have a video showcasing Vapi that you want us to feature? Let us know:
15 |
16 |
17 |
23 | Send us your video showcasing what Vapi can do, we'd like to feature it.
24 |
25 |
26 |
--------------------------------------------------------------------------------
/community/usecase.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Usecase"
3 | description: "Videos showcasing Vapi out in the wild."
4 | ---
5 |
6 | Here are some videos made by people in our community showcasing what Vapi can do:
7 |
8 |
9 |
17 |
25 |
33 |
41 |
49 |
57 |
58 |
59 | ## Send Us Your Video
60 |
61 | Have a video showcasing Vapi that you want us to feature? Let us know:
62 |
63 |
64 |
70 | Send us your video showcasing what Vapi can do, we'd like to feature it.
71 |
72 |
73 |
--------------------------------------------------------------------------------
/customization/custom-keywords.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Custom Keywords"
3 | sidebarTitle: "Custom Keywords"
4 | description: "Enhanced transcription accuracy guide"
5 | ---
6 |
7 | VAPI allows you to improve the accuracy of your transcriptions by leveraging Deepgram's keyword boosting feature. This is particularly useful when dealing with specialized terminology or uncommon proper nouns. By providing specific keywords to the Deepgram model, you can enhance transcription quality directly through VAPI.
8 |
9 | ### Why Use Keyword Boosting?
10 |
11 | Keyword boosting is beneficial for:
12 |
13 | - Enhancing the recognition of specialized terms and proper nouns.
14 | - Improving transcription accuracy without the need for a custom-trained model.
15 | - Quickly updating the model's vocabulary with new or uncommon words.
16 |
17 | ### Important Notes
18 |
19 | - Keywords should be uncommon words or proper nouns not frequently recognized by the model.
20 | - Custom model training is the most effective way to ensure accurate keyword recognition.
21 | - For more than 50 keywords, consider custom model training by contacting Deepgram.
22 |
23 | ## Enabling Keyword Boosting in VAPI
24 |
25 | ### API Call Integration
26 |
27 | To enable keyword boosting, you need to add a `keywords` parameter to your VAPI assistant's transcriber section. This parameter should include the keywords and their respective intensifiers.
28 |
29 | ### Example of POST Request
30 |
31 | To create an assistant with keyword boosting enabled, you can make the following POST request to VAPI:
32 |
33 | ```bash
34 |
35 | curl \
36 | --request POST \
37 | --header 'Authorization: Bearer ' \
38 | --header 'Content-Type: application/json' \
39 | --data '{
40 | "name": "Emma",
41 | "model": {
42 | "model": "gpt-4o",
43 | "provider": "openai"
44 | },
45 | "voice": {
46 | "voiceId": "emma",
47 | "provider": "azure"
48 | },
49 | "transcriber": {
50 | "provider": "deepgram",
51 | "model": "nova-2",
52 | "language": "bg",
53 | "smartFormat": true,
54 | "keywords": [
55 | "snuffleupagus:1"
56 | ]
57 | },
58 | "firstMessage": "Hi, I am Emma, what is your name?",
59 | "firstMessageMode": "assistant-speaks-first"
60 | }' \
61 | https://api.vapi.ai/assistant
62 |
63 | ```
64 |
65 | In this configuration:
66 |
67 | - **name**: The name of the assistant.
68 | - **model**: Specifies the model and provider for the assistant's conversational capabilities.
69 | - **voice**: Specifies the voice and provider for the assistant's speech.
70 | - **transcriber**: Specifies Deepgram as the transcription provider, along with the model, language, smart formatting, and keywords for boosting.
71 | - **firstMessage**: The initial message the assistant will speak.
72 | - **firstMessageMode**: Specifies that the assistant speaks first.
73 |
74 | ### Intensifiers
75 |
76 | Intensifiers are exponential factors that boost or suppress the likelihood of the specified keyword being recognized. The default intensifier is `1`. Higher values increase the likelihood, while `0` is equivalent to not specifying a keyword.
77 |
78 | - **Boosting Example:** `keywords=snuffleupagus:5`
79 | - **Suppressing Example:** `keywords=kansas:-10`
80 |
81 | ### Best Practices for Keyword Boosting
82 |
83 | 1. **Send Uncommon Keywords:** Focus on keywords not successfully transcribed by the model.
84 | 2. **Send Keywords Once:** Avoid repeating keywords.
85 | 3. **Use Individual Keywords:** Prefer individual terms over phrases.
86 | 4. **Use Proper Spelling:** Spell proper nouns as you want them to appear in transcripts.
87 | 5. **Moderate Intensifiers:** Start with small increments to avoid false positives.
88 | 6. **Custom Model Training:** For extensive vocabulary needs, consider custom model training.
89 |
90 | ### Additional Resources
91 |
92 | For more detailed information on Deepgram's keyword boosting feature, refer to the [Deepgram Keyword Boosting Documentation](https://developers.deepgram.com/docs/keywords).
93 |
94 | By following these guidelines, you can effectively utilize Deepgram's keyword boosting feature within your VAPI assistant, ensuring enhanced transcription accuracy for specialized terminology and uncommon proper nouns.
--------------------------------------------------------------------------------
/customization/custom-llm/fine-tuned-openai-models.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Fine-tuned OpenAI models"
3 | sidebarTitle: "Fine-tuned OpenAI models"
4 | description: "Use Another LLM or Your Own Server"
5 | ---
6 |
7 | Vapi supports using any OpenAI-compatible endpoint as the LLM. This includes services like [OpenRouter](https://openrouter.ai/), [AnyScale](https://www.anyscale.com/), [Together AI](https://www.together.ai/), or your own server.
8 |
9 |
10 | - For an open-source LLM, like Mixtral
11 | - To update the context during the conversation
12 | - To customize the messages before they're sent to an LLM
13 |
14 |
15 | ## Using an LLM provider
16 |
17 | You'll first want to POST your API key via the `/credential` endpoint:
18 |
19 | ```json
20 | {
21 | "provider": "openrouter",
22 | "apiKey": ""
23 | }
24 | ```
25 |
26 | Then, you can create an assistant with the model provider:
27 |
28 | ```json
29 | {
30 | "name": "My Assistant",
31 | "model": {
32 | "provider": "openrouter",
33 | "model": "cognitivecomputations/dolphin-mixtral-8x7b",
34 | "messages": [
35 | {
36 | "role": "system",
37 | "content": "You are an assistant."
38 | }
39 | ],
40 | "temperature": 0.7
41 | }
42 | }
43 | ```
44 | ## Using Fine-Tuned OpenAI Models
45 |
46 | To set up your OpenAI Fine-Tuned model, you need to follow these steps:
47 |
48 | 1. Set the custom LLM URL to `https://api.openai.com/v1`.
49 | 2. Assign the custom LLM key to your OpenAI API key.
50 | 3. Update the model to your fine-tuned model's name.
51 | 4. Execute a PATCH request to the `/assistant` endpoint and ensure that `model.metadataSendMode` is set to off.
52 |
53 | ## Using your server
54 |
55 | To set up your server to act as the LLM, you'll need to create an endpoint that is compatible with the [OpenAI Client](https://platform.openai.com/docs/api-reference/making-requests). For best results, your endpoint should also support streaming completions.
56 |
57 | If your server is making calls to an OpenAI-compatible API, you can pipe the requests directly back in your response to Vapi.
58 |
59 | If you'd like your OpenAI-compatible endpoint to be authenticated, you can POST your server's API key and URL via the `/credential` endpoint:
60 |
61 | ```json
62 | {
63 | "provider": "custom-llm",
64 | "apiKey": ""
65 | }
66 | ```
67 |
68 | If your server isn't authenticated, you can skip this step.
69 |
70 | Then, you can create an assistant with the `custom-llm` model provider:
71 |
72 | ```json
73 | {
74 | "name": "My Assistant",
75 | "model": {
76 | "provider": "custom-llm",
77 | "url": "",
78 | "model": "my-cool-model",
79 | "messages": [
80 | {
81 | "role": "system",
82 | "content": "You are an assistant."
83 | }
84 | ],
85 | "temperature": 0.7
86 | }
87 | }
88 | ```
89 |
--------------------------------------------------------------------------------
/customization/custom-llm/using-your-server.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Connecting Your Custom LLM to Vapi: A Comprehensive Guide"
3 | sidebarTitle: "Custom LLM"
4 | ---
5 |
6 | This guide provides a comprehensive walkthrough on integrating Vapi with OpenAI's gpt-3.5-turbo-instruct model using a custom LLM configuration. We'll leverage Ngrok to expose a local development environment for testing and demonstrate the communication flow between Vapi and your LLM.
7 | ## Prerequisites
8 |
9 | - **Vapi Account**: Access to the Vapi Dashboard for configuration.
10 | - **OpenAI API Key**: With access to the gpt-3.5-turbo-instruct model.
11 | - **Python Environment**: Set up with the OpenAI library (`pip install openai`).
12 | - **Ngrok**: For exposing your local server to the internet.
13 | - **Code Reference**: Familiarize yourself with the `/openai-sse/chat/completions` endpoint function in the provided Github repository: [Server-Side Example Python Flask](https://github.com/VapiAI/server-side-example-python-flask/blob/main/app/api/custom_llm.py).
14 |
15 | ## Step 1: Setting Up Your Local Development Environment
16 |
17 | **1. Create a Python Script (app.py):**
18 |
19 | ```python
20 | from flask import Flask, request, jsonify
21 | import openai
22 |
23 | app = Flask(__name__)
24 | openai.api_key = "YOUR_OPENAI_API_KEY" # Replace with your actual API key
25 |
26 | @app.route("/chat/completions", methods=["POST"])
27 | def chat_completions():
28 | data = request.get_json()
29 | # Extract relevant information from data (e.g., prompt, conversation history)
30 | # ...
31 |
32 | response = openai.ChatCompletion.create(
33 | model="gpt-3.5-turbo-instruct",
34 | messages=[
35 | {"role": "system", "content": "You are a helpful assistant."},
36 | # ... (Add messages from conversation history and current prompt)
37 | ]
38 | )
39 | # Format response according to Vapi's structure
40 | # ...
41 | return jsonify(formatted_response)
42 |
43 | if __name__ == "__main__":
44 | app.run(debug=True, port=5000) # You can adjust the port if needed
45 | ```
46 | **2. Run the Script:**
47 | Execute the Python script using python app.py in your terminal. This will start the Flask server on the specified port (5000 in this example).
48 |
49 | **3. Expose with Ngrok:**
50 | Open a new terminal window and run ngrok http 5000 (replace 5000 with your chosen port) to create a public URL that tunnels to your local server.
51 |
52 | ## Step 2: Configuring Vapi with Custom LLM
53 | **1. Access Vapi Dashboard:**
54 | Log in to your Vapi account and navigate to the "Model" section.
55 |
56 | **2. Select Custom LLM:**
57 | Choose the "Custom LLM" option to set up the integration.
58 |
59 | **3. Enter Ngrok URL:**
60 | Paste the public URL generated by ngrok (e.g., https://your-unique-id.ngrok.io) into the endpoint field. This will be the URL Vapi uses to communicate with your local server.
61 |
62 | **4. Test the Connection:**
63 | Send a test message through the Vapi interface to ensure it reaches your local server and receives a response from the OpenAI API. Verify that the response is displayed correctly in Vapi.
64 |
65 | ## Step 3: Understanding the Communication Flow
66 | **1. Vapi Sends POST Request:**
67 | When a user interacts with your Vapi application, Vapi sends a POST request containing conversation context and metadata to the configured endpoint (your ngrok URL).
68 |
69 | **2. Local Server Processes Request:**
70 | Your Python script receives the POST request and the chat_completions function is invoked.
71 |
72 | **3. Extract and Prepare Data:**
73 | The script parses the JSON data, extracts relevant information (prompt, conversation history), and builds the prompt for the OpenAI API call.
74 |
75 | **4. Call to OpenAI API:**
76 | The constructed prompt is sent to the gpt-3.5-turbo-instruct model using the openai.ChatCompletion.create method.
77 |
78 | **5. Receive and Format Response:**
79 | The response from OpenAI, containing the generated text, is received and formatted according to Vapi's expected structure.
80 |
81 | **6. Send Response to Vapi:**
82 | The formatted response is sent back to Vapi as a JSON object.
83 |
84 | **7. Vapi Displays Response:**
85 | Vapi receives the response and displays the generated text within the conversation interface to the user.
86 |
87 | By following these detailed steps and understanding the communication flow, you can successfully connect Vapi to OpenAI's gpt-3.5-turbo-instruct model and create powerful conversational experiences within your Vapi applications. The provided code example and reference serve as a starting point for you to build and customize your integration based on your specific needs.
88 |
89 | **Video Tutorial:**
90 |
--------------------------------------------------------------------------------
/customization/custom-voices/custom-voice.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Introduction"
3 | sidebarTitle: "Introduction"
4 | description: "Use Custom Voice with your favourite provider instead of the preset ones."
5 | ---
6 |
7 | Vapi lets you use various providers with some preset voices. At the same time, you can also create your own custom voices with the supported providers and use them with Vapi.
8 |
9 | You can update the `voice` property in the assistant configuration when you are creating the assistant to use your custom voice.
10 |
11 | ```json
12 | {
13 | "voice": {
14 | "provider": "deepgram",
15 | "voiceId": "your-voice-id"
16 | }
17 | }
18 | ```
19 |
--------------------------------------------------------------------------------
/customization/custom-voices/elevenlabs.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "ElevenLabs"
3 | description: "Quickstart: Setup Elevenlabs Custom Voice"
4 | ---
5 |
6 | This guide outlines the procedure for integrating your cloned voice with 11labs through the VAPI platform.
7 |
8 | An API subscription is required for this process to work.
9 |
10 | To integrate your cloned voice with 11labs using the VAPI platform, follow these steps.
11 |
12 | 1. **Obtain an 11labs API Subscription:** Visit the [11labs pricing page](https://elevenlabs.io/pricing) and subscribe to an API plan that suits your needs.
13 | 2. **Retrieve Your API Key:** Go to the 'Profile + Keys' section on the 11labs website to get your API key.
14 | 3. **Enter Your API Key in VAPI:** Navigate to the [VAPI Provider Key section](https://dashboard.vapi.ai/keys) and input your 11labs API key under the 11labs section.
15 | 4. **Sync Your Cloned Voice:** From the [Voice Library](https://dashboard.vapi.ai/voice-library) in VAPI, select 11labs as your voice provider and click on "Sync with 11labs."
16 | 5. **Search and Use Your Cloned Voice:** After syncing, you can search for your cloned voice within the voice library and directly use it with your assistant.
17 |
18 | By following these steps, you will successfully integrate your cloned voice from 11labs with VAPI.
19 |
20 | **Video Tutorial:**
21 |
31 |
--------------------------------------------------------------------------------
/customization/custom-voices/playht.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "PlayHT"
3 | description: "Quickstart: Setup PlayHT Custom Voice"
4 | ---
5 |
6 | This guide outlines the procedure for integrating your cloned voice with Play.ht through the VAPI platform.
7 |
8 | An API subscription is required for this process to work.
9 |
10 | To integrate your cloned voice with [Play.ht](http://play.ht/) using the VAPI platform, follow these steps.
11 |
12 | 1. **Obtain a Play.ht API Subscription:** Visit the [Play.ht pricing page](https://play.ht/studio/pricing) and subscribe to an API plan.
13 | 2. **Retrieve Your User ID and Secret Key:** Go to the [API Access section](https://play.ht/studio/api-access) on Play.ht to get your User ID and Secret Key.
14 | 3. **Enter Your API Keys in VAPI:** Navigate to the [VAPI Provider Key section](https://dashboard.vapi.ai/keys) and input your Play.ht API keys under the Play.ht section.
15 | 4. **Sync Your Cloned Voice:** From the [Voice Library](https://dashboard.vapi.ai/voice-library) in VAPI, select Play.ht as your voice provider and click on "Sync with Play.ht."
16 | 5. **Search and Use Your Cloned Voice:** After syncing, you can search for your cloned voice within the voice library and directly use it with your assistant.
17 |
18 | **Video Tutorial:**
19 |
--------------------------------------------------------------------------------
/customization/jwt-authentication.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "JWT Authentication"
3 | sidebarTitle: "JWT Authentication"
4 | description: "Secure API authentication guide"
5 | ---
6 | This documentation provides an overview of JWT (JSON Web Token) Authentication and demonstrates how to generate a JWT token and use it to authenticate API requests securely.
7 |
8 | ## Prerequisites
9 |
10 | Before you proceed, ensure you have the following:
11 |
12 | - An environment that supports JWT generation and API calls (e.g., a programming language or framework)
13 | - An account with a service that requires JWT authentication
14 | - Environment variables set up for the necessary credentials (e.g., organization ID and private key, both can be found in your Vapi portal)
15 |
16 | ## Generating a JWT Token
17 |
18 | The following steps outline how to generate a JWT token:
19 |
20 | 1. **Define the Payload**: The payload contains the data you want to include in the token. In this case, it includes an `orgId`.
21 | 2. **Get the Private Key**: The private key (provided by Vapi) is used to sign the token. Ensure it is securely stored, often in environment variables.
22 | 3. **Set Token Options**: Define options for the token, such as the expiration time (`expiresIn`).
23 | 4. **Generate the Token**: Use a JWT library or built-in functionality to generate the token with the payload, key, and options.
24 |
25 | ### Example
26 |
27 | ```js
28 | // Define the payload
29 | const payload = {
30 | orgId: process.env.ORG_ID,
31 | };
32 |
33 | // Get the private key from environment variables
34 | const key = process.env.PRIVATE_KEY;
35 |
36 | // Define token options
37 | const options = {
38 | expiresIn: '1h',
39 | };
40 |
41 | // Generate the token using a JWT library or built-in functionality
42 | const token = generateJWT(payload, key, options);
43 | ```
44 |
45 | ### Explanation
46 |
47 | - **Payload**: The payload includes the `orgId`, representing the organization ID.
48 | - **Key**: The private key is used to sign the token, ensuring its authenticity.
49 | - **Options**: The `expiresIn` option specifies that the token will expire in 1 hour.
50 | - **Token Generation**: The `generateJWT` function (a placeholder for the actual JWT generation method) creates the token using the provided payload, key, and options.
51 |
52 | ## Making an Authenticated API Request
53 |
54 | Once the token is generated, you can use it to make authenticated API requests. The following steps outline how to make an authenticated request:
55 |
56 | 1. **Define the API Endpoint**: Specify the URL of the API you want to call.
57 | 2. **Set the Headers**: Include the `Content-Type` and `Authorization` headers in your request. The `Authorization` header should include the generated JWT token prefixed with `Bearer`.
58 | 3. **Make the API Call**: Use an appropriate method to send the request and handle the response.
59 |
60 | ### Example
61 |
62 | ```js
63 | async function getAssistants() {
64 | const response = await fetch('https://api.vapi.ai/assistant', {
65 | method: 'GET',
66 | headers: {
67 | 'Content-Type': 'application/json',
68 | Authorization: `Bearer ${token}`,
69 | },
70 | });
71 |
72 | const data = await response.json();
73 | console.log(data);
74 | }
75 |
76 | getAssistants().catch(console.error);
77 |
78 | ```
79 |
80 | ### Explanation
81 |
82 | - **API Endpoint**: The URL of the API you want to call.
83 | - **Headers**: The `Content-Type` is set to `application/json`, and the `Authorization` header includes the generated JWT token.
84 | - **API Call**: The `getAssistants` function makes an asynchronous GET request to the specified API endpoint and logs the response.
85 |
86 | ### Usage
87 |
88 | With the generated token, you can authenticate API requests to any endpoint requiring authentication. The token will be valid for the duration specified in the options (1 hour in this case).
89 |
90 | ## Conclusion
91 |
92 | This documentation covered the basics of generating a JWT token and demonstrated how to use the token to make authenticated API requests. Ensure that your environment variables (e.g., `ORG_ID` and `PRIVATE_KEY`) are correctly set up before running the code.
93 |
--------------------------------------------------------------------------------
/customization/multilingual.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Multilingual"
3 | sidebarTitle: "Multilingual"
4 | description: "Learn how to set up and test multilingual support in Vapi."
5 | ---
6 |
7 | Vapi's multilingual support is primarily facilitated through transcribers, which are part of the speech-to-text process. The pipeline consists of three key elements: text-to-speech, speech-to-text, and the LLM, which acts as the brain of the operation. Each of these elements can be customized using different providers.
8 |
9 | ## Transcribers (Speech-to-Text)
10 |
11 | Currently, Vapi supports two providers for speech-to-text transcriptions:
12 |
13 | - `Deepgram` (nova - family models)
14 | - `Talkscriber` (whisper model)
15 |
16 | Each provider supports different languages. For more detailed information, you can visit your dashboard and navigate to the transcribers tab on the assistant page. Here, you can see the languages supported by each provider and the available models. **Note that not all models support all languages**. For specific details, you can refer to the documentation for the corresponding providers.
17 |
18 | ## Voice (Text-to-Speech)
19 |
20 | Once you have set your transcriber and corresponding language, you can choose a voice for text-to-speech in that language. For example, you can choose a voice with a Spanish accent if needed.
21 |
22 | Vapi currently supports the following providers for text-to-speech:
23 |
24 | - `PlayHT`
25 | - `11labs`
26 | - `Rime-ai`
27 | - `Deepgram`
28 | - `OpenAI`
29 | - `Azure`
30 | - `Lmnt`
31 | - `Neets`
32 |
33 | Each provider offers varying degrees of language support. Azure, for instance, supports the most languages, with approximately 400 prebuilt voices across 140 languages and variants. You can also create your own custom languages with other providers.
34 |
35 | ## Multilingual Support
36 |
37 | For multilingual support, you can choose providers like Eleven Labs or Azure, which have models and voices designed for this purpose. This allows your voice assistant to understand and respond in multiple languages, enhancing the user experience for non-English speakers.
38 |
39 | To set up multilingual support, you no longer need to specify the desired language when configuring the voice assistant. This configuration in the voice section is deprecated.
40 |
41 | Instead, you directly choose a voice that supports the desired language from your voice provider. This can be done when you are setting up or modifying your voice assistant.
42 |
43 | Here is an example of how to set up a voice assistant that speaks Spanish:
44 |
45 | ```json
46 | {
47 | "voice": {
48 | "provider": "azure",
49 | "voiceId": "es-ES-ElviraNeural"
50 | }
51 | }
52 | ```
53 |
54 | In this example, the voice `es-ES-ElviraNeural` from the provider `azure` supports Spanish. You can replace `es-ES-ElviraNeural` with the ID of any other voice that supports your desired language.
55 |
56 | By leveraging Vapi's multilingual support, you can make your voice assistant more accessible and user-friendly, reaching a wider audience and providing a better user experience.
57 |
--------------------------------------------------------------------------------
/customization/provider-keys.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Provider Keys"
3 | sidebarTitle: "Provider Keys"
4 | description: "Bring your own API keys to Vapi."
5 | ---
6 |
7 | Have a custom model or voice with one of the providers? Or an enterprise account with volume pricing?
8 |
9 | No problem! You can bring your own API keys to Vapi. You can add them in the [Dashboard](https://dashboard.vapi.ai) under the **Provider Keys** tab. Once your API key is validated, you won't be charged when using that provider through Vapi. Instead, you'll be charged directly by the provider.
10 |
11 | ## Transcription Providers
12 |
13 | Currently, the only available transcription provider is `deepgram`. To use a custom model, you can specify the deepgram model ID in the `transcriber.model` parameter of the [Assistant](/api-reference/assistants/create-assistant).
14 |
15 | ## Model Providers
16 |
17 | We currently have support for any OpenAI-compatible endpoint. This includes services like [OpenRouter](https://openrouter.ai/), [AnyScale](https://www.anyscale.com/), [Together AI](https://www.together.ai/), or your own server.
18 |
19 | To use one of these providers, you can specify the `provider` and `model` in the `model` parameter of the [Assistant](/api-reference/assistants/create-assistant).
20 |
21 | You can find more details in the [Custom LLMs](customization/custom-llm/fine-tuned-openai-models) section of the documentation.
22 |
23 | ## Voice Providers
24 |
25 | All voice providers are supported. Once you've validated your API through the [Dashboard](https://dashboard.vapi.ai), any voice ID from your provider can be used in the `voice.voiceId` field of the [Assistant](/api-reference/assistants/create-assistant).
26 |
--------------------------------------------------------------------------------
/customization/speech-configuration.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Speech Configuration"
3 | description: "Timing control for assistant speech"
4 | ---
5 |
6 | The Speaking Plan and Stop Speaking Plan are essential configurations designed to optimize the timing of when the assistant begins and stops speaking during interactions with a customer. These plans ensure that the assistant does not interrupt the customer and also prevents awkward pauses that can occur if the assistant starts speaking too late. Adjusting these parameters helps tailor the assistant’s responsiveness to different conversational dynamics.
7 |
8 | **Note**: These configurations can currently only be made via the API.
9 |
10 | ## Start Speaking Plan
11 |
12 | - **Wait Time Before Speaking**: You can set how long the assistant waits before speaking after the customer finishes. The default is 0.4 seconds, but you can increase it if the assistant is speaking too soon, or decrease it if there’s too much delay.
13 |
14 | - **Smart Endpointing**: This feature uses advanced processing to detect when the customer has truly finished speaking, especially if they pause mid-thought. It’s off by default but can be turned on if needed.
15 |
16 | - **Transcription-Based Detection**: Customize how the assistant determines that the customer has stopped speaking based on what they’re saying. This offers more control over the timing.
17 |
18 |
19 | ## Stop Speaking Plan
20 |
21 | - **Words to Stop Speaking**: Define how many words the customer needs to say before the assistant stops talking. If you want immediate reaction, set this to 0. Increase it to avoid interruptions by brief acknowledgments like "okay" or "right".
22 |
23 | - **Voice Activity Detection**: Adjust how long the customer needs to be speaking before the assistant stops. The default is 0.2 seconds, but you can tweak this to balance responsiveness and avoid false triggers.
24 |
25 | - **Pause Before Resuming**: Control how long the assistant waits before starting to talk again after being interrupted. The default is 1 second, but you can adjust it depending on how quickly the assistant should resume.
26 |
27 | ## Considerations for Configuration
28 |
29 | - **Customer Style**: Think about whether the customer pauses mid-thought or provides continuous speech. Adjust wait times and enable smart endpointing as needed.
30 |
31 | - **Background Noise**: If there’s a lot of background noise, you may need to tweak the settings to avoid false triggers.
32 |
33 | - **Conversation Flow**: Aim for a balance where the assistant is responsive but not intrusive. Test different settings to find the best fit for your needs.
34 |
--------------------------------------------------------------------------------
/enterprise/onprem.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "On-Prem Deployments"
3 | sidebarTitle: "On-Prem Deployments"
4 | description: "Deploy Vapi in your private cloud."
5 | ---
6 |
7 | Vapi On-Prem allows you to deploy Vapi's best in class enterprise voice infrastructure AI directly in your own cloud. It can be deployed in a dockerized format on any cloud provider, in any geographic location, running on your GPUs.
8 |
9 | With On-Prem, your audio and text data stays in your cloud. Data never passes through Vapi's servers. If you're handling sensitive data (e.g. health, financial, legal) and are under strict data requirements, you should consider deploying on-prem.
10 |
11 | Your device regularly sends performance and usage information to Vapi's cloud. This data helps adjust your device's GPU resources and is also used for billing. All network traffic from your device is tracked in an audit log, letting your engineering or security team see what the device is doing at all times.
12 |
13 | ## Frequently Asked Questions
14 |
15 | #### Can the appliance adjust to my needs?
16 |
17 | Yes, the Vapi On-Prem appliance automatically adjusts its GPU resources to handle your workload as required by our service agreement. It can take a few minutes to adjust to changes in your workload. If you need quicker adjustments, you might want to ask for more GPUs by contacting support@vapi.ai.
18 |
19 | #### What if I can’t get enough GPUs from my cloud provider?
20 |
21 | If you're struggling to get more GPUs from your provider, contact support@vapi.ai for help.
22 |
23 | #### Can I access Vapi's AI models?
24 |
25 | No, our AI models are on secure machines in your Isolated VPC and you can’t log into these machines or check their files.
26 |
27 | #### How can I make sure my data stays within my cloud?
28 |
29 | Your device operates in VPCs that you control. You can check the network settings and firewall rules, and look at traffic logs to make sure everything is as it should be. The Control VPC uses open source components, allowing you to make sure the policies are being followed. Performance data and model updates are sent to Vapi, but all other traffic leaving your device is logged, except for the data sent back to your API clients.
30 |
31 | ## Contact us
32 |
33 | For more information about Vapi On-Prem, please contact us at support@vapi.ai
34 |
--------------------------------------------------------------------------------
/enterprise/plans.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Vapi Enterprise"
3 | sidebarTitle: "Vapi Enterprise"
4 | description: "Build and scale with Vapi."
5 | ---
6 |
7 | If you're building a production application on Vapi, we can help you every step of the way from idea to full-scale deployment.
8 |
9 | On the Pay-As-You-Go plan, there is a limit of **10 concurrent calls**. On Enterprise, we reserve GPUs for you on our Enterprise cluster so you can scale up to **millions of calls**.
10 |
11 | #### Enterprise Plans include:
12 |
13 | - Reserved concurrency and higher rate limits
14 | - Hands-on 24/7 support
15 | - Shared Slack channel with our team
16 | - Included minutes with volume pricing
17 | - Calls with our engineering team 2-3 times per week
18 | - Access to the Vapi SIP trunk for telephony
19 |
20 | ## Contact us
21 |
22 | To get started on Vapi Enterprise, [fill out this form](https://book.vapi.ai).
23 |
--------------------------------------------------------------------------------
/examples/outbound-call-python.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Outbound Calls from Python 📞"
3 | sidebarTitle: "Python Outbound Snippet"
4 | description: "Some sample code for placing an outbound call using Python"
5 | ---
6 |
7 | ```python
8 | import requests
9 |
10 | # Your Vapi API Authorization token
11 | auth_token = ''
12 | # The Phone Number ID, and the Customer details for the call
13 | phone_number_id = ''
14 | customer_number = "+14151231234"
15 |
16 | # Create the header with Authorization token
17 | headers = {
18 | 'Authorization': f'Bearer {auth_token}',
19 | 'Content-Type': 'application/json',
20 | }
21 |
22 | # Create the data payload for the API request
23 | data = {
24 | 'assistant': {
25 | "firstMessage": "Hey, what's up?",
26 | "model": {
27 | "provider": "openai",
28 | "model": "gpt-3.5-turbo",
29 | "messages": [
30 | {
31 | "role": "system",
32 | "content": "You are an assistant."
33 | }
34 | ]
35 | },
36 | "voice": "jennifer-playht"
37 | },
38 | 'phoneNumberId': phone_number_id,
39 | 'customer': {
40 | 'number': customer_number,
41 | },
42 | }
43 |
44 | # Make the POST request to Vapi to create the phone call
45 | response = requests.post(
46 | 'https://api.vapi.ai/call/phone', headers=headers, json=data)
47 |
48 | # Check if the request was successful and print the response
49 | if response.status_code == 201:
50 | print('Call created successfully')
51 | print(response.json())
52 | else:
53 | print('Failed to create call')
54 | print(response.text)
55 | ```
56 |
--------------------------------------------------------------------------------
/faq.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Frequently Asked Questions"
3 | sidebarTitle: "FAQ"
4 | description: "Frequently asked questions about Vapi."
5 | ---
6 |
7 | import FaqSnippet from "/snippets/faq-snippet.mdx";
8 |
9 |
10 |
--------------------------------------------------------------------------------
/how-vapi-works.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Orchestration Models"
3 | sidebarTitle: "Orchestration Models"
4 | description: "All the fancy stuff Vapi does on top of the core models."
5 | ---
6 |
7 | Vapi also runs a suite of audio and text models that make its latency-optimized Speech-to-Text (STT), Large Language Model (LLM), & Text-to-Speech (TTS) pipeline feel human.
8 |
9 | Here's a high-level overview of the Vapi architecture:
10 |
11 |
12 |
13 |
14 |
15 | These are some of the models that are part of the Orchestration suite. We currently have lots of other models in the pipeline that will be added to the orchestration suite soon. The ultimate goal is to achieve human performance.
16 |
17 | ### Endpointing
18 |
19 | Endpointing is a fancy word for knowing when the user is done speaking. Traditional methods use silence detection with a timeout. Unfortunately, if we want sub-second response-times, that's not going to work.
20 |
21 | Vapi uses a custom fusion audio-text model to know when a user has completed their turn. Based on both the user's tone and what they're saying, it decides how long to pause before hitting the LLM.
22 |
23 | This is critical to make sure the user isn't interrupted mid-thought while still providing sub-second response times when they're done speaking.
24 |
25 | ### Interruptions (Barge-in)
26 |
27 | Interruptions (aka. barge-in in research circles) is the ability to detect when the user would like to interject and stop the assistant's speech.
28 |
29 | Vapi uses a custom model to distinguish when there is a true interruption, like "stop", "hold up", "that's not what I mean", and when there isn't, like "yeah", "oh gotcha", "okay."
30 |
31 | It also keeps track of where the assistant was cut off, so the LLM knows what it wasn't able to say.
32 |
33 | ### Background Noise Filtering
34 |
35 | Many of our models, including the transcriber, are audio-based. In the real world, things like music and car horns can interfere with model performance.
36 |
37 | We use a proprietary real-time noise filtering model to ensure the audio is cleaned without sacrificing latency, before it reaches the inner models of the pipeline.
38 |
39 | ### Background Voice Filtering
40 |
41 | We rely quite heavily on the transcription model to know what's going on, for interruptions, endpointing, backchanneling, and for the user's statement passed to the LLM.
42 |
43 | Transcription models are built to pick up everything that sounds like speech, so this can be a problem. As you can imagine, having a TV on in the background or echo coming back into the mic can severely impact the conversation ability of a system like Vapi.
44 |
45 | Background noise cancellation is a well-researched problem. Background voice cancellation is not. To solve this, we built a proprietary audio filtering model that's able to **focus in** on the primary speaker and block everything else out.
46 |
47 | ### Backchanneling
48 |
49 | Humans like to affirm each other while they speak with statements like "yeah", "uh-huh", "got it", "oh no!"
50 |
51 | They're not considered interruptions; they're just used to let the speaker know that their statement has been understood, and to encourage the speaker to continue.
52 |
53 | A backchannel cue used at the wrong moment can derail a user's statement. Vapi uses a proprietary fusion audio text model to determine the best moment to backchannel and to decide which backchannel cue is most appropriate to use.
54 |
55 | ### Emotion Detection
56 |
57 | How a person says something is just as important as what they're saying. So we've trained a real-time audio model to extract the emotional inflection of the user's statement.
58 |
59 | This emotional information is then fed into the LLM, so it knows to behave differently if the user is angry, annoyed, or confused.
60 |
61 | ### Filler Injection
62 |
63 | The output of LLMs tends to be formal, and not conversational. People speak with phrases like "umm", "ahh", "i mean", "like", "so", etc.
64 |
65 | You can prompt the model to output like this, but we treat our user's prompts as **sacred**. Making a change like this to a prompt can change the behavior in unintended ways.
66 |
67 | To ensure we don't add additional latency transforming the output, we've built a custom model that's able to convert streaming input and make it sound conversational in real-time.
68 |
--------------------------------------------------------------------------------
/knowledgebase.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Creating Custom Knowledge Bases for Your Voice AI Assistants"
3 | sidebarTitle: "Knowledge Base"
4 | description: "Learn how to create and integrate custom knowledge bases into your voice AI assistants."
5 | ---
6 |
7 | import AssistantSetupInboundAccordionGroup from "/snippets/quickstart/dashboard/assistant-setup-inbound.mdx";
8 | import GetAPhoneNumberSnippet from "/snippets/quickstart/phone/get-a-phone-number.mdx";
9 | import { Image } from "/snippets/images/images.mdx";
10 |
11 |
20 |
21 | ## **What is Vapi's Knowledge Base?**
22 | Our Knowledge Base is a collection of custom documents that contain information on specific topics or domains. By integrating a Knowledge Base into your voice AI assistant, you can enable it to provide more accurate and informative responses to user queries.
23 |
24 | ### **Why Use a Knowledge Base?**
25 | Using a Knowledge Base with your voice AI assistant offers several benefits:
26 |
27 | * **Improved accuracy**: By integrating custom documents into your assistant, you can ensure that it provides accurate and up-to-date information to users.
28 | * **Enhanced capabilities**: A Knowledge Base enables your assistant to answer complex queries and provide detailed responses to user inquiries.
29 | * **Customization**: With a Knowledge Base, you can tailor your assistant's responses to specific domains or topics, making it more effective and informative.
30 |
31 | ## **How to Create a Knowledge Base**
32 |
33 | To create a Knowledge Base, follow these steps:
34 |
35 | ### **Step 1: Upload Your Documents**
36 |
37 | Navigate to Overview > Documents and upload your custom documents in Markdown, PDF, plain text, or Microsoft Word (.doc and .docx) format to Vapi's Knowledge Base.
38 |
39 |
40 |
41 | ### **Step 2: Create an Assistant**
42 |
43 | Create a new assistant in Vapi and, on the right sidebar menu, select the document you've just added to the Knowledge Base feature.
44 |
45 |
46 |
47 | ### **Step 3: Configure Your Assistant**
48 |
49 | Customize your assistant's system prompt to utilize the Knowledge Base for responding to user queries.
50 |
51 | ## **Best Practices for Creating Effective Knowledge Bases**
52 |
53 | * **Organize Your documents**: Organize your documents by topic or category to ensure that your assistant can quickly retrieve relevant information.
54 | * **Use Clear and concise language**: Use clear and concise language in your documents to ensure that your assistant can accurately understand and respond to user queries.
55 | * **Keep your documents up-to-date**: Regularly update your documents to ensure that your assistant provides the most accurate and up-to-date information.
56 |
57 |
58 | For more information on creating effective Knowledge Bases, check out our tutorial on [Best Practices for Knowledge Base Creation](https://youtu.be/i5mvqC5sZxU).
59 |
60 |
61 | By following these guidelines, you can create a comprehensive Knowledge Base that enhances the capabilities of your voice AI assistant and provides valuable information to users.
--------------------------------------------------------------------------------
/phone-calling.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Phone Calling"
3 | sidebarTitle: "Introduction"
4 | description: "Learn how to create and configure phone numbers with Vapi."
5 | ---
6 |
7 |
8 |
9 | You can set up a phone number to place and receive phone calls. Phone numbers can be bought directly through Vapi, or you can use your own from Twilio.
10 |
11 | You can buy a phone number through the dashboard or use the [`/phone-numbers/buy`](/api-reference/phone-numbers/buy-phone-number) endpoint.
12 |
13 | If you want to use your own phone number, you can also use the dashboard or the [`/phone-numbers/import`](/api-reference/phone-numbers/import-twilio-number) endpoint. This will use your Twilio credentials to verify the number and configure it with Vapi services.
14 |
15 |
16 |
17 |
18 | You can place an outbound call from one of your phone numbers using the
19 | [`/call/phone`](/api-reference/calls/create-phone-call) endpoint. If the system message will be
20 | different with every call, you can specify a temporary assistant in the `assistant` field. If you
21 | want to reuse an assistant, you can specify its ID in the `assistantId` field.
22 |
23 |
24 |
25 | You can provide an `assistantId` to a phone number and it will use that assistant when receiving inbound calls.
26 |
27 | You may want to specify the assistant based on the caller's phone number. If a phone number doesn't have an `assistantId`, Vapi will attempt to retrieve the assistant from your server using your [Server URL](/server-url#retrieving-assistants).
28 |
29 |
30 |
31 | Video Tutorial on How to Import Numbers from Twilio for International Calls:
32 |
33 |
43 |
44 |
--------------------------------------------------------------------------------
/pricing.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Pricing Overview"
3 | sidebarTitle: "Overview"
4 | description: "Only pay for the minutes you use."
5 | ---
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | Vapi itself charges $0.05 per minute for calls. Prorated to the second.
16 |
17 |
23 | Transcriber, model, voice, & telephony costs charged at-cost.
24 |
25 |
31 | Bring your own API keys for providers, Vapi makes requests on your behalf.
32 |
33 |
39 | Phone numbers purchased through Vapi bill at $2/mo.
40 |
41 |
42 |
43 | ### Starter Credits
44 |
45 | Every new account is granted **$10 in free credits** to begin testing voice workflows. You can [begin using Vapi](/quickstart/dashboard) without a credit card.
46 |
47 | ---
48 |
49 | ## Enterprise
50 |
51 | Handling a large volume of calls? You can find more information on our Enterprise plans [here](/enterprise).
52 |
53 | - Higher concurrency and rate limits
54 | - Hands-on 24/7 support
55 | - Shared Slack channel with our team
56 | - Included minutes with volume pricing
57 | - Calls with our engineering team 2-3 times per week
58 |
59 | ## Further Reading
60 |
61 |
62 |
68 | Learn more about how Vapi routes provider costs.
69 |
70 |
76 | Learn more about estimating costs for your voice pipeline.
77 |
78 |
84 | Learn how to set billing limits for your account.
85 |
86 |
92 | Read full end-to-end billing breakdowns to better understand how Vapi bills.
93 |
94 |
95 |
--------------------------------------------------------------------------------
/prompting-guide.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Prompting Guide"
3 | sidebarTitle: "Prompting Guide"
4 | ---
5 |
6 | Prompt engineering is the art of crafting effective instructions for AI agents, directly influencing their performance and reliability. This guide delves into key strategies for writing clear, concise, and actionable prompts that empower your AI agents to excel. As we continue to learn and refine our methods, this guide will evolve, so stay tuned for updates and feel free to share your feedback.
7 |
8 | ### Building Blocks of Effective Prompts: Sectional Organization
9 |
10 | To enhance clarity and maintainability, it's recommended to break down system prompts into distinct sections, each focusing on a specific aspect:
11 |
12 | - **Identity:** Define the persona and role of the AI agent, setting the tone for interactions.
13 | - **Style:** Establish stylistic guidelines, such as conciseness, formality, or humor, to ensure consistent communication.
14 | - **Response Guidelines:** Specify formatting preferences, question limits, or other structural elements for responses.
15 | - **Task & Goals:** Outline the agent's objectives and the steps it should take to achieve them.
16 |
17 | **Example:**
18 |
19 | ```jsx
20 | [Identity]
21 | You are a helpful and knowledgeable virtual assistant for a travel booking platform.
22 |
23 | [Style]
24 | - Be informative and comprehensive.
25 | - Maintain a professional and polite tone.
26 | - Be concise, as you are currently operating as a Voice Conversation.
27 |
28 | [Response Guideline]
29 | - Present dates in a clear format (e.g., January 15, 2024).
30 | - Offer up to three travel options based on user preferences.
31 |
32 | [Task]
33 | 1. Greet the user and inquire about their desired travel destination.
34 | 2. Ask about travel dates and preferences (e.g., budget, interests).
35 | 3. Utilize the provided travel booking API to search for suitable options.
36 | 4. Present the top three options to the user, highlighting key features.
37 |
38 | ```
39 |
40 |
41 | ### Task Breakdown: Step-by-Step Instructions
42 | For complex interactions, breaking down the task into a sequence of steps enhances the agent's understanding and ensures a structured conversation flow. Incorporate conditional logic to guide the agent's responses based on user input.
43 | Example:
44 |
45 | ```jsx
46 | [Task]
47 | 1. Welcome the user to the technical support service.
48 | 2. Inquire about the nature of the technical issue.
49 | 3. If the issue is related to software, ask about the specific software and problem details.
50 | 4. If the issue is hardware-related, gather information about the device and symptoms.
51 | 5. Based on the collected information, provide troubleshooting steps or escalate to a human technician if necessary.
52 | ```
53 |
54 | ### Controlling Response Timing
55 | To prevent the agent from rushing through the conversation, explicitly indicate when to wait for the user's response before proceeding to the next step.
56 |
57 | ```jsx
58 | [Task]
59 | 1. Inform the user about the purpose of the call.
60 | 2. Ask for the user's name and account information.
61 |
62 | 3. Inquire about the reason for the call and offer assistance options....
63 | ```
64 |
65 | ### Explicit Tool Integration
66 | Specify when and how the agent should utilize external tools or APIs. Reference the tools by their designated names and describe their functions to ensure accurate invocation.
67 | Example:
68 |
69 | ```jsx
70 | [Task]
71 | ...
72 | 3. If the user wants to know about something, use the get_data function with the parameter 'query', which will contain the user's question to initiate the process.
73 | 4. Guide the user through the password reset steps provided by the API....
74 | ```
75 |
76 | ### Some Common Issues
77 | **1. Numbers sound very robotic, how can we improve it?**
78 |
79 | ⇒ When working with numbers that need to be expressed in writing, it's often preferable to spell them out. This approach usually sounds more natural.
80 |
81 | **2. How to make assistants sound more human-like?**
82 |
83 | ⇒ Add personality and provide tone to the assistant to make it sound more like a human being. Here are some examples:
84 |
85 | ```jsx
86 | - Tone: conversational, spartan, use less corporate jargon
87 | - Act as Iva, a sales agent who is a human being and has real feelings.
88 | ```
--------------------------------------------------------------------------------
/providers/voice/playht.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "PlayHT"
3 | sidebarTitle: "PlayHT"
4 | description: "What is PlayHT?"
5 | ---
6 |
7 | **What is PlayHT?**
8 |
9 | In the dynamic world of artificial intelligence, PlayHT emerges as a leading provider of voice AI solutions. Specializing in text-to-speech (TTS) and voice cloning technologies, PlayHT delivers highly realistic and versatile AI-generated voices that cater to a wide array of applications. From enhancing marketing videos to making content more accessible, PlayHT’s innovative tools empower users to create engaging and professional-grade audio content effortlessly.
10 |
11 | **The Evolution of AI Voice Technology:**
12 |
13 | AI voice technology has significantly evolved over the past decade. Initially limited to robotic and monotone outputs, advancements in machine learning and neural networks have paved the way for natural and expressive voice synthesis. PlayHT has harnessed these advancements to offer superior AI voices that are nearly indistinguishable from human speech, setting a new standard in the industry.
14 |
15 | **Overview of PlayHT’s Offerings:**
16 |
17 | PlayHT provides a robust suite of voice AI tools designed to meet diverse needs:
18 |
19 | **Text to Speech:**
20 |
21 | - PlayHT’s TTS technology converts written text into highly realistic speech, making it ideal for creating voiceovers, audiobooks, and other spoken content. This technology supports over 142 languages and accents, allowing users to generate audio content that is not only clear and engaging but also linguistically diverse.
22 |
23 | **Voice Cloning:**
24 |
25 | - PlayHT’s voice cloning feature enables users to create digital replicas of voices with high accuracy. This is particularly useful for preserving voices, personalizing digital assistants, and generating unique character voices for media and entertainment. The cloned voices maintain the nuances and emotional expressiveness of the original, ensuring a lifelike audio experience.
26 |
27 | **Voice Generation API:**
28 |
29 | - PlayHT offers a Voice Generation API that allows developers to integrate AI voice capabilities into their applications. This API supports real-time voice synthesis and cloning, providing a flexible and powerful solution for various interactive applications, including chatbots, virtual assistants, and gaming.
30 |
31 | **Use Cases for PlayHT:**
32 |
33 | - The applications of PlayHT’s technology are extensive and impactful:
34 |
35 | **Marketing:**
36 |
37 | - In the marketing sector, PlayHT’s realistic AI voices enhance the quality of promotional videos, explainer videos, and advertisements. Brands can create consistent and professional voiceovers that captivate audiences and convey messages effectively.
38 |
39 | **E-Learning:**
40 |
41 | - For educational content, PlayHT provides voices capable of pronouncing complex terminologies and acronyms, making e-learning materials more engaging and easier to understand. This helps in creating comprehensive and interactive training modules.
42 |
43 | **Accessibility:**
44 |
45 | - PlayHT’s TTS technology is a boon for accessibility, converting text into speech to assist individuals with visual impairments or reading difficulties. This promotes inclusivity and ensures that information is accessible to all.
46 |
47 | **Gaming:**
48 |
49 | - In the gaming industry, PlayHT’s voice cloning and TTS capabilities bring characters to life, enhancing the overall gaming experience. Developers can quickly generate high-quality voiceovers for dialogues, narration, and character interactions.
50 |
51 | **Impact on Content Creation:**
52 |
53 | - PlayHT is revolutionizing content creation by offering tools that are both powerful and user-friendly. By enabling creators to produce high-quality audio content quickly and efficiently, PlayHT reduces the time and costs associated with traditional recording methods. This democratizes access to professional-grade audio production, fostering innovation and creativity across various domains.
54 |
55 | **Innovation and Research:**
56 |
57 | Committed to pushing the boundaries of voice AI, PlayHT invests in continuous research and development. Their team of experts focuses on enhancing the quality, expressiveness, and versatility of AI-generated voices, exploring new applications, and refining existing technologies.
58 |
59 | **AI Safety and Ethics:**
60 |
61 | PlayHT prioritizes the ethical use of AI technology. They have implemented stringent safeguards to prevent misuse and are actively engaged in discussions about the responsible development and deployment of AI. Ensuring the privacy and security of users’ data is a core aspect of their operations.
62 |
63 | **Integrations and Compatibility:**
64 |
65 | PlayHT’s Voice Generation API enables seamless integration with various platforms and applications. This flexibility ensures that users can incorporate PlayHT’s voice AI capabilities into their existing systems without any hassle, streamlining workflows and enhancing functionality.
--------------------------------------------------------------------------------
/providers/voice/rimeai.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "RimeAI"
3 | sidebarTitle: "RimeAI"
4 | description: "What is Rime.ai?"
5 | ---
6 |
7 | **What is Rime.ai?**
8 |
9 | Rime.ai is a pioneering platform in the field of speech synthesis, offering real-time, lifelike voice generation. Specializing in creating natural-sounding voices tailored to demographic specifics, Rime.ai provides tools that allow businesses and developers to engage their audiences more effectively. By leveraging advanced AI, Rime.ai delivers high-quality audio that is indistinguishable from human speech, setting a new standard in the industry.
10 |
11 | **The Evolution of AI Speech Synthesis:**
12 |
13 | AI speech synthesis has come a long way from its early days of robotic-sounding outputs. Advances in machine learning, neural networks, and data processing have transformed synthetic speech into highly realistic and expressive audio. Rime.ai has harnessed these technological advancements to create voices that sound natural and convey the desired emotions and nuances.
14 |
15 | **Overview of Rime.ai’s Offerings:**
16 |
17 | Rime.ai provides a comprehensive suite of speech synthesis tools designed to meet various needs:
18 |
19 | **Real-time Speech Synthesis:**
20 |
21 | - Rime.ai’s real-time speech synthesis technology enables instant generation of lifelike voices. This is particularly useful for applications requiring immediate feedback, such as interactive voice response (IVR) systems, live virtual assistants, and real-time translation services. The technology boasts sub-300 millisecond response times, ensuring seamless and efficient communication.
22 |
23 | **Demographically Specific Voice Control:**
24 |
25 | - One of Rime.ai’s standout features is its ability to generate voices that are demographically specific. This means businesses can tailor their audio output to match the cultural, regional, and social characteristics of their target audience. With over 200 distinct voices available, Rime.ai allows for precise customization, enhancing user engagement and relatability.
26 |
27 | **Use Cases for Rime.ai:**
28 |
29 |
30 | - Rime.ai’s technology is versatile and applicable across multiple sectors:
31 |
32 | **IVR Systems:**
33 |
34 | - Interactive voice response systems benefit greatly from Rime.ai’s real-time speech synthesis. By providing natural and clear voices, IVR systems can improve user interactions, reduce call handling times, and enhance overall customer satisfaction.
35 |
36 | **Newsreading:**
37 |
38 | In the media industry, Rime.ai’s lifelike voices can be used for automated newsreading, delivering news updates in a natural and engaging manner. This ensures consistency and professionalism in audio content delivery.
39 |
40 |
41 | **Narration:**
42 |
43 | - For audiobooks, educational materials, and other forms of narration, Rime.ai offers high-quality voice generation that enhances the listening experience. The ability to match voices to the content’s demographic audience further adds to the personalization and effectiveness of the narration.
44 |
45 | **Impact on Content Creation:**
46 |
47 | Rime.ai is revolutionizing content creation by providing tools that allow for quick and efficient production of high-quality audio. By eliminating the need for traditional recording methods, creators can save time and resources while still producing professional-grade content. This democratization of audio production opens up new opportunities for innovation and creativity.
48 |
49 | **Innovation and Research:**
50 |
51 | Rime.ai is committed to continuous innovation and research in speech synthesis technology. Their team of experts is dedicated to improving the naturalness, expressiveness, and versatility of AI-generated voices. By exploring new applications and refining existing technologies, Rime.ai aims to stay at the forefront of the industry.
52 |
53 | **AI Safety and Ethics:**
54 |
55 | Ensuring the ethical use of AI is a top priority for Rime.ai. They have implemented robust safeguards to prevent misuse of their technology and are actively involved in discussions about responsible AI development. Protecting user data and maintaining transparency in AI operations are central to their approach.
56 |
57 | **Integrations and Compatibility:**
58 |
59 | Rime.ai’s API allows seamless integration with various platforms and applications. This ensures that users can incorporate Rime.ai’s speech synthesis capabilities into their existing systems effortlessly, enhancing functionality and improving user experience.
--------------------------------------------------------------------------------
/providers/voiceflow.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Voiceflow"
3 | sidebarTitle: "Voiceflow"
4 | description: "Vapi x Voiceflow"
5 | ---
6 |
7 | ## Overview
8 |
9 | Voiceflow is a conversational AI platform that helps teams build, manage, and deploy AI agents, especially chatbots, to enhance customer experiences. It enables users to create advanced AI chatbots without coding, using a user-friendly drag-and-drop flow builder. This feature allows businesses to customize chatbot interactions and efficiently automate customer support processes.
10 |
11 | To link Vapi with Voiceflow, host a proxy using Voiceflow's AI features. This proxy handles requests from Voiceflow, sends them to Vapi's text completion API, and returns Vapi's responses to Voiceflow. You'll need to host this proxy on your server to manage communication between Vapi and Voiceflow.
12 |
13 | ## Workshop
14 |
15 | The workshop conducted by Vapi in collaboration with Voiceflow provided an in-depth exploration of building voice agents using the Voiceflow platform, deployed through Vapi. During this session, participants learned how to create voice agents that leverage Voiceflow's user-friendly design tools alongside Vapi's voice capabilities. The workshop featured a live demonstration, where attendees could see the entire process of building a voice agent in real-time, including designing the agent, setting up necessary integrations, and testing functionality.
16 |
17 |
27 | By the end of the workshop, participants gained insights into building and deploying voice agents, practical skills in designing conversational flows, and a deeper understanding of how voice agents can be applied in real-world products.
28 |
--------------------------------------------------------------------------------
/quickstart.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Core Models"
3 | sidebarTitle: "Core Models"
4 | description: "The three core components to Vapi's voice AI pipeline."
5 | ---
6 |
7 | At its core, Vapi is an orchestration layer over three modules: the **transcriber**, the **model**, and the **voice**.
8 |
9 |
10 |
11 |
12 |
13 | These three modules can be swapped out with **any provider** of your choosing; OpenAI, Groq, Deepgram, ElevenLabs, PlayHT, etc. You can even plug in your server to act as the LLM.
14 |
15 | Vapi takes these three modules, optimizes the latency, manages the scaling & streaming, and orchestrates the conversation flow to make it sound human.
16 |
17 |
18 |
19 |
20 | When a person speaks, the client device (whether it is a laptop, phone,
21 | etc) will record raw audio (1’s & 0’s at the core of it).
22 |
23 |
24 | This raw audio will have to either be transcribed on the client device
25 | itself, or get shipped off to a server somewhere to turn into
26 | transcription text.
27 |
28 |
29 |
30 |
31 | That transcript text will then get fed into a prompt & run through an LLM
32 | ([LLM inference](/glossary#inference)). The LLM is the core intelligence
33 | that simulates a person behind-the-scenes.
34 |
35 |
36 |
37 |
38 | The LLM outputs text that now must be spoken. That text is turned back
39 | into raw audio (again, 1’s & 0’s), that is playable back at the user’s
40 | device.
41 |
42 |
43 | This process can also either happen on the user’s device itself, or on a
44 | server somewhere (then the raw speech audio be shipped back to the user).
45 |
46 |
47 |
48 |
49 | The idea is to perform each phase in realtime (sensitive down to 50-100ms level), streaming between every layer. Ideally the whole flow [voice-to-voice](/glossary#voice-to-voice) clocks in at \<500-700ms.
50 |
51 | Vapi pulls all these pieces together, ensuring a smooth & responsive conversation (in addition to providing you with a simple set of tools to manage these inner-workings).
52 |
--------------------------------------------------------------------------------
/quickstart/phone/inbound.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Inbound Calling"
3 | sidebarTitle: "Inbound Calling"
4 | description: "Quickstart handling inbound calls with Vapi."
5 | ---
6 |
7 | import AssistantSetupInboundAccordionGroup from "/snippets/quickstart/dashboard/assistant-setup-inbound.mdx";
8 | import GetAPhoneNumberSnippet from "/snippets/quickstart/phone/get-a-phone-number.mdx";
9 | import { YouTubeEmbed } from "/snippets/video/videos.mdx";
10 |
11 |
15 |
16 | An inbound call is a phone call that comes **"in"** towards a phone number, & in our case, our AI assistant will be there to pick up the phone call.
17 |
18 | There are **4 steps** we will cover to handle our first inbound phone call:
19 |
20 | 1. **Create an Assistant:** we will create an [assistant](/assistants) & instruct it on how to conduct the call
21 | 2. **Get a Phone Number:** we can either import existing numbers we own, or purchase one through Vapi
22 | 3. **Attach Our Assistant:** we will put our assistant behind the phone number to pick up calls
23 | 4. **Call the Number:** we can then call the number & talk to our assistant
24 |
25 | ## Vapi’s Pizzeria
26 |
27 | We will be implementing a simple order-taking assistant that receives customer calls at a pizza shop called “Vapi’s Pizzeria”.
28 |
29 | Vapi’s Pizzeria has 3 types of menu items: `pizza`s, `side`s, & `drink`s. Customers will be ordering 1 of each.
30 |
31 |
32 |
33 |
34 |
35 | ## Assistant Setup
36 |
37 | First we're going to set up our assistant in the dashboard. Once our assistant’s **transcriber**, **model**, & **voice** are set up, we can call it to place our order.
38 |
39 |
40 | You can visit your dashboard by going to
41 | [dashboard.vapi.ai](https://dashboard.vapi.ai)
42 |
43 |
44 |
45 |
46 | ## Get a Phone Number
47 |
48 | Now that we've configured how our assistant will behave, we want to figure out how to call it. We will need a phone number that we can make phone calls to.
49 |
50 |
51 |
52 | ## Attach Your Assistant
53 |
54 | Now that we have a configured assistant & a phone number, we will put our assistant behind the phone number to pick up incoming phone calls.
55 |
56 | In the `Inbound` area of the phone number detail view, select your assistant in the dropdown under `Assistant`.
57 |
58 |
59 |
60 |
61 |
62 | ## Call the Number
63 |
64 | You can now make a phone call to the number. Your assistant will pick up the phone & manage the order-taking conversation. Happy ordering!
65 |
66 |
67 | Your assistant won't yet be able to hang up the phone at the end of the call.
68 | We will learn more about configuring call-end behavior in later guides.
69 |
70 |
--------------------------------------------------------------------------------
/resources.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Code Resources"
3 | sidebarTitle: "Code Resources"
4 | description: "Find all of our resources here."
5 | ---
6 |
7 | {/* Use this link to modify the content -> https://onecompiler.com/ejs/425khha82 */}
8 |
9 |
20 |
--------------------------------------------------------------------------------
/script.js:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | // Get the Mintlify search containers, going to reuse them as the triggers for Inkeep
4 | const searchButtonContainerIds = [
5 | "search-bar-entry",
6 | "search-bar-entry-mobile",
7 | ];
8 |
9 | // Clone and replace, needed to remove existing event listeners
10 | const clonedSearchButtonContainers = searchButtonContainerIds.map((id) => {
11 | const originalElement = document.getElementById(id);
12 | const clonedElement = originalElement.cloneNode(true);
13 |
14 | originalElement.parentNode.replaceChild(clonedElement, originalElement);
15 |
16 | return clonedElement;
17 | });
18 |
19 | // Load the Inkeep script
20 | const inkeepScript = document.createElement("script");
21 | inkeepScript.type = "module";
22 | inkeepScript.src =
23 | "https://unpkg.com/@inkeep/widgets-embed@0.2.265/dist/embed.js";
24 | document.body.appendChild(inkeepScript);
25 |
26 | // Once the Inkeep script has loaded, load the Inkeep chat components
27 | inkeepScript.addEventListener("load", function () {
28 | // Customization settings
29 | const sharedConfig = {
30 | baseSettings: {
31 | apiKey: "a58574ddc0e41c75990d1c0e890ad3c8725dc9e7c8ee3d3e",
32 | integrationId: "clthv1rgg000sdjil26l2vg03",
33 | organizationId: "org_SGvQFUfKzrYkf8z8",
34 | primaryBrandColor: "#5DFECA",
35 | },
36 | aiChatSettings: {
37 | chatSubjectName: "Vapi",
38 | botAvatarSrcUrl:
39 | "https://storage.googleapis.com/organization-image-assets/vapi-botAvatarSrcUrl-1709929183314.png",
40 | botAvatarDarkSrcUrl:
41 | "https://storage.googleapis.com/organization-image-assets/vapi-botAvatarDarkSrcUrl-1709929110474.png",
42 | getHelpCallToActions: [
43 | {
44 | name: "Contact Us",
45 | url: "mailto:support@vapi.ai",
46 | icon: {
47 | builtIn: "IoMail",
48 | },
49 | },
50 | ],
51 | quickQuestions: [
52 | "What voices are supported?",
53 | "What languages are supported?",
54 | "How do I connect a custom LLM?",
55 | "How do I fetch the prompt dynamically?",
56 | ],
57 | },
58 | };
59 |
60 | // for syncing with dark mode
61 | const colorModeSettings = {
62 | observedElement: document.documentElement,
63 | isDarkModeCallback: (el) => {
64 | return el.classList.contains("dark");
65 | },
66 | colorModeAttribute: "class",
67 | };
68 |
69 | // add the "Ask AI" pill chat button
70 | Inkeep().embed({
71 | componentType: "ChatButton",
72 | colorModeSync: colorModeSettings,
73 | properties: sharedConfig,
74 | });
75 |
76 | // instantiate Inkeep "custom trigger" component
77 | const inkeepSearchModal = Inkeep({
78 | ...sharedConfig.baseSettings,
79 | }).embed({
80 | componentType: "CustomTrigger",
81 | colorModeSync: colorModeSettings,
82 | properties: {
83 | ...sharedConfig,
84 | isOpen: false,
85 | onClose: () => {
86 | inkeepSearchModal.render({
87 | isOpen: false,
88 | });
89 | },
90 | modalSettings: {
91 | onShortcutKeyPressed: () => {
92 | inkeepSearchModal.render({
93 | isOpen: true,
94 | });
95 | },
96 | shortcutKey: "k",
97 | }
98 | },
99 | });
100 |
101 | // When the Mintlify search bar clone is clicked, open the Inkeep search modal
102 | clonedSearchButtonContainers.forEach((trigger) => {
103 | trigger.addEventListener("click", function () {
104 | inkeepSearchModal.render({
105 | isOpen: true,
106 | });
107 | });
108 | });
109 | });
110 |
--------------------------------------------------------------------------------
/sdks.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Client SDKs"
3 | sidebarTitle: "Overview"
4 | description: "Put Vapi assistants on every platform."
5 | ---
6 |
7 | import { SdkCards } from "/snippets/sdk.mdx";
8 |
9 | The Vapi Client SDKs automatically configure audio streaming to and from the client, and provide a simple interface for starting calls. The interface is equivalent across all the SDKs.
10 |
11 | The SDKs are open source, and available on GitHub:
12 |
13 |
14 |
15 | ---
16 |
17 |
18 | - `speech-start`, `speech-end`, and `volume-level` for creating animations. -
19 | `message` for receiving messages sent to the [Server URL](/server-url) locally
20 | on the client, so you can show live transcriptions and use function calls to
21 | perform actions on the client.
22 |
23 |
--------------------------------------------------------------------------------
/security-and-privacy/hipaa.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "HIPAA Compliance"
3 | sidebarTitle: "HIPAA Compliance"
4 | description: "Learn how to ensure privacy when using Vapi's voice assistant platform."
5 | ---
6 |
7 | ## Introduction to Privacy at Vapi
8 |
9 | At Vapi, we are committed to delivering exceptional voice assistant services while upholding the highest standards of privacy and data protection for our users. We understand the importance of balancing service quality with the need to respect and protect personal and sensitive information. Our privacy policies and practices are designed to give you control over your data while benefiting from the full capabilities of our platform.
10 |
11 | ## Understanding HIPAA Compliance Basics
12 |
13 | The Health Insurance Portability and Accountability Act (HIPAA) is a United States legislation that provides data privacy and security provisions for safeguarding medical information. HIPAA compliance is crucial for any entity that deals with protected health information (PHI), ensuring that sensitive patient data is handled, stored, and transmitted with the highest standards of security and confidentiality. The key concepts of HIPAA compliance include the Privacy Rule, which protects the privacy of individually identifiable health information; the Security Rule, which sets standards for the security of electronic protected health information (e-PHI); and the Breach Notification Rule, which requires covered entities to notify individuals, HHS, and in some cases, the media of a breach of unsecured PHI. Compliance with these rules is not just about adhering to legal requirements but also about building trust with your customers by demonstrating your commitment to protecting their sensitive data. By enabling the `hipaaEnabled` configuration in Vapi’s voice assistant platform, you are taking a significant step towards aligning your operations with these HIPAA principles, ensuring that your use of technology adheres to these critical privacy and security standards.
14 |
15 | ## Understanding Default Settings
16 |
17 | By default, Vapi records your calls and stores logs and transcriptions. This practice is aimed at continuously improving the quality of our service, ensuring that you receive the best possible experience. However, we recognize the importance of privacy and provide options for users who prefer more control over their data.
18 |
19 | ## Opting for Privacy: The HIPAA Compliance Option
20 |
21 | For users prioritizing privacy, particularly in compliance with the Health Insurance Portability and Accountability Act (HIPAA), Vapi offers the flexibility to opt out of our default data recording settings. Choosing HIPAA compliance through our platform ensures that you can still use our voice assistant services without compromising on privacy requirements.
22 |
23 | ## Enabling HIPAA Compliance
24 |
25 | HIPAA compliance can be ensured by enabling the `hipaaEnabled` configuration in your assistant settings. This simple yet effective setting guarantees that no call logs, recordings, or transcriptions are stored during or after your calls. An end-of-call report message will be generated and stored on your server for record-keeping, ensuring compliance without storing sensitive data on Vapi's systems.
26 |
27 | To enable HIPAA compliance, set `hipaaEnabled` to `true` within your assistant's configuration:
28 |
29 | ```JSON
30 | {
31 | "hipaaEnabled": true
32 | }
33 | ```
34 |
35 | Note: The default value for `hipaaEnabled` is `false`. Activating this setting is a proactive measure to align with HIPAA standards, requiring manual configuration adjustment.
36 |
37 | ## FAQs
38 |
39 | **Q: Will enabling HIPAA compliance affect the quality of Vapi’s service?**
40 | A: Enabling HIPAA compliance does not degrade the quality of the voice assistant services. However, it limits access to certain features, such as reviewing call logs or transcriptions, that some users may find valuable for quality improvement purposes.
41 |
42 | **Q: Who should use the HIPAA compliance feature?**
43 | A: This feature is particularly useful for businesses and organizations in the healthcare sector or any entity that handles sensitive health information and must comply with HIPAA regulations.
44 |
45 | **Q: Can I switch between default and HIPAA-compliant settings?**
46 | A: Yes, users can toggle the `hipaaEnabled` setting as needed. However, we recommend carefully considering the implications of each option on your data privacy and compliance requirements.
47 |
48 | ## Need Further Assistance?
49 |
50 | If you have more questions about privacy, HIPAA compliance, or how to configure your Vapi assistant, our support team is here to help. Contact us at security@vapi.ai for personalized assistance and more information on how to make the most of Vapi’s voice assistant platform while ensuring your data remains protected.
51 |
--------------------------------------------------------------------------------
/security-and-privacy/privacy-policy.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Privacy Policy"
3 | url: "https://vapi.ai/privacy"
4 | ---
5 |
6 | Our Privacy Policy is hosted at [https://vapi.ai/privacy](https://vapi.ai/privacy)
7 |
--------------------------------------------------------------------------------
/security-and-privacy/soc.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "SOC-2 Compliance"
3 | url: "https://security.vapi.ai/"
4 | ---
5 |
--------------------------------------------------------------------------------
/security-and-privacy/tos.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Terms of Service"
3 | url: "https://vapi.ai/terms-of-service"
4 | ---
5 |
6 |
7 | Our Terms of Service is hosted at
8 | [https://vapi.ai/terms-of-service](https://vapi.ai/terms-of-service)
9 |
10 |
--------------------------------------------------------------------------------
/server-url.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Server URLs"
3 | sidebarTitle: "Introduction"
4 | description: "Learn how to set up your server to receive and respond to messages from Vapi."
5 | ---
6 |
7 |
8 |
9 |
10 |
11 | Server URLs allow your application to **receive data** & **communicate with Vapi** during conversations. Conversation events can include:
12 |
13 | - **Status Updates:** updates on the status of a call
14 | - **Transcript Updates**: call transcripts
15 | - **Function Calls:** payloads delivered when your assistant wants certain actions executed
16 | - **Assistant Requests:** in certain circumstances, Vapi may ping your server to get dynamic configuration for an assistant handling a specific call
17 | - **End of Call Report:** call summary data at the end of a call
18 | - **Hang Notifications:** get notified when your assistant fails to reply for a certain amount of time
19 |
20 | In our [quickstart guides](/quickstart) we learned how to setup a basic back-and-forth conversation with a Vapi assistant.
21 |
22 | To build more complex & custom applications, we're going to need to get real-time conversation data to our backend. **This is where server URLs come in.**
23 |
24 |
25 | If you're familiar with functional programming, Server URLs are like callback functions. But
26 | instead of specifying a function to get data back on, we specify a URL to a server (to POST data
27 | back to).
28 |
29 |
30 | ## Get Started
31 |
32 | To get started using server URLs, read our guides:
33 |
34 |
35 |
41 | Server URLs can be set in multiple places. Learn where here.
42 |
43 |
44 | Read about the different types of events Vapi can send to your server.
45 |
46 |
52 | Learn about receiving server events in your local development environment.
53 |
54 |
55 |
56 | ## FAQ
57 |
58 |
59 |
60 | The server URL can be any publicly accessible URL pointing to an HTTP endpoint. This can be a:
61 | - **Cloud Server:** your application might be deployed on a cloud platform like [Railway](https://railway.app), [AWS](https://aws.com), [GCP](https://cloud.google.com/gcp), etc — as a persistent web server.
62 | - **Serverless Function:** services like [Vercel](https://vercel.com/docs/functions), [AWS Lambda](https://aws.amazon.com/lambda/), [Google Cloud Functions](https://cloud.google.com/functions), [Cloudflare](https://developers.cloudflare.com/workers/), etc — allow you to host on-demand cloud functions.
63 | - **Workflow Orchestrator:** platforms like [Pipedream](https://pipedream.com) & [Make](https://www.make.com) allow you to program workflows (often without code) that can receive events via HTTP triggers.
64 |
65 | The main idea is that Vapi needs a location on the Internet that it can drop data to & converse with your application.
66 |
67 |
68 |
69 | [Webhooks](/glossary#webhook) are traditionally unidirectional & stateless, with the target endpoint usually only replying with a status code to acknowledge message reception. Certain server URL events (like assistant requests) may require a meaningful reply from your server.
70 |
71 | "Server URL" is a more general term that encompasses both webhooks & bidirectional communication.
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/server-url/securing-endpoints.mdx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/server-url/securing-endpoints.mdx
--------------------------------------------------------------------------------
/server-url/url-priority.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "URL Priority"
3 | sidebarTitle: "URL Priority"
4 | description: "Where Vapi looks for your server URL, and which setting takes precedence."
5 | ---
6 |
--------------------------------------------------------------------------------
/snippets/faq-snippet.mdx:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | If you are **a developer building a voice AI application simulating human conversation** (w/ LLMs — to whatever degree of application complexity) — Vapi is built for you.
5 |
6 | Whether you are building for a completely "turn-based" use case (like appointment setting), all the way to robust agentic voice applications (like virtual assistants), Vapi is tooled to solve for your voice AI workflow.
7 |
8 | Vapi runs on any platform: the web, mobile, or even embedded systems (given network access).
9 |
10 |
11 |
12 |
13 | Not a problem, we can likely already support it. Vapi is designed to be modular at every level of the voice pipeline: Text-to-speech, LLM, Speech-to-text.
14 |
15 | You can bring your own custom models for any part of the pipeline.
16 |
17 | - **If they’re hosted with one of our providers:** you just need to add your [provider keys](customization/provider-keys), then specify the custom model in your API requests.
18 | - **If they are hosted elsewhere:** you can use the `Custom LLM` provider and specify the [URL to your model](customization/custom-llm/fine-tuned-openai-models) in your API request.
19 |
20 | Everything is interchangeable, mix & match to suit your usecase.
21 |
22 |
23 |
24 |
25 | You could (and the person writing this right now did, from scratch) — but there are good reasons for not doing so.
26 |
27 | Writing a great realtime voice AI application from scratch is a fairly challenging task (more on those challenges [here](/challenges-of-realtime-conversation)). Most of these challenges are not apparent until you face them, then you realize you are 3 weeks into a rabbit hole that may take months to properly climb out of.
28 |
29 | Think of Vapi as hiring a software engineering team for this hard problem, while you focus on what uniquely generates value for your voice AI application.
30 |
31 | ---
32 |
33 | But to address cost, the vast majority of cost in running your application will come from provider cost (Speech-to-text, LLM, Text-to-speech) directly with vendors (Deepgram, OpenAI, ElevenLabs, etc) — where we add no fee (vendor cost passes through). These would have to be incurred anyway.
34 |
35 | Vapi only charges its small fee on top of these for the continuous maintenance & improvement of these hardest components of your system (which would have cost you time to write/maintain).
36 |
37 | No matter what, some cost is inescapable (in money, time, etc) to solve this challenging technical problem.
38 |
39 | Our focus is solely on foundational Voice AI orchestration, & it’s what we put our full time and resources into.
40 |
41 | To learn more about Vapi’s pricing, you can visit our [pricing page](/pricing).
42 |
43 |
44 |
45 |
46 | No — in fact, the setup could not be easier:
47 | - **Web Dashboard:** It can take minutes to get up & running with our [dashboard](https://dashboard.vapi.ai/).
48 | - **Client SDKs:** You can start calls with 1 line of code with any of our [client SDKs](/sdks).
49 |
50 | For more advanced features like function calling, you will have to set up a [Server URL](/server-url) to receive and respond to messages.
51 |
52 |
53 |
54 |
55 | Vapi focuses on developers. Giving developers modular, simple, & robust tooling to build any voice AI application imaginable.
56 |
57 | Vapi also has some of the lowest latency & (equally important) highest reliability of any voice AI platform built for developers.
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/snippets/quickstart/dashboard/provision-phone-number-with-vapi.mdx:
--------------------------------------------------------------------------------
1 | The quickest way to secure a phone number for your assistant is to purchase a phone number directly through Vapi.
2 |
3 |
4 | Ensure you have a card on file that Vapi can bill before proceeding. You can add your billing
5 | information in your dashboard at [dashboard.vapi.ai/billing](https://dashboard.vapi.ai/billing)
6 |
7 |
8 | Navigate to the "Phone Numbers" section & click the "Buy number" button:
9 |
10 |
11 |
12 |
13 |
14 | We will use the area code `415` for our phone number (these are area codes domestic to the US & Canada).
15 |
16 |
17 |
18 |
19 |
20 |
21 | Currently, only US & Canada phone numbers can be directly purchased through Vapi. Phone numbers in
22 | other regions must be imported, see our [phone calling](/phone-calling) guide.
23 |
24 |
25 | Click "Buy", after purchasing a phone number you should see something like this:
26 |
27 |
28 |
29 |
30 |
31 | The phone number is now ready to be used (either for inbound or outbound calling).
32 |
--------------------------------------------------------------------------------
/snippets/quickstart/phone/get-a-phone-number.mdx:
--------------------------------------------------------------------------------
1 | import ProvisionPhoneNumberWithVapi from "/snippets/quickstart/dashboard/provision-phone-number-with-vapi.mdx";
2 |
3 | There are **2 ways** we can get a phone number into our Vapi account:
4 |
5 | 1. **Purchase a Number Through Vapi:** we can directly purchase phone numbers through Vapi.
6 |
7 | - Vapi will provision the phone number for us via Twilio
8 | - This can be done in the dashboard, or via the API (we will use the dashboard)
9 |
10 | 2. **Import from Twilio or Vonage:** if we already have a phone number with an external telephony provider (like Twilio or Vonage), we can import them into our Vapi account.
11 |
12 |
13 |
14 |
15 |
16 |
17 | We can also import an existing phone number we already own with either Twilio or Vonage.
18 |
19 | For example's sake, we will proceed with [**Twilio**](https://twilio.com) (though the steps are the same for Vonage as
20 | well).
21 |
22 |
23 |
24 | If you don't already have a number in Twilio, you can purchase one by going to your Twilio console's "Buy a number" section:
25 |
26 |
27 |
28 |
29 |
30 | Once you've purchased a number, it will immediately be ready for import into Vapi.
31 |
32 |
33 | To complete the import on Vapi's side, we will need to grab our Twilio **"Account SID"** & **"Auth Token"**.
34 |
35 | You should see a section for "API keys & tokens", the credentials we will need for the import will live here.
36 |
37 |
38 |
39 |
40 |
41 | Once we are in our "API keys & tokens" section, we will grab the Account SID & Auth Token:
42 |
43 |
44 |
45 |
46 |
47 | We will use both of these credentials in the next step of importing via the Dashboard.
48 |
49 |
50 | Navigate to the “Phone Numbers” section & click the “Import” button:
51 |
52 |
53 |
54 |
55 |
56 | There you will input your phone number, as well as the credentials you retrieved in the previous step:
57 |
58 |
59 |
60 |
61 |
62 | Hit "Import" & you will come to the phone number detail page:
63 |
64 |
65 |
66 |
67 |
68 | Your number is now ready to be attached to an assistant for inbound or outbound phone calling.
69 |
70 |
71 |
72 |
73 |
74 |
--------------------------------------------------------------------------------
/snippets/quickstart/platform-specific/no-code-prerequisites.mdx:
--------------------------------------------------------------------------------
1 |
2 | The following quickstart guides **require no code** & will give you a good framework for understanding
3 | how Vapi works.
4 |
5 | They may be helpful to go through before following this guide:
6 |
7 |
8 |
9 | The easiest way to start with Vapi. Run a voice agent in minutes.
10 |
11 |
17 | Quickly get started handling inbound phone calls.
18 |
19 |
25 | Quickly get started sending outbound phone calls.
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/snippets/quickstart/web/links.mdx:
--------------------------------------------------------------------------------
1 | export const quickstartDemoLink = "https://stackblitz.com/~/github.com/VapiAI/quickstart-react";
2 |
--------------------------------------------------------------------------------
/snippets/sdk.mdx:
--------------------------------------------------------------------------------
1 | export const SdkCards = ({ iconColor }) => (
2 |
3 |
4 | Add a Vapi assistant to your web application.
5 |
6 |
13 | Add a Vapi assistant to your iOS app.
14 |
15 |
22 | Add a Vapi assistant to your Flutter app.
23 |
24 |
31 | Add a Vapi assistant to your React Native app.
32 |
33 |
40 | Multi-platform. Mac, Windows, and Linux.
41 |
42 |
43 | );
44 |
--------------------------------------------------------------------------------
/snippets/sdks/web/import-web-sdk.mdx:
--------------------------------------------------------------------------------
1 | Import the package:
2 |
3 | ```javascript
4 | import Vapi from "@vapi-ai/web";
5 | ```
6 |
7 | Then, create a new instance of the Vapi class, passing your **Public Key** as a parameter to the constructor:
8 |
9 | ```javascript
10 | const vapi = new Vapi("your-public-key");
11 | ```
12 |
13 | You can find your public key in the [Vapi Dashboard](https://dashboard.vapi.ai/account).
14 |
--------------------------------------------------------------------------------
/snippets/sdks/web/install-web-sdk.mdx:
--------------------------------------------------------------------------------
1 | Install the package:
2 |
3 | ```bash
4 | yarn add @vapi-ai/web
5 | ```
6 |
7 | or w/ npm:
8 |
9 | ```bash
10 | npm install @vapi-ai/web
11 | ```
12 |
--------------------------------------------------------------------------------
/snippets/video/video.css:
--------------------------------------------------------------------------------
1 | /* for a header video */
2 |
3 | .video-embed-wrapper {
4 | position: relative;
5 | width: 100%;
6 | padding-top: 56.25%; /* 16:9 Aspect Ratio (divide 9 by 16 = 0.5625) */
7 | }
8 |
9 | .video-embed-wrapper iframe {
10 | position: absolute;
11 | top: 0;
12 | left: 0;
13 | width: 100%;
14 | height: 100%;
15 | }
16 |
--------------------------------------------------------------------------------
/snippets/video/videos.mdx:
--------------------------------------------------------------------------------
1 | export const YouTubeEmbed = ({ videoUrl, altTitle }) => {
2 |
3 | return
4 |
5 |
6 |
14 |
15 |
16 | };
17 |
--------------------------------------------------------------------------------
/squads-example.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Configuring Inbound and Outbound Calls for Squads"
3 | sidebarTitle: "Example"
4 | description: "Configuring assistants for inbound/outbound calls."
5 | ---
6 |
7 | This guide details how to set up and manage inbound and outbound call functionality within Squads, leveraging AI assistants.
8 |
9 | ### Key Concepts
10 | * **Transient Assistant:** A temporary assistant configuration passed directly in the request payload.
11 | * **Assistant ID:** A unique identifier referring to a pre-existing assistant configuration.
12 |
13 | When using Assistant IDs, ensure the `name` property in the payload exactly matches the associated assistant's name.
14 |
15 | ### Inbound Call Configuration
16 |
17 | When your server receives a request of type `assistant-request`, respond with a JSON payload structured as follows:
18 |
19 |
20 | ```json
21 | {
22 | "squad": {
23 | "members": [
24 | {
25 | "assistant": {
26 | "name": "Emma",
27 | "model": { "model": "gpt-4o", "provider": "openai" },
28 | "voice": { "voiceId": "emma", "provider": "azure" },
29 | "transcriber": { "provider": "deepgram" },
30 | "firstMessage": "Hi, I am Emma, what is your name?",
31 | "firstMessageMode": "assistant-speaks-first"
32 | },
33 | "assistantDestinations": [
34 | {
35 | "type": "assistant",
36 | "assistantName": "Mary",
37 | "message": "Please hold on while I transfer you to our appointment booking assistant Mary.",
38 | "description": "Transfer the user to the appointment booking assistant."
39 | }
40 | ]
41 | },
42 | {
43 | "assistantId": "your-assistant-id"
44 | }
45 | ]
46 | }
47 | }
48 | ```
49 |
50 | **In this example:**
51 |
52 | * The first `members` entry is a **transient assistant** (full configuration provided).
53 | * The second `members` entry uses an **Assistant ID**.
54 | * `assistantDestinations` defines how to **transfer the call** to another assistant.
55 |
56 | ### Outbound Call Configuration
57 |
58 | To initiate an outbound call, send a POST request to the API endpoint `/call/phone` with a JSON payload structured as follows:
59 |
60 | ```json
61 | {
62 | "squad": {
63 | "members": [
64 | {
65 | "assistant": {
66 | "name": "Emma",
67 | "model": { "model": "gpt-4o", "provider": "openai" },
68 | "voice": { "voiceId": "emma", "provider": "azure" },
69 | "transcriber": { "provider": "deepgram" },
70 | "firstMessage": "Hi, I am Emma, what is your name?",
71 | "firstMessageMode": "assistant-speaks-first"
72 | },
73 | "assistantDestinations": [
74 | {
75 | "type": "assistant",
76 | "assistantName": "Mary",
77 | "message": "Please hold on while I transfer you to our appointment booking assistant Mary.",
78 | "description": "Transfer the user to the appointment booking assistant."
79 | }
80 | ]
81 | },
82 | {
83 | "assistantId": "your-assistant-id"
84 | }
85 | ]
86 | },
87 | "customer": {
88 | "number": "your-phone-number"
89 | },
90 | "phoneNumberId": "your-phone-number-id"
91 | }
92 | ```
93 |
94 | **Key points:**
95 |
96 | * `customer.number` is the phone number to call.
97 | * `phoneNumberId` is a unique identifier for the phone number (obtain this from your provider).
98 |
--------------------------------------------------------------------------------
/squads.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Squads"
3 | sidebarTitle: "Introduction"
4 | description: "Use Squads to handle complex workflows and tasks."
5 | ---
6 |
7 | Sometimes, complex workflows are easier to manage with multiple assistants.
8 | You can think of each assistant in a Squad as a leg of a conversation tree.
9 | For example, you might have one assistant for lead qualification, which transfers to another for booking an appointment if they’re qualified.
10 |
11 | Prior to Squads you would put all functionality in one assistant, but Squads were added to break up the complexity of larger prompts into smaller specialized assistants with specific tools and fewer goals.
12 | Squads enable calls to transfer assistants mid-conversation, while maintaining full conversation context.
13 |
14 | ## Usage
15 |
16 | To use Squads, you can create a `squad` when starting a call and specify `members` as a list of assistants and destinations.
17 | The first member is the assistant that will start the call, and assistants can be either persistent or transient.
18 |
19 | Each assistant should be assigned the relevant assistant transfer destinations.
20 | Transfers are specified by assistant name and are used when the model recognizes a specific trigger.
21 |
22 | ```json
23 | {
24 | "squad": {
25 | "members": [
26 | {
27 | "assistantId": "information-gathering-assistant-id",
28 | "assistantDestinations": [{
29 | "type": "assistant",
30 | "assistantName": "Appointment Booking",
31 | "message": "Please hold on while I transfer you to our appointment booking assistant.",
32 | "description": "Transfer the user to the appointment booking assistant after they say their name."
33 | }],
34 | },
35 | {
36 | "assistant": {
37 | "name": "Appointment Booking",
38 | ...
39 | },
40 | }
41 | ]
42 | }
43 | }
44 | ```
45 |
46 |
47 | ## Best practices
48 |
49 | The following are some best practices for using Squads to reduce errors:
50 |
51 | - Group assistants by closely related tasks
52 | - Create as few assistants as possible to reduce complexity
53 | - Make sure descriptions for transfers are clear and concise
54 |
55 |
--------------------------------------------------------------------------------
/static/images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/.DS_Store
--------------------------------------------------------------------------------
/static/images/blocks/food-order-steps.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/food-order-steps.png
--------------------------------------------------------------------------------
/static/images/blocks/recording_20241018_191621.webm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/recording_20241018_191621.webm
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/click_flow_controls_button.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/click_flow_controls_button.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/click_flow_controls_button_again.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/click_flow_controls_button_again.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/click_save_inside_step_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/click_save_inside_step_config.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/click_save_inside_tool_call_block_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/click_save_inside_tool_call_block_config.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_block_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_block_config.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_blocks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_blocks.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_create_workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_create_workflow.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_new_blocks_workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_new_blocks_workflow.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_platform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_platform.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_save_inside_conversation_block_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_save_inside_conversation_block_config.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_save_inside_step_config_start_block.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_save_inside_step_config_start_block.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_schedule_demo_quantstruct.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_schedule_demo_quantstruct.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/clicking_on_step_config_inside_start_block.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/clicking_on_step_config_inside_start_block.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/enter_prompt_inside_schedule_demo_time_block_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/enter_prompt_inside_schedule_demo_time_block_config.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/entering_workflow_name_schedule_demo_quantstruct.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/entering_workflow_name_schedule_demo_quantstruct.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/insert_date_time_schedule_variable.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/insert_date_time_schedule_variable.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/navigate_to_vapi_dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/navigate_to_vapi_dashboard.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/replace_text_inside_start_block_with_prompt_for_good_time_to_talk.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/replace_text_inside_start_block_with_prompt_for_good_time_to_talk.png
--------------------------------------------------------------------------------
/static/images/blocks/screenshots_run_20241018_191537/replace_text_inside_textarea_with_new_text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/blocks/screenshots_run_20241018_191537/replace_text_inside_textarea_with_new_text.png
--------------------------------------------------------------------------------
/static/images/changelog/credit-based-billing-oct-10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/changelog/credit-based-billing-oct-10.png
--------------------------------------------------------------------------------
/static/images/changelog/invite-multiple-users.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/changelog/invite-multiple-users.png
--------------------------------------------------------------------------------
/static/images/changelog/organization-settings-page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/changelog/organization-settings-page.png
--------------------------------------------------------------------------------
/static/images/changelog/subscription-coupon-codes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/changelog/subscription-coupon-codes.png
--------------------------------------------------------------------------------
/static/images/changelog/tavus-voice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/changelog/tavus-voice.png
--------------------------------------------------------------------------------
/static/images/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/favicon.png
--------------------------------------------------------------------------------
/static/images/intro/custom-vs-vapi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/intro/custom-vs-vapi.png
--------------------------------------------------------------------------------
/static/images/learn/billing/billing-example-template.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/billing-example-template.png
--------------------------------------------------------------------------------
/static/images/learn/billing/billing-limits-exceeded.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/billing-limits-exceeded.png
--------------------------------------------------------------------------------
/static/images/learn/billing/billing-limits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/billing-limits.png
--------------------------------------------------------------------------------
/static/images/learn/billing/call-pricing-breakdown.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/call-pricing-breakdown.png
--------------------------------------------------------------------------------
/static/images/learn/billing/cost-estimate.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/cost-estimate.gif
--------------------------------------------------------------------------------
/static/images/learn/billing/cost-routing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/cost-routing.png
--------------------------------------------------------------------------------
/static/images/learn/billing/custom-model-inbound-phone-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/custom-model-inbound-phone-example.png
--------------------------------------------------------------------------------
/static/images/learn/billing/outbound-phone-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/outbound-phone-example.png
--------------------------------------------------------------------------------
/static/images/learn/billing/web-interviews-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/billing/web-interviews-example.png
--------------------------------------------------------------------------------
/static/images/learn/platform/vapi-orchestration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/learn/platform/vapi-orchestration.png
--------------------------------------------------------------------------------
/static/images/logo/logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/logo/logo-dark.png
--------------------------------------------------------------------------------
/static/images/logo/logo-light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/logo/logo-light.png
--------------------------------------------------------------------------------
/static/images/pricing/voice-pipeline-cost-breakdown.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/pricing/voice-pipeline-cost-breakdown.png
--------------------------------------------------------------------------------
/static/images/quickstart/assistant-id-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/assistant-id-dashboard.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/assistant-created.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/assistant-created.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/assistant-model-set-up.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/assistant-model-set-up.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/assistant-transcriber-config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/assistant-transcriber-config.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/assistant-voice-config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/assistant-voice-config.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/auth-ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/auth-ui.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/buy-a-phone-number.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/buy-a-phone-number.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/buy-phone-number-modal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/buy-phone-number-modal.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/call-assistant-web-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/call-assistant-web-dashboard.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/choose-blank-template.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/choose-blank-template.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/create-new-assistant-button.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/create-new-assistant-button.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/inbound-assistant-set.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/inbound-assistant-set.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/model-provider-keys.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/model-provider-keys.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/name-your-assistant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/name-your-assistant.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/phone-number-config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/phone-number-config.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/transcriber-providers-keys.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/transcriber-providers-keys.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/vapi-dashboard-post-signup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/vapi-dashboard-post-signup.png
--------------------------------------------------------------------------------
/static/images/quickstart/dashboard/voice-provider-keys.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/dashboard/voice-provider-keys.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/buy-phone-number-twilio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/buy-phone-number-twilio.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/dashboard-import-phone-number.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/dashboard-import-phone-number.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/import-twilio-number-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/import-twilio-number-dashboard.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/outbound/assistant-model-setup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/outbound/assistant-model-setup.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/outbound/dial-outbound-call-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/outbound/dial-outbound-call-dashboard.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/phone-number-import-complete.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/phone-number-import-complete.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/set-billing-information.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/set-billing-information.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/twilio-api-key-nav.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/twilio-api-key-nav.png
--------------------------------------------------------------------------------
/static/images/quickstart/phone/twilio-credentials.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/phone/twilio-credentials.png
--------------------------------------------------------------------------------
/static/images/quickstart/quickstart-banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/quickstart-banner.png
--------------------------------------------------------------------------------
/static/images/quickstart/vapis-pizzeria.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/vapis-pizzeria.png
--------------------------------------------------------------------------------
/static/images/quickstart/web/microphone-permissions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/quickstart/web/microphone-permissions.png
--------------------------------------------------------------------------------
/static/images/server-url/developing-locally/logging-events-locally.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/developing-locally/logging-events-locally.png
--------------------------------------------------------------------------------
/static/images/server-url/developing-locally/ngrok-cli-ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/developing-locally/ngrok-cli-ui.png
--------------------------------------------------------------------------------
/static/images/server-url/developing-locally/reverse-proxy-developing-locally.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/developing-locally/reverse-proxy-developing-locally.png
--------------------------------------------------------------------------------
/static/images/server-url/overview-graphic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/overview-graphic.png
--------------------------------------------------------------------------------
/static/images/server-url/settings-server-urls/assistant-server-url-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/settings-server-urls/assistant-server-url-dashboard.png
--------------------------------------------------------------------------------
/static/images/server-url/settings-server-urls/function-call-server-url-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/settings-server-urls/function-call-server-url-dashboard.png
--------------------------------------------------------------------------------
/static/images/server-url/settings-server-urls/org-settings-server-urls.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/settings-server-urls/org-settings-server-urls.png
--------------------------------------------------------------------------------
/static/images/server-url/settings-server-urls/server-url-priority.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/settings-server-urls/server-url-priority.png
--------------------------------------------------------------------------------
/static/images/server-url/settings-server-urls/setting-account-server-url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VapiAI/docs-archived/76238b8eabc00ffcbe0bd8744f826615878b8b58/static/images/server-url/settings-server-urls/setting-account-server-url.png
--------------------------------------------------------------------------------
/status.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Status"
3 | url: "https://status.vapi.ai/"
4 | ---
5 |
6 |
7 | Our uptime status is hosted [here](https://status.vapi.ai/).
8 |
9 |
--------------------------------------------------------------------------------
/support.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Support"
3 | sidebarTitle: "Support"
4 | description: "We welcome all kinds of help inquiries, feedback, and feature requests."
5 | ---
6 |
7 | ## Join Vapi community
8 |
9 | - To take part in community discussions, join our [Discord](https://discord.com/invite/pUFNcf2WmH) server and collaborate with other developers.
10 | - For quick support, visit the #support channel to submit support requests.
11 |
--------------------------------------------------------------------------------