/* eslint-disable */
/* tslint:disable */
// @ts-nocheck
/*
 * ---------------------------------------------------------------
 * ## THIS FILE WAS GENERATED VIA SWAGGER-TYPESCRIPT-API ##
 * ##                                                           ##
 * ## AUTHOR: acacode                                           ##
 * ## SOURCE: https://github.com/acacode/swagger-typescript-api ##
 * ---------------------------------------------------------------
 */

/** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
export interface FallbackTranscriberPlan {
  /** These are the fallback transcribers to use when the primary transcriber fails. */
  transcribers: (
    | FallbackAssemblyAITranscriber
    | FallbackAzureSpeechTranscriber
    | FallbackCustomTranscriber
    | FallbackDeepgramTranscriber
    | FallbackElevenLabsTranscriber
    | FallbackGladiaTranscriber
    | FallbackGoogleTranscriber
    | FallbackTalkscriberTranscriber
    | FallbackSpeechmaticsTranscriber
    | FallbackOpenAITranscriber
    | FallbackCartesiaTranscriber
  )[];
}

export interface AssemblyAITranscriber {
  /** This is the transcription provider that will be used. */
  provider: "assembly-ai";
  /** This is the language that will be set for the transcription. */
  language?: "en";
  /**
   * Transcripts below this confidence threshold will be discarded.
   *
   * @default 0.4
   * @min 0
   * @max 1
   * @example 0.4
   */
  confidenceThreshold?: number;
  /**
   * This enables formatting of transcripts.
   *
   * @default true
   * @example true
   */
  formatTurns?: boolean;
  /**
   * This is the end of turn confidence threshold. The minimum confidence that the end of turn is detected.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   *
   * @default 0.7
   * @min 0
   * @max 1
   * @example 0.7
   */
  endOfTurnConfidenceThreshold?: number;
  /**
   * This is the minimum end of turn silence when confident in milliseconds.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   * @default 160
   * @min 0
   * @example 160
   */
  minEndOfTurnSilenceWhenConfident?: number;
  /**
   * Deprecated; retained for backward compatibility with existing configurations.
   * @deprecated
   * @min 0
   * @example 160
   */
  wordFinalizationMaxWaitTime?: number;
  /**
   * This is the maximum turn silence time in milliseconds.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   * @default 400
   * @min 0
   * @example 400
   */
  maxTurnSilence?: number;
  /**
   * Use VAD to assist with endpointing decisions from the transcriber.
   * When enabled, transcriber endpointing will be buffered if VAD detects the user is still speaking, preventing premature turn-taking.
   * When disabled, transcriber endpointing will be used immediately regardless of VAD state, allowing for quicker but more aggressive turn-taking.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   *
   * @default true
   * @example true
   */
  vadAssistedEndpointingEnabled?: boolean;
  /** The WebSocket URL that the transcriber connects to. */
  realtimeUrl?: string;
  /** Add up to 2500 characters of custom vocabulary. */
  wordBoost?: string[];
  /**
   * Keyterms prompting improves recognition accuracy for specific words and phrases.
   * Can include up to 100 keyterms, each up to 50 characters.
   * Costs an additional $0.04/hour when enabled.
   */
  keytermsPrompt?: string[];
  /** The duration of the end utterance silence threshold in milliseconds. */
  endUtteranceSilenceThreshold?: number;
  /**
   * Disable partial transcripts.
   * Set to `true` to not receive partial transcripts. Defaults to `false`.
   */
  disablePartialTranscripts?: boolean;
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface AzureSpeechTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "azure";
  /**
   * This is the language that will be set for the transcription.
   * The list of languages Azure supports can be found here: https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=stt
   */
  language?:
    | "af-ZA" | "am-ET" | "ar-AE" | "ar-BH" | "ar-DZ" | "ar-EG" | "ar-IL" | "ar-IQ"
    | "ar-JO" | "ar-KW" | "ar-LB" | "ar-LY" | "ar-MA" | "ar-OM" | "ar-PS" | "ar-QA"
    | "ar-SA" | "ar-SY" | "ar-TN" | "ar-YE" | "az-AZ" | "bg-BG" | "bn-IN" | "bs-BA"
    | "ca-ES" | "cs-CZ" | "cy-GB" | "da-DK" | "de-AT" | "de-CH" | "de-DE" | "el-GR"
    | "en-AU" | "en-CA" | "en-GB" | "en-GH" | "en-HK" | "en-IE" | "en-IN" | "en-KE"
    | "en-NG" | "en-NZ" | "en-PH" | "en-SG" | "en-TZ" | "en-US" | "en-ZA" | "es-AR"
    | "es-BO" | "es-CL" | "es-CO" | "es-CR" | "es-CU" | "es-DO" | "es-EC" | "es-ES"
    | "es-GQ" | "es-GT" | "es-HN" | "es-MX" | "es-NI" | "es-PA" | "es-PE" | "es-PR"
    | "es-PY" | "es-SV" | "es-US" | "es-UY" | "es-VE" | "et-EE" | "eu-ES" | "fa-IR"
    | "fi-FI" | "fil-PH" | "fr-BE" | "fr-CA" | "fr-CH" | "fr-FR" | "ga-IE" | "gl-ES"
    | "gu-IN" | "he-IL" | "hi-IN" | "hr-HR" | "hu-HU" | "hy-AM" | "id-ID" | "is-IS"
    | "it-CH" | "it-IT" | "ja-JP" | "jv-ID" | "ka-GE" | "kk-KZ" | "km-KH" | "kn-IN"
    | "ko-KR" | "lo-LA" | "lt-LT" | "lv-LV" | "mk-MK" | "ml-IN" | "mn-MN" | "mr-IN"
    | "ms-MY" | "mt-MT" | "my-MM" | "nb-NO" | "ne-NP" | "nl-BE" | "nl-NL" | "pa-IN"
    | "pl-PL" | "ps-AF" | "pt-BR" | "pt-PT" | "ro-RO" | "ru-RU" | "si-LK" | "sk-SK"
    | "sl-SI" | "so-SO" | "sq-AL" | "sr-RS" | "sv-SE" | "sw-KE" | "sw-TZ" | "ta-IN"
    | "te-IN" | "th-TH" | "tr-TR" | "uk-UA" | "ur-IN" | "uz-UZ" | "vi-VN" | "wuu-CN"
    | "yue-CN" | "zh-CN" | "zh-CN-shandong" | "zh-CN-sichuan" | "zh-HK" | "zh-TW"
    | "zu-ZA";
  /** Controls how phrase boundaries are detected, enabling either simple time/silence heuristics or more advanced semantic segmentation. */
  segmentationStrategy?: "Default" | "Time" | "Semantic";
  /**
   * Duration of detected silence after which the service finalizes a phrase. Configure to adjust sensitivity to pauses in speech.
   * @min 100
   * @max 5000
   */
  segmentationSilenceTimeoutMs?: number;
  /**
   * Maximum duration a segment can reach before being cut off when using time-based segmentation.
   * @min 20000
   * @max 70000
   */
  segmentationMaximumTimeMs?: number;
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface CartesiaTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "cartesia";
  /** This is the model that will be used for the transcription. */
  model?: "ink-whisper";
  /** This is the language that will be set for the transcription (ISO 639-1 code). */
  language?:
    | "aa" | "ab" | "ae" | "af" | "ak" | "am" | "an" | "ar" | "as" | "av" | "ay" | "az"
    | "ba" | "be" | "bg" | "bh" | "bi" | "bm" | "bn" | "bo" | "br" | "bs" | "ca" | "ce"
    | "ch" | "co" | "cr" | "cs" | "cu" | "cv" | "cy" | "da" | "de" | "dv" | "dz" | "ee"
    | "el" | "en" | "eo" | "es" | "et" | "eu" | "fa" | "ff" | "fi" | "fj" | "fo" | "fr"
    | "fy" | "ga" | "gd" | "gl" | "gn" | "gu" | "gv" | "ha" | "he" | "hi" | "ho" | "hr"
    | "ht" | "hu" | "hy" | "hz" | "ia" | "id" | "ie" | "ig" | "ii" | "ik" | "io" | "is"
    | "it" | "iu" | "ja" | "jv" | "ka" | "kg" | "ki" | "kj" | "kk" | "kl" | "km" | "kn"
    | "ko" | "kr" | "ks" | "ku" | "kv" | "kw" | "ky" | "la" | "lb" | "lg" | "li" | "ln"
    | "lo" | "lt" | "lu" | "lv" | "mg" | "mh" | "mi" | "mk" | "ml" | "mn" | "mr" | "ms"
    | "mt" | "my" | "na" | "nb" | "nd" | "ne" | "ng" | "nl" | "nn" | "no" | "nr" | "nv"
    | "ny" | "oc" | "oj" | "om" | "or" | "os" | "pa" | "pi" | "pl" | "ps" | "pt" | "qu"
    | "rm" | "rn" | "ro" | "ru" | "rw" | "sa" | "sc" | "sd" | "se" | "sg" | "si" | "sk"
    | "sl" | "sm" | "sn" | "so" | "sq" | "sr" | "ss" | "st" | "su" | "sv" | "sw" | "ta"
    | "te" | "tg" | "th" | "ti" | "tk" | "tl" | "tn" | "to" | "tr" | "ts" | "tt" | "tw"
    | "ty" | "ug" | "uk" | "ur" | "uz" | "ve" | "vi" | "vo" | "wa" | "wo" | "xh" | "yi"
    | "yue" | "yo" | "za" | "zh" | "zu";
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface BackoffPlan {
  /**
   * This is the type of backoff plan to use. Defaults to fixed.
   *
   * @default fixed
   * @example "fixed"
   */
  type: "fixed" | "exponential";
  /**
   * This is the maximum number of retries to attempt if the request fails. Defaults to 0 (no retries).
   *
   * @default 0
   * @min 0
   * @max 10
   * @example 0
   */
  maxRetries: number;
  /**
   * This is the base delay in seconds. For fixed backoff, this is the delay between each retry. For exponential backoff, this is the initial delay.
   * @min 0
   * @max 10
   * @example 1
   */
  baseDelaySeconds: number;
  /**
   * This is the excluded status codes. If the response status code is in this list, the request will not be retried.
   * By default, the request will be retried for any non-2xx status code.
   * NOTE(review): the example shows numeric codes, yet the generated type is `object[]` — this comes from the spec; confirm before tightening.
   * @example [400,401,403,404]
   */
  excludedStatusCodes?: object[];
}

export interface Server {
  /**
   * This is the timeout in seconds for the request. Defaults to 20 seconds.
   *
   * @default 20
   * @min 1
   * @max 300
   * @example 20
   */
  timeoutSeconds?: number;
  /**
   * The credential ID for server authentication
   * @example "550e8400-e29b-41d4-a716-446655440000"
   */
  credentialId?: string;
  /**
   * If enabled, requests will originate from a static set of IPs owned and managed by Vapi.
   *
   * @default false
   * @example false
   */
  staticIpAddressesEnabled?: boolean;
  /** This is where the request will be sent. */
  url?: string;
  /**
   * These are the headers to include in the request.
   *
   * Each key-value pair represents a header name and its value.
   *
   * Note: Specifying an Authorization header here will override the authorization provided by the `credentialId` (if provided). This is an anti-pattern and should be avoided outside of edge case scenarios.
   */
  headers?: object;
  /**
   * This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).
   *
   * @default undefined (the request will not be retried)
   */
  backoffPlan?: BackoffPlan;
}

export interface CustomTranscriber {
  /** This is the transcription provider that will be used. Use `custom-transcriber` for providers that are not natively supported. */
  provider: "custom-transcriber";
  /**
   * This is where the transcription request will be sent.
   *
   * Usage:
   * 1. Vapi will initiate a websocket connection with `server.url`.
   *
   * 2. Vapi will send an initial text frame with the sample rate. Format:
   * ```
   * {
   *   "type": "start",
   *   "encoding": "linear16", // 16-bit raw PCM format
   *   "container": "raw",
   *   "sampleRate": {{sampleRate}},
   *   "channels": 2 // customer is channel 0, assistant is channel 1
   * }
   * ```
   *
   * 3. Vapi will send the audio data in 16-bit raw PCM format as binary frames.
   *
   * 4. You can read the messages something like this:
   * ```
   * ws.on('message', (data, isBinary) => {
   *   if (isBinary) {
   *     pcmBuffer = Buffer.concat([pcmBuffer, data]);
   *     console.log(`Received PCM data, buffer size: ${pcmBuffer.length}`);
   *   } else {
   *     console.log('Received message:', JSON.parse(data.toString()));
   *   }
   * });
   * ```
   *
   * 5. You will respond with transcriptions as you have them. Format:
   * ```
   * {
   *   "type": "transcriber-response",
   *   "transcription": "Hello, world!",
   *   "channel": "customer" | "assistant"
   * }
   * ```
   */
  server: Server;
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface DeepgramTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "deepgram";
  /**
   * This is the Deepgram model that will be used.
   * A list of models can be found here: https://developers.deepgram.com/docs/models-languages-overview
   */
  model?:
    | "nova-3" | "nova-3-general" | "nova-3-medical"
    | "nova-2" | "nova-2-general" | "nova-2-meeting" | "nova-2-phonecall" | "nova-2-finance"
    | "nova-2-conversationalai" | "nova-2-voicemail" | "nova-2-video" | "nova-2-medical"
    | "nova-2-drivethru" | "nova-2-automotive"
    | "nova" | "nova-general" | "nova-phonecall" | "nova-medical"
    | "enhanced" | "enhanced-general" | "enhanced-meeting" | "enhanced-phonecall" | "enhanced-finance"
    | "base" | "base-general" | "base-meeting" | "base-phonecall" | "base-finance"
    | "base-conversationalai" | "base-voicemail" | "base-video"
    | "whisper" | "flux-general-en"
    | string;
  /**
   * This is the language that will be set for the transcription.
   * The list of languages Deepgram supports can be found here: https://developers.deepgram.com/docs/models-languages-overview
   */
  language?:
    | "ar" | "az" | "ba" | "bg" | "br" | "ca" | "cs" | "da" | "da-DK" | "de" | "de-CH"
    | "el" | "en" | "en-AU" | "en-CA" | "en-GB" | "en-IE" | "en-IN" | "en-NZ" | "en-US"
    | "es" | "es-419" | "es-LATAM" | "et" | "eu" | "fi" | "fr" | "fr-CA" | "ha" | "haw"
    | "he" | "hi" | "hi-Latn" | "hu" | "id" | "is" | "it" | "ja" | "jw" | "kn" | "ko"
    | "ko-KR" | "ln" | "lt" | "lv" | "mk" | "ms" | "multi" | "nl" | "nl-BE" | "no"
    | "pl" | "pt" | "pt-BR" | "pt-PT" | "ro" | "ru" | "sk" | "sl" | "sn" | "so" | "sr"
    | "su" | "sv" | "sv-SE" | "ta" | "taq" | "th" | "th-TH" | "tr" | "tt" | "uk" | "ur"
    | "vi" | "yo" | "zh" | "zh-CN" | "zh-HK" | "zh-Hans" | "zh-Hant" | "zh-TW";
  /**
   * This enables the smart format option provided by Deepgram. It's disabled by default because it can sometimes format numbers as times, but it's getting better.
   * @example false
   */
  smartFormat?: boolean;
  /**
   * If set to true, this will add mip_opt_out=true as a query parameter of all API requests. See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out
   *
   * This will only be used if you are using your own Deepgram API key.
   *
   * @default false
   * @example false
   */
  mipOptOut?: boolean;
  /**
   * If set to true, this will cause deepgram to convert spoken numbers to literal numerals. For example, "my phone number is nine-seven-two..." would become "my phone number is 972..."
   *
   * @default false
   * @example false
   */
  numerals?: boolean;
  /**
   * Transcripts below this confidence threshold will be discarded.
   *
   * @default 0.4
   * @min 0
   * @max 1
   * @example 0.4
   */
  confidenceThreshold?: number;
  /**
   * Eager end-of-turn confidence required to fire an eager end-of-turn event. Setting a value here will enable EagerEndOfTurn and SpeechResumed events. It is disabled by default. Only used with Flux models.
   * @min 0
   * @max 1
   * @example 0.3
   */
  eagerEotThreshold?: number;
  /**
   * End-of-turn confidence required to finish a turn. Only used with Flux models.
   *
   * @default 0.7
   * @min 0.5
   * @max 0.9
   * @example 0.7
   */
  eotThreshold?: number;
  /**
   * A turn will be finished when this much time has passed after speech, regardless of EOT confidence. Only used with Flux models.
   *
   * @default 5000
   * @min 500
   * @max 10000
   * @example 5000
   */
  eotTimeoutMs?: number;
  /** These keywords are passed to the transcription model to help it pick up use-case specific words. Anything that may not be a common word, like your company name, should be added here. */
  keywords?: string[];
  /** Keyterm Prompting allows you improve Keyword Recall Rate (KRR) for important keyterms or phrases up to 90%. */
  keyterm?: string[];
  /**
   * This is the timeout after which Deepgram will send transcription on user silence. You can read in-depth documentation here: https://developers.deepgram.com/docs/endpointing.
   *
   * Here are the most important bits:
   * - Defaults to 10. This is recommended for most use cases to optimize for latency.
   * - 10 can cause some missing transcriptions because of the shorter context. This mostly happens for one-word utterances. For those use cases, it's recommended to try 300. It will add a bit of latency but the quality and reliability of the experience will be better.
   * - If neither 10 nor 300 work, contact support@vapi.ai and we'll find another solution.
   *
   * @default 10
   * @min 10
   * @max 500
   */
  endpointing?: number;
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface ElevenLabsTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "11labs";
  /** This is the model that will be used for the transcription. */
  model?: "scribe_v1";
  /** This is the language that will be set for the transcription (ISO 639-1 code). */
  language?:
    | "aa" | "ab" | "ae" | "af" | "ak" | "am" | "an" | "ar" | "as" | "av" | "ay" | "az"
    | "ba" | "be" | "bg" | "bh" | "bi" | "bm" | "bn" | "bo" | "br" | "bs" | "ca" | "ce"
    | "ch" | "co" | "cr" | "cs" | "cu" | "cv" | "cy" | "da" | "de" | "dv" | "dz" | "ee"
    | "el" | "en" | "eo" | "es" | "et" | "eu" | "fa" | "ff" | "fi" | "fj" | "fo" | "fr"
    | "fy" | "ga" | "gd" | "gl" | "gn" | "gu" | "gv" | "ha" | "he" | "hi" | "ho" | "hr"
    | "ht" | "hu" | "hy" | "hz" | "ia" | "id" | "ie" | "ig" | "ii" | "ik" | "io" | "is"
    | "it" | "iu" | "ja" | "jv" | "ka" | "kg" | "ki" | "kj" | "kk" | "kl" | "km" | "kn"
    | "ko" | "kr" | "ks" | "ku" | "kv" | "kw" | "ky" | "la" | "lb" | "lg" | "li" | "ln"
    | "lo" | "lt" | "lu" | "lv" | "mg" | "mh" | "mi" | "mk" | "ml" | "mn" | "mr" | "ms"
    | "mt" | "my" | "na" | "nb" | "nd" | "ne" | "ng" | "nl" | "nn" | "no" | "nr" | "nv"
    | "ny" | "oc" | "oj" | "om" | "or" | "os" | "pa" | "pi" | "pl" | "ps" | "pt" | "qu"
    | "rm" | "rn" | "ro" | "ru" | "rw" | "sa" | "sc" | "sd" | "se" | "sg" | "si" | "sk"
    | "sl" | "sm" | "sn" | "so" | "sq" | "sr" | "ss" | "st" | "su" | "sv" | "sw" | "ta"
    | "te" | "tg" | "th" | "ti" | "tk" | "tl" | "tn" | "to" | "tr" | "ts" | "tt" | "tw"
    | "ty" | "ug" | "uk" | "ur" | "uz" | "ve" | "vi" | "vo" | "wa" | "wo" | "xh" | "yi"
    | "yue" | "yo" | "za" | "zh" | "zu";
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface GladiaCustomVocabularyConfigDTO {
  /** Array of vocabulary items (strings or objects with value, pronunciations, intensity, language) */
  vocabulary: (string | GladiaVocabularyItemDTO)[];
  /**
   * Default intensity for vocabulary items (0.0 to 1.0)
   * @min 0
   * @max 1
   * @default 0.5
   */
  defaultIntensity?: number;
}

export interface GladiaTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "gladia";
  /** This is the Gladia model that will be used. Default is 'fast' */
  model?: "fast" | "accurate" | "solaria-1";
  /** Defines how the transcription model detects the audio language. Default value is 'automatic single language'. */
  languageBehaviour?:
    | "manual"
    | "automatic single language"
    | "automatic multiple languages";
  /** Defines the language to use for the transcription. Required when languageBehaviour is 'manual'. */
  language?:
    | "af" | "sq" | "am" | "ar" | "hy" | "as" | "az" | "ba" | "eu" | "be" | "bn" | "bs"
    | "br" | "bg" | "ca" | "zh" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fo" | "fi"
    | "fr" | "gl" | "ka" | "de" | "el" | "gu" | "ht" | "ha" | "haw" | "he" | "hi" | "hu"
    | "is" | "id" | "it" | "ja" | "jv" | "kn" | "kk" | "km" | "ko" | "lo" | "la" | "lv"
    | "ln" | "lt" | "lb" | "mk" | "mg" | "ms" | "ml" | "mt" | "mi" | "mr" | "mn" | "my"
    | "ne" | "no" | "nn" | "oc" | "ps" | "fa" | "pl" | "pt" | "pa" | "ro" | "ru" | "sa"
    | "sr" | "sn" | "sd" | "si" | "sk" | "sl" | "so" | "es" | "su" | "sw" | "sv" | "tl"
    | "tg" | "ta" | "tt" | "te" | "th" | "bo" | "tr" | "tk" | "uk" | "ur" | "uz" | "vi"
    | "cy" | "yi" | "yo";
  /**
   * Defines the languages to use for the transcription. Required when languageBehaviour is 'manual'.
   * NOTE(review): despite the plural name/description, the generated type is a single language code, not an array — this mirrors the spec; confirm before changing.
   */
  languages?:
    | "af" | "sq" | "am" | "ar" | "hy" | "as" | "az" | "ba" | "eu" | "be" | "bn" | "bs"
    | "br" | "bg" | "ca" | "zh" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fo" | "fi"
    | "fr" | "gl" | "ka" | "de" | "el" | "gu" | "ht" | "ha" | "haw" | "he" | "hi" | "hu"
    | "is" | "id" | "it" | "ja" | "jv" | "kn" | "kk" | "km" | "ko" | "lo" | "la" | "lv"
    | "ln" | "lt" | "lb" | "mk" | "mg" | "ms" | "ml" | "mt" | "mi" | "mr" | "mn" | "my"
    | "ne" | "no" | "nn" | "oc" | "ps" | "fa" | "pl" | "pt" | "pa" | "ro" | "ru" | "sa"
    | "sr" | "sn" | "sd" | "si" | "sk" | "sl" | "so" | "es" | "su" | "sw" | "sv" | "tl"
    | "tg" | "ta" | "tt" | "te" | "th" | "bo" | "tr" | "tk" | "uk" | "ur" | "uz" | "vi"
    | "cy" | "yi" | "yo";
  /**
   * Provides a custom vocabulary to the model to improve accuracy of transcribing context specific words, technical terms, names, etc. If empty, this argument is ignored.
   * ⚠️ Warning ⚠️: Please be aware that the transcription_hint field has a character limit of 600. If you provide a transcription_hint longer than 600 characters, it will be automatically truncated to meet this limit.
   * @maxLength 600
   * @example "custom vocabulary"
   */
  transcriptionHint?: string;
  /**
   * If prosody is true, you will get a transcription that can contain prosodies i.e. (laugh) (giggles) (malefic laugh) (toss) (music)… Default value is false.
   * @example false
   */
  prosody?: boolean;
  /**
   * If true, audio will be pre-processed to improve accuracy but latency will increase. Default value is false.
   * @example false
   */
  audioEnhancer?: boolean;
  /**
   * Transcripts below this confidence threshold will be discarded.
   *
   * @default 0.4
   * @min 0
   * @max 1
   * @example 0.4
   */
  confidenceThreshold?: number;
  /**
   * Endpointing time in seconds - time to wait before considering speech ended
   * @min 0.01
   * @max 10
   * @example 0.05
   */
  endpointing?: number;
  /**
   * Speech threshold - sensitivity configuration for speech detection (0.0 to 1.0)
   * @min 0
   * @max 1
   * @example 0.6
   */
  speechThreshold?: number;
  /**
   * Enable custom vocabulary for improved accuracy
   * @example false
   */
  customVocabularyEnabled?: boolean;
  /** Custom vocabulary configuration */
  customVocabularyConfig?: GladiaCustomVocabularyConfigDTO;
  /**
   * Region for processing audio (us-west or eu-west)
   * @example "us-west"
   */
  region?: "us-west" | "eu-west";
  /**
   * Enable partial transcripts for low-latency streaming transcription
   * @example false
   */
  receivePartialTranscripts?: boolean;
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface SpeechmaticsCustomVocabularyItem {
  /**
   * The word or phrase to add to the custom vocabulary.
   * @minLength 1
   * @example "Speechmatics"
   */
  content: string;
  /**
   * Alternative phonetic representations of how the word might sound. This helps recognition when the word might be pronounced differently.
   * @example ["speech mattix"]
   */
  soundsLike?: string[];
}

export interface SpeechmaticsTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "speechmatics";
  /** This is the model that will be used for the transcription. */
  model?: "default";
  /** This is the language that will be set for the transcription. */
  language?:
    | "auto" | "ar" | "ba" | "eu" | "be" | "bn" | "bg" | "yue" | "ca" | "hr" | "cs"
    | "da" | "nl" | "en" | "eo" | "et" | "fi" | "fr" | "gl" | "de" | "el" | "he" | "hi"
    | "hu" | "id" | "ia" | "ga" | "it" | "ja" | "ko" | "lv" | "lt" | "ms" | "mt" | "cmn"
    | "mr" | "mn" | "no" | "fa" | "pl" | "pt" | "ro" | "ru" | "sk" | "sl" | "es" | "sw"
    | "sv" | "ta" | "th" | "tr" | "uk" | "ur" | "ug" | "vi" | "cy";
  /**
   * This is the operating point for the transcription. Choose between `standard` for faster turnaround with strong accuracy or `enhanced` for highest accuracy when precision is critical.
   *
   * @default "enhanced"
   * @example "enhanced"
   */
  operatingPoint?: "standard" | "enhanced";
  /**
   * This is the region for the Speechmatics API. Choose between EU (Europe) and US (United States) regions for lower latency and data sovereignty compliance.
   *
   * @default "eu"
   * @example "us"
   */
  region?: "eu" | "us";
  /**
   * This enables speaker diarization, which identifies and separates speakers in the transcription. Essential for multi-speaker conversations and conference calls.
   *
   * @default false
   * @example true
   */
  enableDiarization?: boolean;
  /**
   * This sets the maximum number of speakers to detect when diarization is enabled. Only used when enableDiarization is true.
   *
   * @default 2
   * @min 2
   * @max 50
   * @example 4
   */
  maxSpeakers?: number;
  /**
   * Provides friendly speaker labels that map to diarization indices (Speaker 1 -> labels[0]).
   * @example ["Agent","Customer"]
   */
  speakerLabels?: string[];
  /**
   * This enables partial transcripts during speech recognition. When false, only final transcripts are returned.
   *
   * @default true
   * @example false
   */
  enablePartials?: boolean;
  /**
   * This sets the maximum delay in milliseconds for partial transcripts. Balances latency and accuracy.
   *
   * @default 3000
   * @min 500
   * @max 10000
   * @example 1500
   */
  maxDelay?: number;
  /**
   * NOTE(review): this field is required (no `?`) while every sibling is optional — this mirrors the generated spec; confirm intent.
   * @example [{"content":"Speechmatics","soundsLike":["speech mattix"]}]
   */
  customVocabulary: SpeechmaticsCustomVocabularyItem[];
  /**
   * This controls how numbers are formatted in the transcription output.
   *
   * @default "written"
   * @example "spoken"
   */
  numeralStyle?: "written" | "spoken";
  /**
   * This enables detection of non-speech audio events like music, applause, and laughter.
   * NOTE(review): Speechmatics' `enable_entities` normally controls written-form entity output (numbers, dates), not audio events — confirm this description against the API.
   *
   * @default false
   * @example true
   */
  enableEntities?: boolean;
  /**
   * This enables automatic punctuation in the transcription output.
   *
   * @default true
   * @example false
   */
  enablePunctuation?: boolean;
  /**
   * This enables automatic capitalization in the transcription output.
   *
   * @default true
   * @example false
   */
  enableCapitalization?: boolean;
  /**
   * This is the sensitivity level for end-of-turn detection, which determines when a speaker has finished talking. Higher values are more sensitive.
   *
   * @default 0.5
   * @min 0
   * @max 1
   * @example 0.8
   */
  endOfTurnSensitivity?: number;
  /**
   * This enables removal of disfluencies (um, uh) from the transcript to create cleaner, more professional output.
   *
   * @default false
   * @example true
   */
  removeDisfluencies?: boolean;
  /**
   * This is the minimum duration in seconds for speech segments. Shorter segments will be filtered out. Helps remove noise and improve accuracy.
   *
   * @default 0.0
   * @min 0
   * @max 5
   * @example 0.2
   */
  minimumSpeechDuration?: number;
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface TalkscriberTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "talkscriber";
  /** This is the model that will be used for the transcription. */
  model?: "whisper";
  /**
   * This is the language that will be set for the transcription.
   * The list of languages Whisper supports can be found here: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py
   */
  language?:
    | "en" | "zh" | "de" | "es" | "ru" | "ko" | "fr" | "ja" | "pt" | "tr" | "pl" | "ca"
    | "nl" | "ar" | "sv" | "it" | "id" | "hi" | "fi" | "vi" | "he" | "uk" | "el" | "ms"
    | "cs" | "ro" | "da" | "hu" | "ta" | "no" | "th" | "ur" | "hr" | "bg" | "lt" | "la"
    | "mi" | "ml" | "cy" | "sk" | "te" | "fa" | "lv" | "bn" | "sr" | "az" | "sl" | "kn"
    | "et" | "mk" | "br" | "eu" | "is" | "hy" | "ne" | "mn" | "bs" | "kk" | "sq" | "sw"
    | "gl" | "mr" | "pa" | "si" | "km" | "sn" | "yo" | "so" | "af" | "oc" | "ka" | "be"
    | "tg" | "sd" | "gu" | "am" | "yi" | "lo" | "uz" | "fo" | "ht" | "ps" | "tk" | "nn"
    | "mt" | "sa" | "lb" | "my" | "bo" | "tl" | "mg" | "as" | "tt" | "haw" | "ln" | "ha"
    | "ba" | "jw" | "su" | "yue";
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface GoogleTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "google";
  /** This is the model that will be used for the transcription. */
  model?:
    | "gemini-2.5-pro"
    | "gemini-2.5-flash"
    | "gemini-2.5-flash-lite"
    | "gemini-2.0-flash-thinking-exp"
    | "gemini-2.0-pro-exp-02-05"
    | "gemini-2.0-flash"
    | "gemini-2.0-flash-lite"
    | "gemini-2.0-flash-exp"
    | "gemini-2.0-flash-realtime-exp"
    | "gemini-1.5-flash"
    | "gemini-1.5-flash-002"
    | "gemini-1.5-pro"
    | "gemini-1.5-pro-002"
    | "gemini-1.0-pro";
  /** This is the language that will be set for the transcription. */
  language?:
    | "Multilingual" | "Arabic" | "Bengali" | "Bulgarian" | "Chinese" | "Croatian"
    | "Czech" | "Danish" | "Dutch" | "English" | "Estonian" | "Finnish" | "French"
    | "German" | "Greek" | "Hebrew" | "Hindi" | "Hungarian" | "Indonesian" | "Italian"
    | "Japanese" | "Korean" | "Latvian" | "Lithuanian" | "Norwegian" | "Polish"
    | "Portuguese" | "Romanian" | "Russian" | "Serbian" | "Slovak" | "Slovenian"
    | "Spanish" | "Swahili" | "Swedish" | "Thai" | "Turkish" | "Ukrainian"
    | "Vietnamese";
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

export interface OpenAITranscriber {
  /** This is the transcription provider that will be used. */
  provider: "openai";
  /** This is the model that will be used for the transcription. */
  model: "gpt-4o-transcribe" | "gpt-4o-mini-transcribe";
  /** This is the language that will be set for the transcription. */
  language?:
    | "af" | "ar" | "hy" | "az" | "be" | "bs" | "bg" | "ca" | "zh" | "hr" | "cs" | "da"
    | "nl" | "en" | "et" | "fi" | "fr" | "gl" | "de" | "el" | "he" | "hi" | "hu" | "is"
    | "id" | "it" | "ja" | "kn" | "kk" | "ko" | "lv" | "lt" | "mk" | "ms" | "mr" | "mi"
    | "ne" | "no" | "fa" | "pl" | "pt" | "ro" | "ru" | "sr" | "sk" | "sl" | "es" | "sw"
    | "sv" | "tl" | "ta" | "th" | "tr" | "uk" | "ur" | "vi" | "cy";
  /** This is the plan for transcriber fallbacks in the event that the primary transcriber fails. */
  fallbackPlan?: FallbackTranscriberPlan;
}

/** Fallback variant of AssemblyAITranscriber: identical fields minus `fallbackPlan` (fallbacks cannot themselves declare fallbacks). */
export interface FallbackAssemblyAITranscriber {
  /** This is the transcription provider that will be used. */
  provider: "assembly-ai";
  /** This is the language that will be set for the transcription. */
  language?: "en";
  /**
   * Transcripts below this confidence threshold will be discarded.
   *
   * @default 0.4
   * @min 0
   * @max 1
   * @example 0.4
   */
  confidenceThreshold?: number;
  /**
   * This enables formatting of transcripts.
   *
   * @default true
   * @example true
   */
  formatTurns?: boolean;
  /**
   * This is the end of turn confidence threshold. The minimum confidence that the end of turn is detected.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   *
   * @default 0.7
   * @min 0
   * @max 1
   * @example 0.7
   */
  endOfTurnConfidenceThreshold?: number;
  /**
   * This is the minimum end of turn silence when confident in milliseconds.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   * @default 160
   * @min 0
   * @example 160
   */
  minEndOfTurnSilenceWhenConfident?: number;
  /**
   * Deprecated; retained for backward compatibility with existing configurations.
   * @deprecated
   * @min 0
   * @example 160
   */
  wordFinalizationMaxWaitTime?: number;
  /**
   * This is the maximum turn silence time in milliseconds.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   * @default 400
   * @min 0
   * @example 400
   */
  maxTurnSilence?: number;
  /**
   * Use VAD to assist with endpointing decisions from the transcriber.
   * When enabled, transcriber endpointing will be buffered if VAD detects the user is still speaking, preventing premature turn-taking.
   * When disabled, transcriber endpointing will be used immediately regardless of VAD state, allowing for quicker but more aggressive turn-taking.
   * Note: Only used if startSpeakingPlan.smartEndpointingPlan is not set.
   *
   * @default true
   * @example true
   */
  vadAssistedEndpointingEnabled?: boolean;
  /** The WebSocket URL that the transcriber connects to. */
  realtimeUrl?: string;
  /** Add up to 2500 characters of custom vocabulary. */
  wordBoost?: string[];
  /**
   * Keyterms prompting improves recognition accuracy for specific words and phrases.
   * Can include up to 100 keyterms, each up to 50 characters.
   * Costs an additional $0.04/hour when enabled.
   */
  keytermsPrompt?: string[];
  /** The duration of the end utterance silence threshold in milliseconds. */
  endUtteranceSilenceThreshold?: number;
  /**
   * Disable partial transcripts.
   * Set to `true` to not receive partial transcripts. Defaults to `false`.
   */
  disablePartialTranscripts?: boolean;
}

/** Fallback variant of AzureSpeechTranscriber: identical fields minus `fallbackPlan`. */
export interface FallbackAzureSpeechTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "azure";
  /**
   * This is the language that will be set for the transcription.
   * The list of languages Azure supports can be found here: https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=stt
   */
  language?:
    | "af-ZA" | "am-ET" | "ar-AE" | "ar-BH" | "ar-DZ" | "ar-EG" | "ar-IL" | "ar-IQ"
    | "ar-JO" | "ar-KW" | "ar-LB" | "ar-LY" | "ar-MA" | "ar-OM" | "ar-PS" | "ar-QA"
    | "ar-SA" | "ar-SY" | "ar-TN" | "ar-YE" | "az-AZ" | "bg-BG" | "bn-IN" | "bs-BA"
    | "ca-ES" | "cs-CZ" | "cy-GB" | "da-DK" | "de-AT" | "de-CH" | "de-DE" | "el-GR"
    | "en-AU" | "en-CA" | "en-GB" | "en-GH" | "en-HK" | "en-IE" | "en-IN" | "en-KE"
    | "en-NG" | "en-NZ" | "en-PH" | "en-SG" | "en-TZ" | "en-US" | "en-ZA" | "es-AR"
    | "es-BO" | "es-CL" | "es-CO" | "es-CR" | "es-CU" | "es-DO" | "es-EC" | "es-ES"
    | "es-GQ" | "es-GT" | "es-HN" | "es-MX" | "es-NI" | "es-PA" | "es-PE" | "es-PR"
    | "es-PY" | "es-SV" | "es-US" | "es-UY" | "es-VE" | "et-EE" | "eu-ES" | "fa-IR"
    | "fi-FI" | "fil-PH" | "fr-BE" | "fr-CA" | "fr-CH" | "fr-FR" | "ga-IE" | "gl-ES"
    | "gu-IN" | "he-IL" | "hi-IN" | "hr-HR" | "hu-HU" | "hy-AM" | "id-ID" | "is-IS"
    | "it-CH" | "it-IT" | "ja-JP" | "jv-ID" | "ka-GE" | "kk-KZ" | "km-KH" | "kn-IN"
    | "ko-KR" | "lo-LA" | "lt-LT" | "lv-LV" | "mk-MK" | "ml-IN" | "mn-MN" | "mr-IN"
    | "ms-MY" | "mt-MT" | "my-MM" | "nb-NO" | "ne-NP" | "nl-BE" | "nl-NL" | "pa-IN"
    | "pl-PL" | "ps-AF" | "pt-BR" | "pt-PT" | "ro-RO" | "ru-RU" | "si-LK" | "sk-SK"
    | "sl-SI" | "so-SO" | "sq-AL" | "sr-RS" | "sv-SE" | "sw-KE" | "sw-TZ" | "ta-IN"
    | "te-IN" | "th-TH" | "tr-TR" | "uk-UA" | "ur-IN" | "uz-UZ" | "vi-VN" | "wuu-CN"
    | "yue-CN" | "zh-CN" | "zh-CN-shandong" | "zh-CN-sichuan" | "zh-HK" | "zh-TW"
    | "zu-ZA";
  /** Controls how phrase boundaries are detected, enabling either simple time/silence heuristics or more advanced semantic segmentation. */
  segmentationStrategy?: "Default" | "Time" | "Semantic";
  /**
   * Duration of detected silence after which the service finalizes a phrase. Configure to adjust sensitivity to pauses in speech.
   * @min 100
   * @max 5000
   */
  segmentationSilenceTimeoutMs?: number;
  /**
   * Maximum duration a segment can reach before being cut off when using time-based segmentation.
   * @min 20000
   * @max 70000
   */
  segmentationMaximumTimeMs?: number;
}

/** Fallback variant of CartesiaTranscriber: identical fields minus `fallbackPlan`. */
export interface FallbackCartesiaTranscriber {
  /** This is the transcription provider that will be used. */
  provider: "cartesia";
  /** This is the model that will be used for the transcription. */
  model?: "ink-whisper";
  /** This is the language that will be set for the transcription (ISO 639-1 code). */
  language?:
    | "aa" | "ab" | "ae" | "af" | "ak" | "am" | "an" | "ar" | "as" | "av" | "ay" | "az"
    | "ba" | "be" | "bg" | "bh" | "bi" | "bm" | "bn" | "bo" | "br" | "bs" | "ca" | "ce"
    | "ch" | "co" | "cr" | "cs" | "cu" | "cv" | "cy" | "da" | "de" | "dv" | "dz" | "ee"
    | "el" | "en" | "eo" | "es" | "et" | "eu" | "fa" | "ff" | "fi" | "fj" | "fo" | "fr"
    | "fy" | "ga" | "gd" | "gl" | "gn" | "gu" | "gv" | "ha" | "he" | "hi" | "ho" | "hr"
    | "ht" | "hu" | "hy" | "hz" | "ia" | "id" | "ie" | "ig" | "ii" | "ik" | "io" | "is"
    | "it" | "iu" | "ja" | "jv" | "ka" | "kg" | "ki" | "kj" | "kk" | "kl" | "km" | "kn"
    | "ko" | "kr" | "ks" | "ku" | "kv" | "kw" | "ky" | "la" | "lb" | "lg" | "li" | "ln"
    | "lo" | "lt" | "lu" | "lv" | "mg" | "mh" | "mi" | "mk" | "ml" | "mn" | "mr" | "ms"
    | "mt" | "my" | "na" | "nb" | "nd" | "ne" | "ng" | "nl" | "nn" | "no" | "nr" | "nv"
    | "ny" | "oc" | "oj" | "om" | "or" | "os" | "pa" | "pi" | "pl" | "ps" | "pt" | "qu"
    | "rm" | "rn" | "ro" | "ru" | "rw" | "sa" | "sc" | "sd" | "se" | "sg" | "si" | "sk"
    | "sl" | "sm" | "sn" | "so" | "sq" | "sr" | "ss" | "st" | "su" | "sv" | "sw" | "ta"
    | "te" | "tg" | "th" | "ti" | "tk" | "tl" | "tn" | "to" | "tr" | "ts" | "tt" | "tw"
    | "ty" | "ug" | "uk" | "ur" | "uz" | "ve" | "vi" | "vo" | "wa" | "wo" | "xh" | "yi"
    | "yue" | "yo" | "za" | "zh" | "zu";
}

export interface FallbackCustomTranscriber { /** This is the transcription provider that will be used. Use `custom-transcriber` for providers that are not natively supported.
*/ provider: "custom-transcriber"; /** * This is where the transcription request will be sent. * * Usage: * 1. Vapi will initiate a websocket connection with `server.url`. * * 2. Vapi will send an initial text frame with the sample rate. Format: * ``` * { * "type": "start", * "encoding": "linear16", // 16-bit raw PCM format * "container": "raw", * "sampleRate": {{sampleRate}}, * "channels": 2 // customer is channel 0, assistant is channel 1 * } * ``` * * 3. Vapi will send the audio data in 16-bit raw PCM format as binary frames. * * 4. You can read the messages something like this: * ``` * ws.on('message', (data, isBinary) => { * if (isBinary) { * pcmBuffer = Buffer.concat([pcmBuffer, data]); * console.log(`Received PCM data, buffer size: ${pcmBuffer.length}`); * } else { * console.log('Received message:', JSON.parse(data.toString())); * } * }); * ``` * * 5. You will respond with transcriptions as you have them. Format: * ``` * { * "type": "transcriber-response", * "transcription": "Hello, world!", * "channel": "customer" | "assistant" * } * ``` */ server: Server; } export interface FallbackDeepgramTranscriber { /** This is the transcription provider that will be used. */ provider: "deepgram"; /** This is the Deepgram model that will be used. 
A list of models can be found here: https://developers.deepgram.com/docs/models-languages-overview */ model?: | "nova-3" | "nova-3-general" | "nova-3-medical" | "nova-2" | "nova-2-general" | "nova-2-meeting" | "nova-2-phonecall" | "nova-2-finance" | "nova-2-conversationalai" | "nova-2-voicemail" | "nova-2-video" | "nova-2-medical" | "nova-2-drivethru" | "nova-2-automotive" | "nova" | "nova-general" | "nova-phonecall" | "nova-medical" | "enhanced" | "enhanced-general" | "enhanced-meeting" | "enhanced-phonecall" | "enhanced-finance" | "base" | "base-general" | "base-meeting" | "base-phonecall" | "base-finance" | "base-conversationalai" | "base-voicemail" | "base-video" | "whisper" | "flux-general-en" | string; /** This is the language that will be set for the transcription. The list of languages Deepgram supports can be found here: https://developers.deepgram.com/docs/models-languages-overview */ language?: | "ar" | "az" | "ba" | "bg" | "br" | "ca" | "cs" | "da" | "da-DK" | "de" | "de-CH" | "el" | "en" | "en-AU" | "en-CA" | "en-GB" | "en-IE" | "en-IN" | "en-NZ" | "en-US" | "es" | "es-419" | "es-LATAM" | "et" | "eu" | "fi" | "fr" | "fr-CA" | "ha" | "haw" | "he" | "hi" | "hi-Latn" | "hu" | "id" | "is" | "it" | "ja" | "jw" | "kn" | "ko" | "ko-KR" | "ln" | "lt" | "lv" | "mk" | "ms" | "multi" | "nl" | "nl-BE" | "no" | "pl" | "pt" | "pt-BR" | "pt-PT" | "ro" | "ru" | "sk" | "sl" | "sn" | "so" | "sr" | "su" | "sv" | "sv-SE" | "ta" | "taq" | "th" | "th-TH" | "tr" | "tt" | "uk" | "ur" | "vi" | "yo" | "zh" | "zh-CN" | "zh-HK" | "zh-Hans" | "zh-Hant" | "zh-TW"; /** * This will use the Smart Format option provided by Deepgram. It's disabled by default because it can sometimes format numbers as times, but it's getting better. * @example false */ smartFormat?: boolean; /** * If set to true, this will add mip_opt_out=true as a query parameter of all API requests. 
See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out * * This will only be used if you are using your own Deepgram API key. * * @default false * @example false */ mipOptOut?: boolean; /** * If set to true, this will cause deepgram to convert spoken numbers to literal numerals. For example, "my phone number is nine-seven-two..." would become "my phone number is 972..." * * @default false * @example false */ numerals?: boolean; /** * Transcripts below this confidence threshold will be discarded. * * @default 0.4 * @min 0 * @max 1 * @example 0.4 */ confidenceThreshold?: number; /** * Eager end-of-turn confidence required to fire an eager end-of-turn event. Setting a value here will enable EagerEndOfTurn and SpeechResumed events. It is disabled by default. Only used with Flux models. * @min 0 * @max 1 * @example 0.3 */ eagerEotThreshold?: number; /** * End-of-turn confidence required to finish a turn. Only used with Flux models. * * @default 0.7 * @min 0.5 * @max 0.9 * @example 0.7 */ eotThreshold?: number; /** * A turn will be finished when this much time has passed after speech, regardless of EOT confidence. Only used with Flux models. * * @default 5000 * @min 500 * @max 10000 * @example 5000 */ eotTimeoutMs?: number; /** These keywords are passed to the transcription model to help it pick up use-case specific words. Anything that may not be a common word, like your company name, should be added here. */ keywords?: string[]; /** Keyterm Prompting allows you to improve Keyword Recall Rate (KRR) for important keyterms or phrases up to 90%. */ keyterm?: string[]; /** * This is the timeout after which Deepgram will send transcription on user silence. You can read in-depth documentation here: https://developers.deepgram.com/docs/endpointing. * * Here are the most important bits: * - Defaults to 10. This is recommended for most use cases to optimize for latency. 
* - 10 can cause some missing transcriptions because of the shorter context. This mostly happens for one-word utterances. For those use cases, it's recommended to try 300. It will add a bit of latency but the quality and reliability of the experience will be better. * - If neither 10 nor 300 work, contact support@vapi.ai and we'll find another solution. * * @default 10 * @min 10 * @max 500 */ endpointing?: number; } export interface FallbackElevenLabsTranscriber { /** This is the transcription provider that will be used. */ provider: "11labs"; /** This is the model that will be used for the transcription. */ model?: "scribe_v1"; language?: | "aa" | "ab" | "ae" | "af" | "ak" | "am" | "an" | "ar" | "as" | "av" | "ay" | "az" | "ba" | "be" | "bg" | "bh" | "bi" | "bm" | "bn" | "bo" | "br" | "bs" | "ca" | "ce" | "ch" | "co" | "cr" | "cs" | "cu" | "cv" | "cy" | "da" | "de" | "dv" | "dz" | "ee" | "el" | "en" | "eo" | "es" | "et" | "eu" | "fa" | "ff" | "fi" | "fj" | "fo" | "fr" | "fy" | "ga" | "gd" | "gl" | "gn" | "gu" | "gv" | "ha" | "he" | "hi" | "ho" | "hr" | "ht" | "hu" | "hy" | "hz" | "ia" | "id" | "ie" | "ig" | "ii" | "ik" | "io" | "is" | "it" | "iu" | "ja" | "jv" | "ka" | "kg" | "ki" | "kj" | "kk" | "kl" | "km" | "kn" | "ko" | "kr" | "ks" | "ku" | "kv" | "kw" | "ky" | "la" | "lb" | "lg" | "li" | "ln" | "lo" | "lt" | "lu" | "lv" | "mg" | "mh" | "mi" | "mk" | "ml" | "mn" | "mr" | "ms" | "mt" | "my" | "na" | "nb" | "nd" | "ne" | "ng" | "nl" | "nn" | "no" | "nr" | "nv" | "ny" | "oc" | "oj" | "om" | "or" | "os" | "pa" | "pi" | "pl" | "ps" | "pt" | "qu" | "rm" | "rn" | "ro" | "ru" | "rw" | "sa" | "sc" | "sd" | "se" | "sg" | "si" | "sk" | "sl" | "sm" | "sn" | "so" | "sq" | "sr" | "ss" | "st" | "su" | "sv" | "sw" | "ta" | "te" | "tg" | "th" | "ti" | "tk" | "tl" | "tn" | "to" | "tr" | "ts" | "tt" | "tw" | "ty" | "ug" | "uk" | "ur" | "uz" | "ve" | "vi" | "vo" | "wa" | "wo" | "xh" | "yi" | "yue" | "yo" | "za" | "zh" | "zu"; } export interface GladiaVocabularyItemDTO { 
/** The vocabulary word or phrase */ value: string; /** Alternative pronunciations for the vocabulary item */ pronunciations?: string[]; /** * Intensity for this specific vocabulary item (0.0 to 1.0) * @min 0 * @max 1 */ intensity?: number; /** Language code for this vocabulary item (ISO 639-1) */ language?: string; } export interface FallbackGladiaTranscriber { /** This is the transcription provider that will be used. */ provider: "gladia"; /** This is the Gladia model that will be used. Default is 'fast' */ model?: "fast" | "accurate" | "solaria-1"; /** Defines how the transcription model detects the audio language. Default value is 'automatic single language'. */ languageBehaviour?: | "manual" | "automatic single language" | "automatic multiple languages"; /** Defines the language to use for the transcription. Required when languageBehaviour is 'manual'. */ language?: | "af" | "sq" | "am" | "ar" | "hy" | "as" | "az" | "ba" | "eu" | "be" | "bn" | "bs" | "br" | "bg" | "ca" | "zh" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fo" | "fi" | "fr" | "gl" | "ka" | "de" | "el" | "gu" | "ht" | "ha" | "haw" | "he" | "hi" | "hu" | "is" | "id" | "it" | "ja" | "jv" | "kn" | "kk" | "km" | "ko" | "lo" | "la" | "lv" | "ln" | "lt" | "lb" | "mk" | "mg" | "ms" | "ml" | "mt" | "mi" | "mr" | "mn" | "my" | "ne" | "no" | "nn" | "oc" | "ps" | "fa" | "pl" | "pt" | "pa" | "ro" | "ru" | "sa" | "sr" | "sn" | "sd" | "si" | "sk" | "sl" | "so" | "es" | "su" | "sw" | "sv" | "tl" | "tg" | "ta" | "tt" | "te" | "th" | "bo" | "tr" | "tk" | "uk" | "ur" | "uz" | "vi" | "cy" | "yi" | "yo"; /** Defines the languages to use for the transcription. Required when languageBehaviour is 'manual'. 
*/ languages?: | "af" | "sq" | "am" | "ar" | "hy" | "as" | "az" | "ba" | "eu" | "be" | "bn" | "bs" | "br" | "bg" | "ca" | "zh" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fo" | "fi" | "fr" | "gl" | "ka" | "de" | "el" | "gu" | "ht" | "ha" | "haw" | "he" | "hi" | "hu" | "is" | "id" | "it" | "ja" | "jv" | "kn" | "kk" | "km" | "ko" | "lo" | "la" | "lv" | "ln" | "lt" | "lb" | "mk" | "mg" | "ms" | "ml" | "mt" | "mi" | "mr" | "mn" | "my" | "ne" | "no" | "nn" | "oc" | "ps" | "fa" | "pl" | "pt" | "pa" | "ro" | "ru" | "sa" | "sr" | "sn" | "sd" | "si" | "sk" | "sl" | "so" | "es" | "su" | "sw" | "sv" | "tl" | "tg" | "ta" | "tt" | "te" | "th" | "bo" | "tr" | "tk" | "uk" | "ur" | "uz" | "vi" | "cy" | "yi" | "yo"; /** * Provides a custom vocabulary to the model to improve accuracy of transcribing context specific words, technical terms, names, etc. If empty, this argument is ignored. * ⚠️ Warning ⚠️: Please be aware that the transcription_hint field has a character limit of 600. If you provide a transcription_hint longer than 600 characters, it will be automatically truncated to meet this limit. * @maxLength 600 * @example "custom vocabulary" */ transcriptionHint?: string; /** * If prosody is true, you will get a transcription that can contain prosodies i.e. (laugh) (giggles) (malefic laugh) (toss) (music)… Default value is false. * @example false */ prosody?: boolean; /** * If true, audio will be pre-processed to improve accuracy but latency will increase. Default value is false. * @example false */ audioEnhancer?: boolean; /** * Transcripts below this confidence threshold will be discarded. 
* * @default 0.4 * @min 0 * @max 1 * @example 0.4 */ confidenceThreshold?: number; /** * Endpointing time in seconds - time to wait before considering speech ended * @min 0.01 * @max 10 * @example 0.05 */ endpointing?: number; /** * Speech threshold - sensitivity configuration for speech detection (0.0 to 1.0) * @min 0 * @max 1 * @example 0.6 */ speechThreshold?: number; /** * Enable custom vocabulary for improved accuracy * @example false */ customVocabularyEnabled?: boolean; /** Custom vocabulary configuration */ customVocabularyConfig?: GladiaCustomVocabularyConfigDTO; /** * Region for processing audio (us-west or eu-west) * @example "us-west" */ region?: "us-west" | "eu-west"; /** * Enable partial transcripts for low-latency streaming transcription * @example false */ receivePartialTranscripts?: boolean; } export interface FallbackSpeechmaticsTranscriber { /** This is the transcription provider that will be used. */ provider: "speechmatics"; /** This is the model that will be used for the transcription. */ model?: "default"; language?: | "auto" | "ar" | "ba" | "eu" | "be" | "bn" | "bg" | "yue" | "ca" | "hr" | "cs" | "da" | "nl" | "en" | "eo" | "et" | "fi" | "fr" | "gl" | "de" | "el" | "he" | "hi" | "hu" | "id" | "ia" | "ga" | "it" | "ja" | "ko" | "lv" | "lt" | "ms" | "mt" | "cmn" | "mr" | "mn" | "no" | "fa" | "pl" | "pt" | "ro" | "ru" | "sk" | "sl" | "es" | "sw" | "sv" | "ta" | "th" | "tr" | "uk" | "ur" | "ug" | "vi" | "cy"; /** * This is the operating point for the transcription. Choose between `standard` for faster turnaround with strong accuracy or `enhanced` for highest accuracy when precision is critical. * * @default "enhanced" * @example "enhanced" */ operatingPoint?: "standard" | "enhanced"; /** * This is the region for the Speechmatics API. Choose between EU (Europe) and US (United States) regions for lower latency and data sovereignty compliance. 
* * @default "eu" * @example "us" */ region?: "eu" | "us"; /** * This enables speaker diarization, which identifies and separates speakers in the transcription. Essential for multi-speaker conversations and conference calls. * * @default false * @example true */ enableDiarization?: boolean; /** * This sets the maximum number of speakers to detect when diarization is enabled. Only used when enableDiarization is true. * * @default 2 * @min 2 * @max 50 * @example 4 */ maxSpeakers?: number; /** * Provides friendly speaker labels that map to diarization indices (Speaker 1 -> labels[0]). * @example ["Agent","Customer"] */ speakerLabels?: string[]; /** * This enables partial transcripts during speech recognition. When false, only final transcripts are returned. * * @default true * @example false */ enablePartials?: boolean; /** * This sets the maximum delay in milliseconds for partial transcripts. Balances latency and accuracy. * * @default 3000 * @min 500 * @max 10000 * @example 1500 */ maxDelay?: number; /** * Custom vocabulary entries used to bias recognition toward specific words and pronunciations. * NOTE(review): this field is required while every sibling field here is optional — confirm against the upstream spec that it should not be optional. * @example [{"content":"Speechmatics","soundsLike":["speech mattix"]}] */ customVocabulary: SpeechmaticsCustomVocabularyItem[]; /** * This controls how numbers are formatted in the transcription output. * * @default "written" * @example "spoken" */ numeralStyle?: "written" | "spoken"; /** * This enables detection of non-speech audio events like music, applause, and laughter. * NOTE(review): this description may be inaccurate — Speechmatics' enable_entities setting normally controls written-entity recognition (numbers, dates, etc.); confirm against the Speechmatics API docs. * * @default false * @example true */ enableEntities?: boolean; /** * This enables automatic punctuation in the transcription output. * * @default true * @example false */ enablePunctuation?: boolean; /** * This enables automatic capitalization in the transcription output. 
* * @default true * @example false */ enableCapitalization?: boolean; /** * This is the sensitivity level for end-of-turn detection, which determines when a speaker has finished talking. Higher values are more sensitive. * * @default 0.5 * @min 0 * @max 1 * @example 0.8 */ endOfTurnSensitivity?: number; /** * This enables removal of disfluencies (um, uh) from the transcript to create cleaner, more professional output. * * @default false * @example true */ removeDisfluencies?: boolean; /** * This is the minimum duration in seconds for speech segments. Shorter segments will be filtered out. Helps remove noise and improve accuracy. * * @default 0 * @min 0 * @max 5 * @example 0.2 */ minimumSpeechDuration?: number; } export interface FallbackTalkscriberTranscriber { /** This is the transcription provider that will be used. */ provider: "talkscriber"; /** This is the model that will be used for the transcription. */ model?: "whisper"; /** This is the language that will be set for the transcription. 
The list of languages Whisper supports can be found here: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py */ language?: | "en" | "zh" | "de" | "es" | "ru" | "ko" | "fr" | "ja" | "pt" | "tr" | "pl" | "ca" | "nl" | "ar" | "sv" | "it" | "id" | "hi" | "fi" | "vi" | "he" | "uk" | "el" | "ms" | "cs" | "ro" | "da" | "hu" | "ta" | "no" | "th" | "ur" | "hr" | "bg" | "lt" | "la" | "mi" | "ml" | "cy" | "sk" | "te" | "fa" | "lv" | "bn" | "sr" | "az" | "sl" | "kn" | "et" | "mk" | "br" | "eu" | "is" | "hy" | "ne" | "mn" | "bs" | "kk" | "sq" | "sw" | "gl" | "mr" | "pa" | "si" | "km" | "sn" | "yo" | "so" | "af" | "oc" | "ka" | "be" | "tg" | "sd" | "gu" | "am" | "yi" | "lo" | "uz" | "fo" | "ht" | "ps" | "tk" | "nn" | "mt" | "sa" | "lb" | "my" | "bo" | "tl" | "mg" | "as" | "tt" | "haw" | "ln" | "ha" | "ba" | "jw" | "su" | "yue"; } export interface FallbackGoogleTranscriber { /** This is the transcription provider that will be used. */ provider: "google"; /** This is the model that will be used for the transcription. */ model?: | "gemini-2.5-pro" | "gemini-2.5-flash" | "gemini-2.5-flash-lite" | "gemini-2.0-flash-thinking-exp" | "gemini-2.0-pro-exp-02-05" | "gemini-2.0-flash" | "gemini-2.0-flash-lite" | "gemini-2.0-flash-exp" | "gemini-2.0-flash-realtime-exp" | "gemini-1.5-flash" | "gemini-1.5-flash-002" | "gemini-1.5-pro" | "gemini-1.5-pro-002" | "gemini-1.0-pro"; /** This is the language that will be set for the transcription. 
*/ language?: | "Multilingual" | "Arabic" | "Bengali" | "Bulgarian" | "Chinese" | "Croatian" | "Czech" | "Danish" | "Dutch" | "English" | "Estonian" | "Finnish" | "French" | "German" | "Greek" | "Hebrew" | "Hindi" | "Hungarian" | "Indonesian" | "Italian" | "Japanese" | "Korean" | "Latvian" | "Lithuanian" | "Norwegian" | "Polish" | "Portuguese" | "Romanian" | "Russian" | "Serbian" | "Slovak" | "Slovenian" | "Spanish" | "Swahili" | "Swedish" | "Thai" | "Turkish" | "Ukrainian" | "Vietnamese"; } export interface FallbackOpenAITranscriber { /** This is the transcription provider that will be used. */ provider: "openai"; /** This is the model that will be used for the transcription. */ model: "gpt-4o-transcribe" | "gpt-4o-mini-transcribe"; /** This is the language that will be set for the transcription. */ language?: | "af" | "ar" | "hy" | "az" | "be" | "bs" | "bg" | "ca" | "zh" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fi" | "fr" | "gl" | "de" | "el" | "he" | "hi" | "hu" | "is" | "id" | "it" | "ja" | "kn" | "kk" | "ko" | "lv" | "lt" | "mk" | "ms" | "mr" | "mi" | "ne" | "no" | "fa" | "pl" | "pt" | "ro" | "ru" | "sr" | "sk" | "sl" | "es" | "sw" | "sv" | "tl" | "ta" | "th" | "tr" | "uk" | "ur" | "vi" | "cy"; } export interface LangfuseObservabilityPlan { provider: "langfuse"; /** This is an array of tags to be added to the Langfuse trace. Tags allow you to categorize and filter traces. https://langfuse.com/docs/tracing-features/tags */ tags: string[]; /** * This is a JSON object that will be added to the Langfuse trace. Traces can be enriched with metadata to better understand your users, application, and experiments. https://langfuse.com/docs/tracing-features/metadata * By default it includes the call metadata, assistant metadata, and assistant overrides. 
*/ metadata?: object; } export interface TextContent { type: "text"; text: string; language: | "aa" | "ab" | "ae" | "af" | "ak" | "am" | "an" | "ar" | "as" | "av" | "ay" | "az" | "ba" | "be" | "bg" | "bh" | "bi" | "bm" | "bn" | "bo" | "br" | "bs" | "ca" | "ce" | "ch" | "co" | "cr" | "cs" | "cu" | "cv" | "cy" | "da" | "de" | "dv" | "dz" | "ee" | "el" | "en" | "eo" | "es" | "et" | "eu" | "fa" | "ff" | "fi" | "fj" | "fo" | "fr" | "fy" | "ga" | "gd" | "gl" | "gn" | "gu" | "gv" | "ha" | "he" | "hi" | "ho" | "hr" | "ht" | "hu" | "hy" | "hz" | "ia" | "id" | "ie" | "ig" | "ii" | "ik" | "io" | "is" | "it" | "iu" | "ja" | "jv" | "ka" | "kg" | "ki" | "kj" | "kk" | "kl" | "km" | "kn" | "ko" | "kr" | "ks" | "ku" | "kv" | "kw" | "ky" | "la" | "lb" | "lg" | "li" | "ln" | "lo" | "lt" | "lu" | "lv" | "mg" | "mh" | "mi" | "mk" | "ml" | "mn" | "mr" | "ms" | "mt" | "my" | "na" | "nb" | "nd" | "ne" | "ng" | "nl" | "nn" | "no" | "nr" | "nv" | "ny" | "oc" | "oj" | "om" | "or" | "os" | "pa" | "pi" | "pl" | "ps" | "pt" | "qu" | "rm" | "rn" | "ro" | "ru" | "rw" | "sa" | "sc" | "sd" | "se" | "sg" | "si" | "sk" | "sl" | "sm" | "sn" | "so" | "sq" | "sr" | "ss" | "st" | "su" | "sv" | "sw" | "ta" | "te" | "tg" | "th" | "ti" | "tk" | "tl" | "tn" | "to" | "tr" | "ts" | "tt" | "tw" | "ty" | "ug" | "uk" | "ur" | "uz" | "ve" | "vi" | "vo" | "wa" | "wo" | "xh" | "yi" | "yue" | "yo" | "za" | "zh" | "zu"; } export interface Condition { /** This is the operator you want to use to compare the parameter and value. */ operator: "eq" | "neq" | "gt" | "gte" | "lt" | "lte"; /** * This is the name of the parameter that you want to check. * @maxLength 1000 */ param: string; /** * This is the value you want to compare against the parameter. * @maxLength 1000 */ value: string; } export interface ToolMessageStart { /** * This is an alternative to the `content` property. It allows to specify variants of the same content, one per language. 
* * Usage: * - If your assistants are multilingual, you can provide content for each language. * - If you don't provide content for a language, the first item in the array will be automatically translated to the active language at that moment. * * This will override the `content` property. */ contents?: TextContent[]; /** * This message is triggered when the tool call starts. * * This message is never triggered for async tools. * * If this message is not provided, one of the default filler messages "Hold on a sec", "One moment", "Just a sec", "Give me a moment" or "This'll just take a sec" will be used. */ type: "request-start"; /** * This is an optional boolean that if true, the tool call will only trigger after the message is spoken. Default is false. * * @default false * @example false */ blocking?: boolean; /** * This is the content that the assistant says when this message is triggered. * @maxLength 1000 */ content?: string; /** This is an optional array of conditions that the tool call arguments must meet in order for this message to be triggered. */ conditions?: Condition[]; } export interface ToolMessageComplete { /** * This is an alternative to the `content` property. It allows to specify variants of the same content, one per language. * * Usage: * - If your assistants are multilingual, you can provide content for each language. * - If you don't provide content for a language, the first item in the array will be automatically translated to the active language at that moment. * * This will override the `content` property. */ contents?: TextContent[]; /** * This message is triggered when the tool call is complete. * * This message is triggered immediately without waiting for your server to respond for async tool calls. * * If this message is not provided, the model will be requested to respond. * * If this message is provided, only this message will be spoken and the model will not be requested to come up with a response. 
It's an exclusive OR. */ type: "request-complete"; /** * This is optional and defaults to "assistant". * * When role=assistant, `content` is said out loud. * * When role=system, `content` is passed to the model in a system message. Example: * system: default one * assistant: * user: * assistant: * user: * assistant: * user: * assistant: tool called * tool: your server response * <--- system prompt as hint * ---> model generates response which is spoken * This is useful when you want to provide a hint to the model about what to say next. */ role?: "assistant" | "system"; /** * This is an optional boolean that if true, the call will end after the message is spoken. Default is false. * * This is ignored if `role` is set to `system`. * * @default false * @example false */ endCallAfterSpokenEnabled?: boolean; /** * This is the content that the assistant says when this message is triggered. * @maxLength 1000 */ content?: string; /** This is an optional array of conditions that the tool call arguments must meet in order for this message to be triggered. */ conditions?: Condition[]; } export interface ToolMessageFailed { /** * This is an alternative to the `content` property. It allows to specify variants of the same content, one per language. * * Usage: * - If your assistants are multilingual, you can provide content for each language. * - If you don't provide content for a language, the first item in the array will be automatically translated to the active language at that moment. * * This will override the `content` property. */ contents?: TextContent[]; /** * This message is triggered when the tool call fails. * * This message is never triggered for async tool calls. * * If this message is not provided, the model will be requested to respond. * * If this message is provided, only this message will be spoken and the model will not be requested to come up with a response. It's an exclusive OR. 
*/ type: "request-failed"; /** * This is an optional boolean that if true, the call will end after the message is spoken. Default is false. * * @default false * @example false */ endCallAfterSpokenEnabled?: boolean; /** * This is the content that the assistant says when this message is triggered. * @maxLength 1000 */ content?: string; /** This is an optional array of conditions that the tool call arguments must meet in order for this message to be triggered. */ conditions?: Condition[]; } export interface ToolMessageDelayed { /** * This is an alternative to the `content` property. It allows to specify variants of the same content, one per language. * * Usage: * - If your assistants are multilingual, you can provide content for each language. * - If you don't provide content for a language, the first item in the array will be automatically translated to the active language at that moment. * * This will override the `content` property. */ contents?: TextContent[]; /** * This message is triggered when the tool call is delayed. * * There are the two things that can trigger this message: * 1. The user talks with the assistant while your server is processing the request. Default is "Sorry, a few more seconds." * 2. The server doesn't respond within `timingMilliseconds`. * * This message is never triggered for async tool calls. */ type: "request-response-delayed"; /** * The number of milliseconds to wait for the server response before saying this message. * @min 100 * @max 120000 * @example 1000 */ timingMilliseconds?: number; /** * This is the content that the assistant says when this message is triggered. * @maxLength 1000 */ content?: string; /** This is an optional array of conditions that the tool call arguments must meet in order for this message to be triggered. */ conditions?: Condition[]; } export interface MessageTarget { /** * This is the role of the message to target. 
* * If not specified, will find the position in the message history ignoring role (effectively `any`). * @example "user" */ role?: "user" | "assistant"; /** * This is the position of the message to target. * - Negative numbers: Count from end (-1 = most recent, -2 = second most recent) * - 0: First/oldest message in history * - Positive numbers: Specific position (0-indexed from start) * * @default -1 (most recent message) * @example -1 */ position?: number; } export interface RegexCondition { /** * This is the type discriminator for regex condition * @example "regex" */ type: "regex"; /** * This is the regular expression pattern to match against message content. * * Note: * - This works by using the RegExp.test method in Node.JS. Eg. /hello/.test("hello there") will return true. * * Hot tips: * - In JavaScript, escape \ when sending the regex pattern. Eg. "hello\sthere" will be sent over the wire as "hellosthere". Send "hello\\sthere" instead. * - RegExp.test does substring matching, so /cat/.test("I love cats") will return true. To do full string matching, use anchors: /^cat$/ will only match exactly "cat". * - Word boundaries \b are useful for matching whole words: /\bcat\b/ matches "cat" but not "cats" or "category". * - Use inline flags for portability: (?i) for case insensitive, (?m) for multiline */ regex: string; /** * This is the target for messages to check against. * If not specified, the condition will run on the last message (position: -1). * If role is not specified, it will look at the last message regardless of role. * @default { position: -1 } */ target?: MessageTarget; /** * This is the flag that when true, the condition matches if the pattern does NOT match. * Useful for ensuring certain words/phrases are absent. 
* * @default false * @example "true - Reject if user hasn't said goodbye: { regex: "\\b(bye|goodbye)\\b", negate: true }" */ negate?: boolean; } export interface LiquidCondition { /** * This is the type discriminator for liquid condition * @example "liquid" */ type: "liquid"; /** * This is the Liquid template that must return exactly "true" or "false" as a string. * The template is evaluated and the entire output must be either "true" or "false" - nothing else. * * Available variables: * - `messages`: Array of recent messages in OpenAI chat completions format (ChatCompletionMessageParam[]) * Each message has properties like: role ('user', 'assistant', 'system'), content (string), etc. * - `now`: Current timestamp in milliseconds (built-in Liquid variable) * - Any assistant variable values (e.g., `userName`, `accountStatus`) * * Useful Liquid filters for messages: * - `messages | last: 5` - Get the 5 most recent messages * - `messages | where: 'role', 'user'` - Filter to only user messages * - `messages | reverse` - Reverse the order of messages */ liquid: string; } export interface GroupCondition { /** * This is the type discriminator for group condition * @example "group" */ type: "group"; /** This is the logical operator for combining conditions in this group */ operator: "AND" | "OR"; /** * This is the list of nested conditions to evaluate. * Supports recursive nesting of groups for complex logic. */ conditions: (RegexCondition | LiquidCondition | GroupCondition)[]; } export interface ToolRejectionPlan { /** * This is the list of conditions that must be evaluated. * * Usage: * - If all conditions match (AND logic), the tool call is rejected. * - For OR logic at the top level, use a single 'group' condition with operator: 'OR'. 
* * @default [] - Empty array means tool always executes */ conditions?: (RegexCondition | LiquidCondition | GroupCondition)[]; } export interface CreateDtmfToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "dtmf" for DTMF tool. */ type: "dtmf"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size 
< 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateEndCallToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "endCall" for End Call tool. */ type: "endCall"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateVoicemailToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "voicemail" for Voicemail tool. */ type: "voicemail"; /** * This is the flag that enables beep detection for voicemail detection and applies only for twilio based calls. * * @default false * @default false * @example false */ beepDetectionEnabled?: boolean; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject 
endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface JsonSchema { /** * This is the type of output you'd like. * * `string`, `number`, `integer`, `boolean` are the primitive types and should be obvious. * * `array` and `object` are more interesting and quite powerful. They allow you to define nested structures. * * For `array`, you can define the schema of the items in the array using the `items` property. * * For `object`, you can define the properties of the object using the `properties` property. */ type: "string" | "number" | "integer" | "boolean" | "array" | "object"; /** * This is required if the type is "array". This is the schema of the items in the array. * * This is of type JsonSchema. However, Swagger doesn't support circular references. */ items?: object; /** * This is required if the type is "object". This specifies the properties of the object. * * This is a map of string to JsonSchema. 
However, Swagger doesn't support circular references. */ properties?: object; /** This is the description to help the model understand what it needs to output. */ description?: string; /** * This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the `format` property instead. * * OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties */ pattern?: string; /** * This is the format of the string. To pass a regex, use the `pattern` property instead. * * OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions */ format?: | "date-time" | "time" | "date" | "duration" | "email" | "hostname" | "ipv4" | "ipv6" | "uuid"; /** * This is a list of properties that are required. * * This only makes sense if the type is "object". */ required?: string[]; /** This array specifies the allowed values that can be used to restrict the output of the model. */ enum?: string[]; /** This is the title of the schema. */ title?: string; } export interface OpenAIFunctionParameters { /** This must be set to 'object'. It instructs the model to return a JSON object containing the function call properties. */ type: "object"; /** * This provides a description of the properties required by the function. * JSON Schema can be used to specify expectations for each property. * Refer to [this doc](https://ajv.js.org/json-schema.html#json-data-type) for a comprehensive guide on JSON Schema. */ properties: Record; /** This specifies the properties that are required by the function. */ required?: string[]; } export interface OpenAIFunction { /** * This is a boolean that controls whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. 
Learn more about Structured Outputs in the [OpenAI guide](https://openai.com/index/introducing-structured-outputs-in-the-api/). * * @default false * @default false */ strict?: boolean; /** * This is the the name of the function to be called. * * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. * @maxLength 64 * @pattern /^[a-zA-Z0-9_-]{1,64}$/ */ name: string; /** This is the description of what the function does, used by the AI to choose when and how to call the function. */ description?: string; /** * These are the parameters the functions accepts, described as a JSON Schema object. * * See the [OpenAI guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format. * * Omitting parameters defines a function with an empty parameter list. */ parameters?: OpenAIFunctionParameters; } export interface CreateFunctionToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "function" for Function tool. */ type: "function"; /** * This determines if the tool is async. * * If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server. * * If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server. * * Defaults to synchronous (`false`). * @example false */ async?: boolean; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. 
* - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** This is the function definition of the tool. */ function?: OpenAIFunction; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | 
downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GhlToolMetadata { workflowId?: string; locationId?: string; } export interface CreateGhlToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "ghl" for GHL tool. */ type: "ghl"; metadata: GhlToolMetadata; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface MakeToolMetadata { scenarioId?: number; triggerHookId?: number; } export interface CreateMakeToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "make" for Make tool. */ type: "make"; metadata: MakeToolMetadata; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% 
assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CustomMessage { /** * This is an alternative to the `content` property. It allows to specify variants of the same content, one per language. * * Usage: * - If your assistants are multilingual, you can provide content for each language. * - If you don't provide content for a language, the first item in the array will be automatically translated to the active language at that moment. * * This will override the `content` property. */ contents?: TextContent[]; /** This is a custom message. */ type: "custom-message"; /** * This is the content that the assistant will say when this message is triggered. * @maxLength 1000 */ content?: string; } export interface TransferDestinationAssistant { /** * This is spoken to the customer before connecting them to the destination. * * Usage: * - If this is not provided and transfer tool messages is not provided, default is "Transferring the call now". * - If set to "", nothing is spoken. This is useful when you want to silently transfer. 
This is especially useful when transferring between assistants in a squad. In this scenario, you likely also want to set `assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message` for the destination assistant. * * This accepts a string or a ToolMessageStart class. Latter is useful if you want to specify multiple messages for different languages through the `contents` field. */ message?: string | CustomMessage; type: "assistant"; /** * This is the mode to use for the transfer. Defaults to `rolling-history`. * * - `rolling-history`: This is the default mode. It keeps the entire conversation history and appends the new assistant's system message on transfer. * * Example: * * Pre-transfer: * system: assistant1 system message * assistant: assistant1 first message * user: hey, good morning * assistant: how can i help? * user: i need help with my account * assistant: (destination.message) * * Post-transfer: * system: assistant1 system message * assistant: assistant1 first message * user: hey, good morning * assistant: how can i help? * user: i need help with my account * assistant: (destination.message) * system: assistant2 system message * assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`) * * - `swap-system-message-in-history`: This replaces the original system message with the new assistant's system message on transfer. * * Example: * * Pre-transfer: * system: assistant1 system message * assistant: assistant1 first message * user: hey, good morning * assistant: how can i help? * user: i need help with my account * assistant: (destination.message) * * Post-transfer: * system: assistant2 system message * assistant: assistant1 first message * user: hey, good morning * assistant: how can i help? 
* user: i need help with my account * assistant: (destination.message) * assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`) * * - `delete-history`: This deletes the entire conversation history on transfer. * * Example: * * Pre-transfer: * system: assistant1 system message * assistant: assistant1 first message * user: hey, good morning * assistant: how can i help? * user: i need help with my account * assistant: (destination.message) * * Post-transfer: * system: assistant2 system message * assistant: assistant2 first message * user: Yes, please * assistant: how can i help? * user: i need help with my account * * - `swap-system-message-in-history-and-remove-transfer-tool-messages`: This replaces the original system message with the new assistant's system message on transfer and removes transfer tool messages from conversation history sent to the LLM. * * Example: * * Pre-transfer: * system: assistant1 system message * assistant: assistant1 first message * user: hey, good morning * assistant: how can i help? * user: i need help with my account * transfer-tool * transfer-tool-result * assistant: (destination.message) * * Post-transfer: * system: assistant2 system message * assistant: assistant1 first message * user: hey, good morning * assistant: how can i help? * user: i need help with my account * assistant: (destination.message) * assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`) * * @default 'rolling-history' */ transferMode?: | "rolling-history" | "swap-system-message-in-history" | "swap-system-message-in-history-and-remove-transfer-tool-messages" | "delete-history"; /** This is the assistant to transfer the call to. */ assistantName: string; /** This is the description of the destination, used by the AI to choose when and how to transfer the call. 
*/
  description?: string;
}

export interface TransferFallbackPlan {
  /** This is the message the assistant will deliver to the customer if the transfer fails. */
  message: string | CustomMessage;
  /**
   * This controls what happens after delivering the failure message to the customer.
   * - true: End the call after delivering the failure message (default)
   * - false: Keep the assistant on the call to continue handling the customer's request
   *
   * @default true
   */
  endCallEnabled?: boolean;
}

export interface TransferAssistantModel {
  /** The model provider for the transfer assistant */
  provider: "openai" | "anthropic" | "google" | "custom-llm";
  /**
   * The model name - must be compatible with the selected provider
   * @example "gpt-4o"
   */
  model: string;
  /**
   * These are the messages used to configure the transfer assistant.
   *
   * @default: ```
   * [
   *   {
   *     role: 'system',
   *     content: 'You are a transfer assistant designed to facilitate call transfers. Your core responsibility is to manage the transfer process efficiently.\n\n## Core Responsibility\n- Facilitate the transfer process by using transferSuccessful or transferCancel tools appropriately\n\n## When to Respond\n- Answer questions about the transfer process or provide summaries when specifically asked by the operator\n- Respond to direct questions about the current transfer situation\n\n## What to Avoid\n- Do not discuss topics unrelated to the transfer\n- Do not engage in general conversation\n- Keep all interactions focused on facilitating the transfer\n\n## Transfer Tools\n- Use transferSuccessful when the transfer should proceed\n- Use transferCancel when the transfer cannot be completed\n\nStay focused on your core responsibility of facilitating transfers.'
   *   }
   * ]```
   *
   * **Default Behavior:** If you don't provide any messages or don't include a system message as the first message, the default system message above will be automatically added.
*
   * **Override Default:** To replace the default system message, provide your own system message as the first message in the array.
   *
   * **Add Context:** You can provide additional messages (user, assistant, etc.) to add context while keeping the default system message, or combine them with your custom system message.
   */
  messages?: any[];
  /**
   * Tools available to the transfer assistant during warm-transfer-experimental.
   *
   * **Default Behavior:** The transfer assistant will ALWAYS have both `transferSuccessful` and `transferCancel` tools automatically added, regardless of what you provide here.
   *
   * **Default Tools:**
   * - `transferSuccessful`: "Call this function to confirm the transfer is successful and connect the customer. Use this when you detect a human has answered and is ready to take the call."
   * - `transferCancel`: "Call this function to cancel the transfer when no human answers or transfer should not proceed. Use this when you detect voicemail, busy signal, or no answer."
   *
   * **Customization:** You can override the default tools by providing `transferSuccessful` and/or `transferCancel` tools with custom `function` or `messages` configurations.
   *
   * **Additional Tools:** You can also provide other tools, but the two transfer tools will always be present and available to the assistant.
   */
  tools?: any[];
}

export interface RegexOption {
  /**
   * This is the type of the regex option. Options are:
   * - `ignore-case`: Ignores the case of the text being matched.
   * - `whole-word`: Matches whole words only.
   * - `multi-line`: Matches across multiple lines.
   */
  type: "ignore-case" | "whole-word" | "multi-line";
  /**
   * This is whether to enable the option.
   *
   * @default false
   */
  enabled: boolean;
}

export interface AssistantCustomEndpointingRule {
  /**
   * This endpointing rule is based on the last assistant message before customer started speaking.
* * Flow: * - Assistant speaks * - Customer starts speaking * - Customer transcription comes in * - This rule is evaluated on the last assistant message * - If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds` * * Usage: * - If you have yes/no questions in your use case like "are you interested in a loan?", you can set a shorter timeout. * - If you have questions where the customer may pause to look up information like "what's my account number?", you can set a longer timeout. */ type: "assistant"; /** * This is the regex pattern to match. * * Note: * - This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test("hello there")` will return `true`. * * Hot tip: * - In JavaScript, escape `\` when sending the regex pattern. Eg. `"hello\sthere"` will be sent over the wire as `"hellosthere"`. Send `"hello\\sthere"` instead. * - `RegExp.test` does substring matching, so `/cat/.test("I love cats")` will return `true`. To do full string matching, send "^cat$". */ regex: string; /** * These are the options for the regex match. Defaults to all disabled. * * @default [] */ regexOptions?: RegexOption[]; /** * This is the endpointing timeout in seconds, if the rule is matched. * @min 0 * @max 15 */ timeoutSeconds: number; } export interface CustomerCustomEndpointingRule { /** * This endpointing rule is based on current customer message as they are speaking. * * Flow: * - Assistant speaks * - Customer starts speaking * - Customer transcription comes in * - This rule is evaluated on the current customer transcription * - If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds` * * Usage: * - If you want to wait longer while customer is speaking numbers, you can set a longer timeout. */ type: "customer"; /** * This is the regex pattern to match. * * Note: * - This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test("hello there")` will return `true`. 
* * Hot tip: * - In JavaScript, escape `\` when sending the regex pattern. Eg. `"hello\sthere"` will be sent over the wire as `"hellosthere"`. Send `"hello\\sthere"` instead. * - `RegExp.test` does substring matching, so `/cat/.test("I love cats")` will return `true`. To do full string matching, send "^cat$". */ regex: string; /** * These are the options for the regex match. Defaults to all disabled. * * @default [] */ regexOptions?: RegexOption[]; /** * This is the endpointing timeout in seconds, if the rule is matched. * @min 0 * @max 15 */ timeoutSeconds: number; } export interface BothCustomEndpointingRule { /** * This endpointing rule is based on both the last assistant message and the current customer message as they are speaking. * * Flow: * - Assistant speaks * - Customer starts speaking * - Customer transcription comes in * - This rule is evaluated on the last assistant message and the current customer transcription * - If assistant message matches `assistantRegex` AND customer message matches `customerRegex`, the endpointing timeout is set to `timeoutSeconds` * * Usage: * - If you want to wait longer while customer is speaking numbers, you can set a longer timeout. */ type: "both"; /** * This is the regex pattern to match the assistant's message. * * Note: * - This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test("hello there")` will return `true`. * * Hot tip: * - In JavaScript, escape `\` when sending the regex pattern. Eg. `"hello\sthere"` will be sent over the wire as `"hellosthere"`. Send `"hello\\sthere"` instead. * - `RegExp.test` does substring matching, so `/cat/.test("I love cats")` will return `true`. To do full string matching, send "^cat$". */ assistantRegex: string; /** * These are the options for the assistant's message regex match. Defaults to all disabled. * * @default [] */ assistantRegexOptions?: RegexOption[]; customerRegex: string; /** * These are the options for the customer's message regex match. 
Defaults to all disabled. * * @default [] */ customerRegexOptions?: RegexOption[]; /** * This is the endpointing timeout in seconds, if the rule is matched. * @min 0 * @max 15 */ timeoutSeconds: number; } export interface VapiSmartEndpointingPlan { /** * This is the provider for the smart endpointing plan. * @example "vapi" */ provider: "vapi" | "livekit" | "custom-endpointing-model"; } export interface LivekitSmartEndpointingPlan { /** * This is the provider for the smart endpointing plan. * @example "livekit" */ provider: "vapi" | "livekit" | "custom-endpointing-model"; /** * This expression describes how long the bot will wait to start speaking based on the likelihood that the user has reached an endpoint. * * This is a millisecond valued function. It maps probabilities (real numbers on [0,1]) to milliseconds that the bot should wait before speaking ([0, \infty]). Any negative values that are returned are set to zero (the bot can't start talking in the past). * * A probability of zero represents very high confidence that the caller has stopped speaking, and would like the bot to speak to them. A probability of one represents very high confidence that the caller is still speaking. * * Under the hood, this is parsed into a mathjs expression. Whatever you use to write your expression needs to be valid with respect to mathjs * * @default "20 + 500 * sqrt(x) + 2500 * x^3" */ waitFunction?: string; } export interface CustomEndpointingModelSmartEndpointingPlan { /** * This is the provider for the smart endpointing plan. Use `custom-endpointing-model` for custom endpointing providers that are not natively supported. * @example "custom-endpointing-model" */ provider: "vapi" | "livekit" | "custom-endpointing-model"; /** * This is where the endpointing request will be sent. If not provided, will be sent to `assistant.server`. If that does not exist either, will be sent to `org.server`. 
   *
   * Request Example:
   *
   * POST https://{server.url}
   * Content-Type: application/json
   *
   * {
   *   "message": {
   *     "type": "call.endpointing.request",
   *     "messages": [
   *       {
   *         "role": "user",
   *         "message": "Hello, how are you?",
   *         "time": 1234567890,
   *         "secondsFromStart": 0
   *       }
   *     ],
   *     ...other metadata about the call...
   *   }
   * }
   *
   * Response Expected:
   * {
   *   "timeoutSeconds": 0.5
   * }
   *
   * The timeout is the number of seconds to wait before considering the user's speech as finished. The endpointing timeout is automatically reset each time a new transcript is received (and another `call.endpointing.request` is sent).
   */
  server?: Server;
}

export interface TranscriptionEndpointingPlan {
  /**
   * The minimum number of seconds to wait after transcription ending with punctuation before sending a request to the model. Defaults to 0.1.
   *
   * This setting exists because the transcriber punctuates the transcription when it's more confident that customer has completed a thought.
   *
   * @default 0.1
   * @min 0
   * @max 3
   * @example 0.1
   */
  onPunctuationSeconds?: number;
  /**
   * The minimum number of seconds to wait after transcription ending without punctuation before sending a request to the model. Defaults to 1.5.
   *
   * This setting exists to catch the cases where the transcriber was not confident enough to punctuate the transcription, but the customer is done and has been silent for a long time.
   *
   * @default 1.5
   * @min 0
   * @max 3
   * @example 1.5
   */
  onNoPunctuationSeconds?: number;
  /**
   * The minimum number of seconds to wait after transcription ending with a number before sending a request to the model. Defaults to 0.5.
   *
   * This setting exists because the transcriber will sometimes punctuate the transcription ending with a number, even though the customer hasn't uttered the full number. This happens commonly for long numbers when the customer reads the number in chunks.
   *
   * @default 0.5
   * @min 0
   * @max 3
   * @example 0.5
   */
  onNumberSeconds?: number;
}

export interface StartSpeakingPlan {
  /**
   * This is how long assistant waits before speaking. Defaults to 0.4.
   *
   * This is the minimum it will wait but if there is latency in the pipeline, this minimum will be exceeded. This is intended as a stopgap in case the pipeline is moving too fast.
   *
   * Example:
   * - If model generates tokens and voice generates bytes within 100ms, the pipeline still waits 300ms before outputting speech.
   *
   * Usage:
   * - If the customer is taking long pauses, set this to a higher value.
   * - If the assistant is accidentally jumping in too much, set this to a higher value.
   *
   * @default 0.4
   * @min 0
   * @max 5
   * @example 0.4
   */
  waitSeconds?: number;
  /**
   * @deprecated
   * @example false
   */
  smartEndpointingEnabled?: boolean | "livekit";
  /**
   * This is the plan for smart endpointing. Pick between Vapi smart endpointing, LiveKit, or custom endpointing model (or nothing). We strongly recommend using livekit endpointing when working in English. LiveKit endpointing is not supported in other languages, yet.
   *
   * If this is set, it will override and take precedence over `transcriptionEndpointingPlan`.
   * This plan will still be overridden by any matching `customEndpointingRules`.
   *
   * If this is not set, the system will automatically use the transcriber's built-in endpointing capabilities if available.
   */
  smartEndpointingPlan?:
    | VapiSmartEndpointingPlan
    | LivekitSmartEndpointingPlan
    | CustomEndpointingModelSmartEndpointingPlan;
  /**
   * These are the custom endpointing rules to set an endpointing timeout based on a regex on the customer's speech or the assistant's last message.
   *
   * Usage:
   * - If you have yes/no questions like "are you interested in a loan?", you can set a shorter timeout.
   * - If you have questions where the customer may pause to look up information like "what's my account number?", you can set a longer timeout.
* - If you want to wait longer while customer is enumerating a list of numbers, you can set a longer timeout. * * These rules have the highest precedence and will override both `smartEndpointingPlan` and `transcriptionEndpointingPlan` when a rule is matched. * * The rules are evaluated in order and the first one that matches will be used. * * Order of precedence for endpointing: * 1. customEndpointingRules (if any match) * 2. smartEndpointingPlan (if set) * 3. transcriptionEndpointingPlan * * @default [] */ customEndpointingRules?: ( | AssistantCustomEndpointingRule | CustomerCustomEndpointingRule | BothCustomEndpointingRule )[]; /** * This determines how a customer speech is considered done (endpointing) using the transcription of customer's speech. * * Once an endpoint is triggered, the request is sent to `assistant.model`. * * Note: This plan is only used if `smartEndpointingPlan` is not set and transcriber does not have built-in endpointing capabilities. If both are provided, `smartEndpointingPlan` takes precedence. * This plan will also be overridden by any matching `customEndpointingRules`. */ transcriptionEndpointingPlan?: TranscriptionEndpointingPlan; } export interface TransferAssistant { /** * Optional name for the transfer assistant * @maxLength 100 * @default "transfer-assistant" * @example "Sales Transfer Assistant" */ name?: string; /** Model configuration for the transfer assistant */ model: TransferAssistantModel; /** These are the options for the transfer assistant's voice. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** These are the options for the transfer assistant's transcriber. 
*/ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** * This is the first message that the transfer assistant will say. * This can also be a URL to a custom audio file. * * If unspecified, assistant will wait for user to speak and use the model to respond once they speak. * @example "Hello! I understand you need to be transferred. Let me connect you." */ firstMessage?: string; /** * This is the background sound in the transfer assistant call. Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** * This is the plan for when the transfer assistant should start talking. * * You should configure this if the transfer assistant needs different endpointing behavior than the base assistant. * * If this is not set, the transfer assistant will inherit the start speaking plan from the base assistant. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the mode for the first message. Default is 'assistant-speaks-first'. * * Use: * - 'assistant-speaks-first' to have the assistant speak first. * - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. * - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. * * @default 'assistant-speaks-first' * @example "assistant-speaks-first" */ firstMessageMode?: | "assistant-speaks-first" | "assistant-speaks-first-with-model-generated-message" | "assistant-waits-for-user"; /** * This is the maximum duration in seconds for the transfer assistant conversation. * After this time, the transfer will be cancelled automatically. 
* @default 120 * @min 10 * @max 43200 * @example 120 */ maxDurationSeconds?: number; /** * This is the number of seconds of silence to wait before ending the call. Defaults to 30. * * @default 30 * @min 10 * @max 3600 */ silenceTimeoutSeconds?: number; } export interface TransferCancelToolUserEditable { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "transferCancel" for Transfer Cancel tool. This tool can only be used during warm-transfer-experimental by the transfer assistant to cancel an ongoing transfer and return the call back to the original assistant when the transfer cannot be completed. */ type: "transferCancel"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface TransferSuccessfulToolUserEditable { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "transferSuccessful" for Transfer Successful tool. This tool can only be used during warm-transfer-experimental by the transfer assistant to confirm that the transfer should proceed and finalize the handoff to the destination. */ type: "transferSuccessful"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: 
Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface SummaryPlan { /** * These are the messages used to generate the summary. * * @default: ``` * [ * { * "role": "system", * "content": "You are an expert note-taker. You will be given a transcript of a call. Summarize the call in 2-3 sentences. DO NOT return anything except the summary." * }, * { * "role": "user", * "content": "Here is the transcript:\n\n{{transcript}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" * } * ]``` * * You can customize by providing any messages you want. 
* * Here are the template variables available: * - {{transcript}}: The transcript of the call from `call.artifact.transcript` * - {{systemPrompt}}: The system prompt of the call from `assistant.model.messages[type=system].content` * - {{messages}}: The messages of the call from `assistant.model.messages` * - {{endedReason}}: The ended reason of the call from `call.endedReason` */ messages?: object[]; /** * This determines whether a summary is generated and stored in `call.analysis.summary`. Defaults to true. * * Usage: * - If you want to disable the summary, set this to false. * * @default true */ enabled?: boolean; /** * This is how long the request is tried before giving up. When request times out, `call.analysis.summary` will be empty. * * Usage: * - To guarantee the summary is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond. * * @default 5 seconds * @min 1 * @max 60 */ timeoutSeconds?: number; } export interface TransferPlan { /** * This configures how transfer is executed and the experience of the destination party receiving the call. * * Usage: * - `blind-transfer`: The assistant forwards the call to the destination without any message or summary. * - `blind-transfer-add-summary-to-sip-header`: The assistant forwards the call to the destination and adds a SIP header X-Transfer-Summary to the call to include the summary. * - `warm-transfer-say-message`: The assistant dials the destination, delivers the `message` to the destination party, connects the customer, and leaves the call. * - `warm-transfer-say-summary`: The assistant dials the destination, provides a summary of the call to the destination party, connects the customer, and leaves the call. * - `warm-transfer-wait-for-operator-to-speak-first-and-then-say-message`: The assistant dials the destination, waits for the operator to speak, delivers the `message` to the destination party, and then connects the customer. 
   * - `warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary`: The assistant dials the destination, waits for the operator to speak, provides a summary of the call to the destination party, and then connects the customer.
   * - `warm-transfer-twiml`: The assistant dials the destination, executes the twiml instructions on the destination call leg, connects the customer, and leaves the call.
   * - `warm-transfer-experimental`: The assistant puts the customer on hold, dials the destination, and if the destination answers (and is human), delivers a message or summary before connecting the customer. If the destination is unreachable or not human (e.g., with voicemail detection), the assistant delivers the `fallbackMessage` to the customer and optionally ends the call.
   *
   * @default 'blind-transfer'
   */
  mode:
    | "blind-transfer"
    | "blind-transfer-add-summary-to-sip-header"
    | "warm-transfer-say-message"
    | "warm-transfer-say-summary"
    | "warm-transfer-twiml"
    | "warm-transfer-wait-for-operator-to-speak-first-and-then-say-message"
    | "warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary"
    | "warm-transfer-experimental";
  /**
   * This is the message the assistant will deliver to the destination party before connecting the customer.
   *
   * Usage:
   * - Used only when `mode` is `blind-transfer-add-summary-to-sip-header`, `warm-transfer-say-message`, `warm-transfer-wait-for-operator-to-speak-first-and-then-say-message`, or `warm-transfer-experimental`.
   */
  message?: string | CustomMessage;
  /**
   * This is the timeout in seconds for the warm-transfer-wait-for-operator-to-speak-first-and-then-say-message/summary
   *
   * @default 60
   * @min 1
   * @max 600
   */
  timeout?: number;
  /**
   * This specifies the SIP verb to use while transferring the call.
   * - 'refer': Uses SIP REFER to transfer the call (default)
   * - 'bye': Ends current call with SIP BYE
   * - 'dial': Uses SIP DIAL to transfer the call
   * @default "refer"
   */
  sipVerb?: "refer" | "bye" | "dial";
  /**
   * This sets the timeout for the dial operation in seconds. This is the duration the call will ring before timing out.
   *
   * Only applicable when `sipVerb='dial'`. Not applicable for SIP REFER or BYE.
   *
   * @default 60
   * @min 1
   * @max 600
   */
  dialTimeout?: number;
  /**
   * This is the URL to an audio file played while the customer is on hold during transfer.
   *
   * Usage:
   * - Used only when `mode` is `warm-transfer-experimental`.
   * - Used when transferring calls to play hold audio for the customer.
   * - Must be a publicly accessible URL to an audio file.
   * - Supported formats: MP3 and WAV.
   * - If not provided, the default hold audio will be used.
   */
  holdAudioUrl?: string;
  /**
   * This is the URL to an audio file played after the warm transfer message or summary is delivered to the destination party.
   * It can be used to play a custom sound like 'beep' to notify that the transfer is complete.
   *
   * Usage:
   * - Used only when `mode` is `warm-transfer-experimental`.
   * - Used when transferring calls to play hold audio for the destination party.
   * - Must be a publicly accessible URL to an audio file.
   * - Supported formats: MP3 and WAV.
   */
  transferCompleteAudioUrl?: string;
  /**
   * This is the plan for manipulating the message context before initiating the warm transfer.
   * Usage:
   * - Used only when `mode` is `warm-transfer-experimental`.
   * - These messages will automatically be added to the transferAssistant's system message.
   * - If 'none', we will not add any transcript to the transferAssistant's system message.
   * - If you want to provide your own messages, use transferAssistant.model.messages instead.
* * @default { type: 'all' } */ contextEngineeringPlan?: | ContextEngineeringPlanLastNMessages | ContextEngineeringPlanNone | ContextEngineeringPlanAll; /** * This is the TwiML instructions to execute on the destination call leg before connecting the customer. * * Usage: * - Used only when `mode` is `warm-transfer-twiml`. * - Supports only `Play`, `Say`, `Gather`, `Hangup` and `Pause` verbs. * - Maximum length is 4096 characters. * * Example: * ``` * Hello, transferring a customer to you. * * They called about billing questions. * ``` * @maxLength 4096 */ twiml?: string; /** * This is the plan for generating a summary of the call to present to the destination party. * * Usage: * - Used only when `mode` is `blind-transfer-add-summary-to-sip-header` or `warm-transfer-say-summary` or `warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary` or `warm-transfer-experimental`. */ summaryPlan?: SummaryPlan; /** * This flag includes the sipHeaders from above in the refer to sip uri as url encoded query params. * * @default false */ sipHeadersInReferToEnabled?: boolean; /** * This configures the fallback plan when the transfer fails (destination unreachable, busy, or not human). * * Usage: * - Used only when `mode` is `warm-transfer-experimental`. * - If not provided when using `warm-transfer-experimental`, a default message will be used. */ fallbackPlan?: TransferFallbackPlan; } export interface TransferDestinationNumber { /** * This is spoken to the customer before connecting them to the destination. * * Usage: * - If this is not provided and transfer tool messages is not provided, default is "Transferring the call now". * - If set to "", nothing is spoken. This is useful when you want to silently transfer. This is especially useful when transferring between assistants in a squad. In this scenario, you likely also want to set `assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message` for the destination assistant. 
   *
   * This accepts a string or a ToolMessageStart class. Latter is useful if you want to specify multiple messages for different languages through the `contents` field.
   */
  message?: string | CustomMessage;
  type: "number";
  /**
   * This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.
   *
   * Use cases:
   * - `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.
   * - `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.
   *
   * If `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\+?[a-zA-Z0-9]+$/`).
   *
   * @default true (E164 check is enabled)
   */
  numberE164CheckEnabled?: boolean;
  /**
   * This is the phone number to transfer the call to.
   * @minLength 3
   * @maxLength 40
   */
  number: string;
  /**
   * This is the extension to dial after transferring the call to the `number`.
   * @minLength 1
   * @maxLength 10
   */
  extension?: string;
  /**
   * This is the caller ID to use when transferring the call to the `number`.
   *
   * Usage:
   * - If not provided, the caller ID will be the number the call is coming from. Example, +14151111111 calls in to and the assistant transfers out to +16470000000. +16470000000 will see +14151111111 as the caller.
   * - To change this behavior, provide a `callerId`.
   * - Set to '{{customer.number}}' to always use the customer's number as the caller ID.
   * - Set to '{{phoneNumber.number}}' to always use the phone number of the assistant as the caller ID.
   * - Set to any E164 number to always use that number as the caller ID. This needs to be a number that is owned or verified by your Transport provider like Twilio.
* * For Twilio, you can read up more here: https://www.twilio.com/docs/voice/twiml/dial#callerid * @maxLength 40 */ callerId?: string; /** * This configures how transfer is executed and the experience of the destination party receiving the call. Defaults to `blind-transfer`. * * @default `transferPlan.mode='blind-transfer'` */ transferPlan?: TransferPlan; /** This is the description of the destination, used by the AI to choose when and how to transfer the call. */ description?: string; } export interface TransferDestinationSip { /** * This is spoken to the customer before connecting them to the destination. * * Usage: * - If this is not provided and transfer tool messages is not provided, default is "Transferring the call now". * - If set to "", nothing is spoken. This is useful when you want to silently transfer. This is especially useful when transferring between assistants in a squad. In this scenario, you likely also want to set `assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message` for the destination assistant. * * This accepts a string or a ToolMessageStart class. Latter is useful if you want to specify multiple messages for different languages through the `contents` field. */ message?: string | CustomMessage; type: "sip"; /** This is the SIP URI to transfer the call to. */ sipUri: string; /** * This configures how transfer is executed and the experience of the destination party receiving the call. Defaults to `blind-transfer`. * * @default `transferPlan.mode='blind-transfer'` */ transferPlan?: TransferPlan; /** These are custom headers to be added to SIP refer during transfer call. */ sipHeaders?: object; /** This is the description of the destination, used by the AI to choose when and how to transfer the call. */ description?: string; } export interface CreateTransferCallToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. 
* * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; type: "transferCall"; /** These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called. */ destinations?: ( | TransferDestinationAssistant | TransferDestinationNumber | TransferDestinationSip )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size 
< 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface ContextEngineeringPlanLastNMessages { type: "lastNMessages"; /** * This is the maximum number of messages to include in the context engineering plan. * @min 0 */ maxMessages: number; } export interface ContextEngineeringPlanNone { type: "none"; } export interface ContextEngineeringPlanAll { type: "all"; } export interface VariableExtractionAlias { /** * This is the key of the variable. * * This variable will be accessible during the call as `{{key}}` and stored in `call.artifact.variableValues` after the call. * * Rules: * - Must start with a letter (a-z, A-Z). * - Subsequent characters can be letters, numbers, or underscores. * - Minimum length of 1 and maximum length of 40. * @minLength 1 * @maxLength 40 * @pattern /^[a-zA-Z][a-zA-Z0-9_]*$/ */ key: string; /** * This is the value of the variable. * * This can reference existing variables, use filters, and perform transformations. * * Examples: "{{name}}", "{{customer.email}}", "Hello {{name | upcase}}" * @maxLength 10000 */ value: string; } export interface VariableExtractionPlan { /** * This is the schema to extract. * * Examples: * 1. 
To extract object properties, you can use the following schema: * ```json * { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * } * } * } * ``` * * These will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables. * * 2. To extract nested properties, you can use the following schema: * ```json * { * "type": "object", * "properties": { * "name": { * "type": "object", * "properties": { * "first": { * "type": "string" * }, * "last": { * "type": "string" * } * } * } * } * } * ``` * * These will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible. * * 3. To extract array items, you can use the following schema: * ```json * { * "type": "array", * "title": "zipCodes", * "items": { * "type": "string" * } * } * ``` * * This will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`. * * 4. To extract array of objects, you can use the following schema: * * ```json * { * "type": "array", * "name": "people", * "items": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * }, * "zipCodes": { * "type": "array", * "items": { * "type": "string" * } * } * } * } * } * ``` * * This will be extracted as `{{ people }}`. To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`. */ schema?: JsonSchema; /** * These are additional variables to create. * * These will be accessible during the call as `{{key}}` and stored in `call.artifact.variableValues` after the call. 
* * Example: * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{name}}" * }, * { * "key": "fullName", * "value": "{{firstName}} {{lastName}}" * }, * { * "key": "greeting", * "value": "Hello {{name}}, welcome to {{company}}!" * }, * { * "key": "customerCity", * "value": "{{addresses[0].city}}" * }, * { * "key": "something", * "value": "{{any liquid}}" * } * ] * } * ``` * * This will create variables `customerName`, `fullName`, `greeting`, `customerCity`, and `something`. To access these variables, you can reference them as `{{customerName}}`, `{{fullName}}`, `{{greeting}}`, `{{customerCity}}`, and `{{something}}`. */ aliases?: VariableExtractionAlias[]; } export interface HandoffDestinationAssistant { type: "assistant"; /** This is the plan for manipulating the message context before handing off the call to the next assistant. */ contextEngineeringPlan?: | ContextEngineeringPlanLastNMessages | ContextEngineeringPlanNone | ContextEngineeringPlanAll; /** This is the assistant to transfer the call to. You must provide either assistantName or assistantId. */ assistantName?: string; /** This is the assistant id to transfer the call to. You must provide either assistantName or assistantId. */ assistantId?: string; /** This is a transient assistant to transfer the call to. You may provide a transient assistant in the response `handoff-destination-request` in a dynamic handoff. */ assistant?: CreateAssistantDTO; /** This is the variable extraction plan for the handoff tool. */ variableExtractionPlan?: VariableExtractionPlan; /** These are the assistant overrides to apply to the destination assistant. */ assistantOverrides?: AssistantOverrides; /** This is the description of the destination, used by the AI to choose when and how to transfer the call. */ description?: string; } export interface HandoffDestinationDynamic { type: "dynamic"; /** * This is where Vapi will send the handoff-destination-request webhook in a dynamic handoff. 
* * The order of precedence is: * * 1. tool.server.url * 2. assistant.server.url * 3. phoneNumber.server.url * 4. org.server.url */ server?: Server; /** This is the description of the destination, used by the AI to choose when and how to transfer the call. */ description?: string; } export interface CreateHandoffToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the type of the tool. * When you're using handoff tool, we recommend adding this to your system prompt * --- * # System context * * You are part of a multi-agent system designed to make agent coordination and execution easy. Agents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate. Handoffs are achieved by calling a handoff function, generally named `handoff_to_`. Handoffs between agents are handled seamlessly in the background; do not mention or draw attention to these handoffs in your conversation with the user. * * # Agent context * * {put your agent system prompt here} * --- */ type: "handoff"; /** * These are the destinations that the call can be handed off to. * * Usage: * 1. Single destination * * Use `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad. * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", // or "assistantName": "Assistant123" * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 2. 
Multiple destinations * * 2.1. Multiple Tools, Each With One Destination (OpenAI recommended) * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * ], * }, * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 2.2. One Tool, Multiple Destinations (Anthropic recommended) * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 3. Dynamic destination * * 3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object. * VAPI will send a handoff-destination-request webhook to the `server.url`. * The response from the server will be used as the destination (if valid). * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "dynamic", * "server": { * "url": "https://example.com" * } * } * ], * } * ] * } * ``` * * 3.2. To pass custom parameters to the server, you can use the `function` object. 
* * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "dynamic", * "server": { * "url": "https://example.com" * }, * } * ], * "function": { * "name": "handoff", * "description": "Call this function when the customer is ready to be handed off to the next assistant", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "description": "Use dynamic when customer is ready to be handed off to the next assistant", * "enum": ["dynamic"] * }, * "customerAreaCode": { * "type": "number", * "description": "Area code of the customer" * }, * "customerIntent": { * "type": "string", * "enum": ["new-customer", "existing-customer"], * "description": "Use new-customer when customer is a new customer, existing-customer when customer is an existing customer" * }, * "customerSentiment": { * "type": "string", * "enum": ["positive", "negative", "neutral"], * "description": "Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral" * } * } * } * } * } * ] * } * ``` * * The properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body. */ destinations?: (HandoffDestinationAssistant | HandoffDestinationDynamic)[]; /** * This is the optional function definition that will be passed to the LLM. * If this is not defined, we will construct this based on the other properties. 
* * For example, given the following tools definition: * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * We will construct the following function definition: * ```json * { * "function": { * "name": "handoff_to_assistant-123", * "description": " * Use this function to handoff the call to the next assistant. * Only use it when instructions explicitly ask you to use the handoff_to_assistant function. * DO NOT call this function unless you are instructed to do so. * Here are the destinations you can handoff the call to: * 1. assistant-123. When: customer wants to be handed off to assistant-123 * 2. assistant-456. When: customer wants to be handed off to assistant-456 * ", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "description": "Options: assistant-123 (customer wants to be handed off to assistant-123), assistant-456 (customer wants to be handed off to assistant-456)", * "enum": ["assistant-123", "assistant-456"] * }, * }, * "required": ["destination"] * } * } * } * ``` * * To override this function, please provide an OpenAI function definition and refer to it in the system prompt. * You may override parts of the function definition (i.e. you may only want to change the function name for your prompt). * If you choose to override the function parameters, it must include `destination` as a required parameter, and it must evaluate to either an assistantId, assistantName, or a the string literal `dynamic`. 
* * To pass custom parameters to the server in a dynamic handoff, you can use the function parameters, with `dynamic` as the destination. * ```json * { * "function": { * "name": "dynamic_handoff", * "description": " * Call this function when the customer is ready to be handed off to the next assistant * ", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "enum": ["dynamic"] * }, * "customerAreaCode": { * "type": "number", * "description": "Area code of the customer" * }, * "customerIntent": { * "type": "string", * "enum": ["new-customer", "existing-customer"], * "description": "Use new-customer when customer is a new customer, existing-customer when customer is an existing customer" * }, * "customerSentiment": { * "type": "string", * "enum": ["positive", "negative", "neutral"], * "description": "Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral" * } * }, * "required": ["destination", "customerAreaCode", "customerIntent", "customerSentiment"] * } * } * } * ``` */ function?: OpenAIFunction; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateCustomKnowledgeBaseDTO { /** This knowledge base is bring your own knowledge base implementation. */ provider: "custom-knowledge-base"; /** * This is where the knowledge base request will be sent. * * Request Example: * * POST https://{server.url} * Content-Type: application/json * * { * "messsage": { * "type": "knowledge-base-request", * "messages": [ * { * "role": "user", * "content": "Why is ocean blue?" * } * ], * ...other metadata about the call... * } * } * * Response Expected: * ``` * { * "message": { * "role": "assistant", * "content": "The ocean is blue because water absorbs everything but blue.", * }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK * "documents": [ * { * "content": "The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.", * "similarity": 1 * }, * { * "content": "Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.", * "similarity": .5 * } * ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL * } * ``` */ server: Server; } export interface KnowledgeBase { /** * The name of the knowledge base * @example "My Knowledge Base" */ name: string; /** * The provider of the knowledge base * @example "google" */ provider: "google"; /** The model to use for the knowledge base */ model?: | "gemini-2.5-pro" | "gemini-2.5-flash" | "gemini-2.5-flash-lite" | "gemini-2.0-flash-thinking-exp" | "gemini-2.0-pro-exp-02-05" | "gemini-2.0-flash" | "gemini-2.0-flash-lite" | "gemini-2.0-flash-exp" | "gemini-2.0-flash-realtime-exp" | "gemini-1.5-flash" | "gemini-1.5-flash-002" | "gemini-1.5-pro" | "gemini-1.5-pro-002" | "gemini-1.0-pro"; /** A description of the knowledge base */ description: 
string; /** The file IDs associated with this knowledge base */ fileIds: string[]; } export interface CreateQueryToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "query" for Query tool. */ type: "query"; /** The knowledge bases to query */ knowledgeBases?: KnowledgeBase[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse 
%} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateGoogleCalendarCreateEventToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.calendar.event.create" for Google Calendar Create Event tool. */ type: "google.calendar.event.create"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateGoogleSheetsRowAppendToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.sheets.row.append" for Google Sheets Row Append tool. */ type: "google.sheets.row.append"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | 
last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateGoogleCalendarCheckAvailabilityToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.calendar.availability.check" for Google Calendar Check Availability tool. */ type: "google.calendar.availability.check"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateSlackSendMessageToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "slack.message.send" for Slack Send Message tool. */ type: "slack.message.send"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign 
userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface McpToolMetadata { /** This is the protocol used for MCP communication. Defaults to Streamable HTTP. */ protocol?: "sse" | "shttp"; } export interface CreateMcpToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "mcp" for MCP tool. */ type: "mcp"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. 
*/ server?: Server; metadata?: McpToolMetadata; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 
'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateGoHighLevelCalendarAvailabilityToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.calendar.availability.check" for GoHighLevel Calendar Availability Check tool. */ type: "gohighlevel.calendar.availability.check"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * 
``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateGoHighLevelCalendarEventCreateToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.calendar.event.create" for GoHighLevel Calendar Event Create tool. */ type: "gohighlevel.calendar.event.create"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateGoHighLevelContactCreateToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.contact.create" for GoHighLevel Contact Create tool. */ type: "gohighlevel.contact.create"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = 
messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateGoHighLevelContactGetToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.contact.get" for GoHighLevel Contact Get tool. */ type: "gohighlevel.contact.get"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface OpenAIMessage { /** @maxLength 100000000 */ content: string | null; role: "assistant" | "function" | "user" | "system" | "tool"; } export interface AnyscaleModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; provider: "anyscale"; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: string; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. 
* @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface AnthropicThinkingConfig { type: "enabled"; /** * The maximum number of tokens to allocate for thinking. * Must be between 1024 and 100000 tokens. * @min 1024 * @max 100000 */ budgetTokens: number; } export interface AnthropicModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. 
*/ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** The specific Anthropic/Claude model that will be used. */ model: | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-3-5-sonnet-20240620" | "claude-3-5-sonnet-20241022" | "claude-3-5-haiku-20241022" | "claude-3-7-sonnet-20250219" | "claude-opus-4-20250514" | "claude-opus-4-5-20251101" | "claude-sonnet-4-20250514" | "claude-sonnet-4-5-20250929" | "claude-haiku-4-5-20251001"; /** The provider identifier for Anthropic. */ provider: "anthropic"; /** * Optional configuration for Anthropic's thinking feature. * Only applicable for claude-3-7-sonnet-20250219 model. * If provided, maxTokens must be greater than thinking.budgetTokens. */ thinking?: AnthropicThinkingConfig; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface CerebrasModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. 
*/ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: "llama3.1-8b" | "llama-3.3-70b"; provider: "cerebras"; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. 
* * @default 0 * @min 0 */ numFastTurns?: number; } export interface CustomLLMModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the provider that will be used for the model. Any service, including your own server, that is compatible with the OpenAI API can be used. */ provider: "custom-llm"; /** * This determines whether metadata is sent in requests to the custom provider. * * - `off` will not send any metadata. payload will look like `{ messages }` * - `variable` will send `assistant.metadata` as a variable on the payload. payload will look like `{ messages, metadata }` * - `destructured` will send `assistant.metadata` fields directly on the payload. payload will look like `{ messages, ...metadata }` * * Further, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload. * * Default is `variable`. 
*/ metadataSendMode?: "off" | "variable" | "destructured"; /** * Custom headers to send with requests. These headers can override default OpenAI headers except for Authorization (which should be specified using a custom-llm credential). * @example {"X-Custom-Header":"value"} */ headers?: Record; /** These is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1 */ url: string; /** * This determines whether the transcriber's word level confidence is sent in requests to the custom provider. Default is false. * This only works for Deepgram transcribers. */ wordLevelConfidenceEnabled?: boolean; /** * This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds. * @min 0 * @max 300 */ timeoutSeconds?: number; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: string; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface DeepInfraModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. 
To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; provider: "deepinfra"; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: string; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. 
* * @default 0 * @min 0 */ numFastTurns?: number; } export interface DeepSeekModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: "deepseek-chat" | "deepseek-reasoner"; provider: "deep-seek"; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. 
* * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface GeminiMultimodalLivePrebuiltVoiceConfig { voiceName: "Puck" | "Charon" | "Kore" | "Fenrir" | "Aoede"; } export interface GeminiMultimodalLiveVoiceConfig { prebuiltVoiceConfig: GeminiMultimodalLivePrebuiltVoiceConfig; } export interface GeminiMultimodalLiveSpeechConfig { voiceConfig: GeminiMultimodalLiveVoiceConfig; } export interface GoogleRealtimeConfig { /** * This is the nucleus sampling parameter that controls the cumulative probability of tokens considered during text generation. * Only applicable with the Gemini Flash 2.0 Multimodal Live API. */ topP?: number; /** * This is the top-k sampling parameter that limits the number of highest probability tokens considered during text generation. * Only applicable with the Gemini Flash 2.0 Multimodal Live API. */ topK?: number; /** * This is the presence penalty parameter that influences the model's likelihood to repeat information by penalizing tokens based on their presence in the text. * Only applicable with the Gemini Flash 2.0 Multimodal Live API. */ presencePenalty?: number; /** * This is the frequency penalty parameter that influences the model's likelihood to repeat tokens by penalizing them based on their frequency in the text. * Only applicable with the Gemini Flash 2.0 Multimodal Live API. */ frequencyPenalty?: number; /** * This is the speech configuration object that defines the voice settings to be used for the model's speech output. * Only applicable with the Gemini Flash 2.0 Multimodal Live API. */ speechConfig?: GeminiMultimodalLiveSpeechConfig; } export interface GoogleModel { /** This is the starting state for the conversation. 
*/ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the Google model that will be used. */ model: | "gemini-2.5-pro" | "gemini-2.5-flash" | "gemini-2.5-flash-lite" | "gemini-2.0-flash-thinking-exp" | "gemini-2.0-pro-exp-02-05" | "gemini-2.0-flash" | "gemini-2.0-flash-lite" | "gemini-2.0-flash-exp" | "gemini-2.0-flash-realtime-exp" | "gemini-1.5-flash" | "gemini-1.5-flash-002" | "gemini-1.5-pro" | "gemini-1.5-pro-002" | "gemini-1.0-pro"; provider: "google"; /** * This is the session configuration for the Gemini Flash 2.0 Multimodal Live API. * Only applicable if the model `gemini-2.0-flash-realtime-exp` is selected. */ realtimeConfig?: GoogleRealtimeConfig; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. 
Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface GroqModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the name of the model. Ex. 
cognitivecomputations/dolphin-mixtral-8x7b */ model: | "openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "deepseek-r1-distill-llama-70b" | "llama-3.3-70b-versatile" | "llama-3.1-405b-reasoning" | "llama-3.1-8b-instant" | "llama3-8b-8192" | "llama3-70b-8192" | "gemma2-9b-it" | "moonshotai/kimi-k2-instruct-0905" | "meta-llama/llama-4-maverick-17b-128e-instruct" | "meta-llama/llama-4-scout-17b-16e-instruct" | "mistral-saba-24b" | "compound-beta" | "compound-beta-mini"; provider: "groq"; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface InflectionAIModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. 
*/ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: "inflection_3_pi"; provider: "inflection-ai"; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. 
* * @default 0 * @min 0 */ numFastTurns?: number; } export interface OpenAIModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the provider that will be used for the model. */ provider: "openai"; /** * This is the OpenAI model that will be used. * * When using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that make sense. * This is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/. 
* * @default undefined */ model: | "gpt-5.1" | "gpt-5.1-chat-latest" | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gpt-4.1-2025-04-14" | "gpt-4.1-mini-2025-04-14" | "gpt-4.1-nano-2025-04-14" | "gpt-4.1" | "gpt-4.1-mini" | "gpt-4.1-nano" | "chatgpt-4o-latest" | "o3" | "o3-mini" | "o4-mini" | "o1-mini" | "o1-mini-2024-09-12" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-realtime-2025-08-28" | "gpt-4o-mini-2024-07-18" | "gpt-4o-mini" | "gpt-4o" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "gpt-4o-2024-11-20" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-turbo-preview" | "gpt-4-0125-preview" | "gpt-4-1106-preview" | "gpt-4" | "gpt-4-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0613" | "gpt-4.1-2025-04-14:westus" | "gpt-4.1-2025-04-14:eastus2" | "gpt-4.1-2025-04-14:eastus" | "gpt-4.1-2025-04-14:westus3" | "gpt-4.1-2025-04-14:northcentralus" | "gpt-4.1-2025-04-14:southcentralus" | "gpt-4.1-mini-2025-04-14:westus" | "gpt-4.1-mini-2025-04-14:eastus2" | "gpt-4.1-mini-2025-04-14:eastus" | "gpt-4.1-mini-2025-04-14:westus3" | "gpt-4.1-mini-2025-04-14:northcentralus" | "gpt-4.1-mini-2025-04-14:southcentralus" | "gpt-4.1-nano-2025-04-14:westus" | "gpt-4.1-nano-2025-04-14:eastus2" | "gpt-4.1-nano-2025-04-14:westus3" | "gpt-4.1-nano-2025-04-14:northcentralus" | "gpt-4.1-nano-2025-04-14:southcentralus" | "gpt-4o-2024-11-20:swedencentral" | "gpt-4o-2024-11-20:westus" | "gpt-4o-2024-11-20:eastus2" | "gpt-4o-2024-11-20:eastus" | "gpt-4o-2024-11-20:westus3" | "gpt-4o-2024-11-20:southcentralus" | "gpt-4o-2024-08-06:westus" | "gpt-4o-2024-08-06:westus3" | "gpt-4o-2024-08-06:eastus" | "gpt-4o-2024-08-06:eastus2" | "gpt-4o-2024-08-06:northcentralus" | "gpt-4o-2024-08-06:southcentralus" | "gpt-4o-mini-2024-07-18:westus" | "gpt-4o-mini-2024-07-18:westus3" | "gpt-4o-mini-2024-07-18:eastus" | "gpt-4o-mini-2024-07-18:eastus2" | 
"gpt-4o-mini-2024-07-18:northcentralus" | "gpt-4o-mini-2024-07-18:southcentralus" | "gpt-4o-2024-05-13:eastus2" | "gpt-4o-2024-05-13:eastus" | "gpt-4o-2024-05-13:northcentralus" | "gpt-4o-2024-05-13:southcentralus" | "gpt-4o-2024-05-13:westus3" | "gpt-4o-2024-05-13:westus" | "gpt-4-turbo-2024-04-09:eastus2" | "gpt-4-0125-preview:eastus" | "gpt-4-0125-preview:northcentralus" | "gpt-4-0125-preview:southcentralus" | "gpt-4-1106-preview:australia" | "gpt-4-1106-preview:canadaeast" | "gpt-4-1106-preview:france" | "gpt-4-1106-preview:india" | "gpt-4-1106-preview:norway" | "gpt-4-1106-preview:swedencentral" | "gpt-4-1106-preview:uk" | "gpt-4-1106-preview:westus" | "gpt-4-1106-preview:westus3" | "gpt-4-0613:canadaeast" | "gpt-3.5-turbo-0125:canadaeast" | "gpt-3.5-turbo-0125:northcentralus" | "gpt-3.5-turbo-0125:southcentralus" | "gpt-3.5-turbo-1106:canadaeast" | "gpt-3.5-turbo-1106:westus"; /** * These are the fallback models that will be used if the primary model fails. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest fallbacks that make sense. 
* @example ["gpt-4-0125-preview","gpt-4-0613"] */ fallbackModels?: | "gpt-5.1" | "gpt-5.1-chat-latest" | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gpt-4.1-2025-04-14" | "gpt-4.1-mini-2025-04-14" | "gpt-4.1-nano-2025-04-14" | "gpt-4.1" | "gpt-4.1-mini" | "gpt-4.1-nano" | "chatgpt-4o-latest" | "o3" | "o3-mini" | "o4-mini" | "o1-mini" | "o1-mini-2024-09-12" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-realtime-2025-08-28" | "gpt-4o-mini-2024-07-18" | "gpt-4o-mini" | "gpt-4o" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "gpt-4o-2024-11-20" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-turbo-preview" | "gpt-4-0125-preview" | "gpt-4-1106-preview" | "gpt-4" | "gpt-4-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0613" | "gpt-4.1-2025-04-14:westus" | "gpt-4.1-2025-04-14:eastus2" | "gpt-4.1-2025-04-14:eastus" | "gpt-4.1-2025-04-14:westus3" | "gpt-4.1-2025-04-14:northcentralus" | "gpt-4.1-2025-04-14:southcentralus" | "gpt-4.1-mini-2025-04-14:westus" | "gpt-4.1-mini-2025-04-14:eastus2" | "gpt-4.1-mini-2025-04-14:eastus" | "gpt-4.1-mini-2025-04-14:westus3" | "gpt-4.1-mini-2025-04-14:northcentralus" | "gpt-4.1-mini-2025-04-14:southcentralus" | "gpt-4.1-nano-2025-04-14:westus" | "gpt-4.1-nano-2025-04-14:eastus2" | "gpt-4.1-nano-2025-04-14:westus3" | "gpt-4.1-nano-2025-04-14:northcentralus" | "gpt-4.1-nano-2025-04-14:southcentralus" | "gpt-4o-2024-11-20:swedencentral" | "gpt-4o-2024-11-20:westus" | "gpt-4o-2024-11-20:eastus2" | "gpt-4o-2024-11-20:eastus" | "gpt-4o-2024-11-20:westus3" | "gpt-4o-2024-11-20:southcentralus" | "gpt-4o-2024-08-06:westus" | "gpt-4o-2024-08-06:westus3" | "gpt-4o-2024-08-06:eastus" | "gpt-4o-2024-08-06:eastus2" | "gpt-4o-2024-08-06:northcentralus" | "gpt-4o-2024-08-06:southcentralus" | "gpt-4o-mini-2024-07-18:westus" | "gpt-4o-mini-2024-07-18:westus3" | "gpt-4o-mini-2024-07-18:eastus" | 
"gpt-4o-mini-2024-07-18:eastus2" | "gpt-4o-mini-2024-07-18:northcentralus" | "gpt-4o-mini-2024-07-18:southcentralus" | "gpt-4o-2024-05-13:eastus2" | "gpt-4o-2024-05-13:eastus" | "gpt-4o-2024-05-13:northcentralus" | "gpt-4o-2024-05-13:southcentralus" | "gpt-4o-2024-05-13:westus3" | "gpt-4o-2024-05-13:westus" | "gpt-4-turbo-2024-04-09:eastus2" | "gpt-4-0125-preview:eastus" | "gpt-4-0125-preview:northcentralus" | "gpt-4-0125-preview:southcentralus" | "gpt-4-1106-preview:australia" | "gpt-4-1106-preview:canadaeast" | "gpt-4-1106-preview:france" | "gpt-4-1106-preview:india" | "gpt-4-1106-preview:norway" | "gpt-4-1106-preview:swedencentral" | "gpt-4-1106-preview:uk" | "gpt-4-1106-preview:westus" | "gpt-4-1106-preview:westus3" | "gpt-4-0613:canadaeast" | "gpt-3.5-turbo-0125:canadaeast" | "gpt-3.5-turbo-0125:northcentralus" | "gpt-3.5-turbo-0125:southcentralus" | "gpt-3.5-turbo-1106:canadaeast" | "gpt-3.5-turbo-1106:westus"; /** * Azure OpenAI doesn't support `maxLength` right now https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/structured-outputs?tabs=python-secure%2Cdotnet-entra-id&pivots=programming-language-csharp#unsupported-type-specific-keywords. Need to strip. * * - `strip-parameters-with-unsupported-validation` will strip parameters with unsupported validation. * - `strip-unsupported-validation` will keep the parameters but strip unsupported validation. * * @default `strip-unsupported-validation` */ toolStrictCompatibilityMode?: | "strip-parameters-with-unsupported-validation" | "strip-unsupported-validation"; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. 
* @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface OpenRouterModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; provider: "openrouter"; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: string; /** * This is the temperature that will be used for calls. 
Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface PerplexityAIModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. 
*/ knowledgeBase?: CreateCustomKnowledgeBaseDTO; provider: "perplexity-ai"; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: string; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface TogetherAIModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. 
*/ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; provider: "together-ai"; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: string; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. 
* * @default 0 * @min 0 */ numFastTurns?: number; } export interface HangupNode { type: "hangup"; /** @maxLength 80 */ name: string; /** This is whether or not the node is the start of the workflow. */ isStart?: boolean; /** This is for metadata you want to store on the task. */ metadata?: object; } export interface WorkflowOpenAIModel { /** This is the provider of the model (`openai`). */ provider: "openai"; /** * This is the OpenAI model that will be used. * * When using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that make sense. * This is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/. * @maxLength 100 */ model: | "gpt-5.1" | "gpt-5.1-chat-latest" | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gpt-4.1-2025-04-14" | "gpt-4.1-mini-2025-04-14" | "gpt-4.1-nano-2025-04-14" | "gpt-4.1" | "gpt-4.1-mini" | "gpt-4.1-nano" | "chatgpt-4o-latest" | "o3" | "o3-mini" | "o4-mini" | "o1-mini" | "o1-mini-2024-09-12" | "gpt-4o-mini-2024-07-18" | "gpt-4o-mini" | "gpt-4o" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "gpt-4o-2024-11-20" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-turbo-preview" | "gpt-4-0125-preview" | "gpt-4-1106-preview" | "gpt-4" | "gpt-4-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0613" | "gpt-4.1-2025-04-14:westus" | "gpt-4.1-2025-04-14:eastus2" | "gpt-4.1-2025-04-14:eastus" | "gpt-4.1-2025-04-14:westus3" | "gpt-4.1-2025-04-14:northcentralus" | "gpt-4.1-2025-04-14:southcentralus" | "gpt-4.1-mini-2025-04-14:westus" | "gpt-4.1-mini-2025-04-14:eastus2" | "gpt-4.1-mini-2025-04-14:eastus" | "gpt-4.1-mini-2025-04-14:westus3" | "gpt-4.1-mini-2025-04-14:northcentralus" | 
"gpt-4.1-mini-2025-04-14:southcentralus" | "gpt-4.1-nano-2025-04-14:westus" | "gpt-4.1-nano-2025-04-14:eastus2" | "gpt-4.1-nano-2025-04-14:westus3" | "gpt-4.1-nano-2025-04-14:northcentralus" | "gpt-4.1-nano-2025-04-14:southcentralus" | "gpt-4o-2024-11-20:swedencentral" | "gpt-4o-2024-11-20:westus" | "gpt-4o-2024-11-20:eastus2" | "gpt-4o-2024-11-20:eastus" | "gpt-4o-2024-11-20:westus3" | "gpt-4o-2024-11-20:southcentralus" | "gpt-4o-2024-08-06:westus" | "gpt-4o-2024-08-06:westus3" | "gpt-4o-2024-08-06:eastus" | "gpt-4o-2024-08-06:eastus2" | "gpt-4o-2024-08-06:northcentralus" | "gpt-4o-2024-08-06:southcentralus" | "gpt-4o-mini-2024-07-18:westus" | "gpt-4o-mini-2024-07-18:westus3" | "gpt-4o-mini-2024-07-18:eastus" | "gpt-4o-mini-2024-07-18:eastus2" | "gpt-4o-mini-2024-07-18:northcentralus" | "gpt-4o-mini-2024-07-18:southcentralus" | "gpt-4o-2024-05-13:eastus2" | "gpt-4o-2024-05-13:eastus" | "gpt-4o-2024-05-13:northcentralus" | "gpt-4o-2024-05-13:southcentralus" | "gpt-4o-2024-05-13:westus3" | "gpt-4o-2024-05-13:westus" | "gpt-4-turbo-2024-04-09:eastus2" | "gpt-4-0125-preview:eastus" | "gpt-4-0125-preview:northcentralus" | "gpt-4-0125-preview:southcentralus" | "gpt-4-1106-preview:australia" | "gpt-4-1106-preview:canadaeast" | "gpt-4-1106-preview:france" | "gpt-4-1106-preview:india" | "gpt-4-1106-preview:norway" | "gpt-4-1106-preview:swedencentral" | "gpt-4-1106-preview:uk" | "gpt-4-1106-preview:westus" | "gpt-4-1106-preview:westus3" | "gpt-4-0613:canadaeast" | "gpt-3.5-turbo-0125:canadaeast" | "gpt-3.5-turbo-0125:northcentralus" | "gpt-3.5-turbo-0125:southcentralus" | "gpt-3.5-turbo-1106:canadaeast" | "gpt-3.5-turbo-1106:westus"; /** * This is the temperature of the model. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * @min 50 * @max 10000 */ maxTokens?: number; } export interface WorkflowAnthropicModel { /** This is the provider of the model (`anthropic`). 
*/ provider: "anthropic"; /** * This is the specific model that will be used. * @maxLength 100 */ model: | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-3-5-sonnet-20240620" | "claude-3-5-sonnet-20241022" | "claude-3-5-haiku-20241022" | "claude-3-7-sonnet-20250219" | "claude-opus-4-20250514" | "claude-opus-4-5-20251101" | "claude-sonnet-4-20250514" | "claude-sonnet-4-5-20250929" | "claude-haiku-4-5-20251001"; /** * This is the optional configuration for Anthropic's thinking feature. * * - If provided, `maxTokens` must be greater than `thinking.budgetTokens`. */ thinking?: AnthropicThinkingConfig; /** * This is the temperature of the model. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * @min 50 * @max 10000 */ maxTokens?: number; } export interface WorkflowGoogleModel { /** This is the provider of the model (`google`). */ provider: "google"; /** * This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b * @maxLength 100 */ model: | "gemini-2.5-pro" | "gemini-2.5-flash" | "gemini-2.5-flash-lite" | "gemini-2.0-flash-thinking-exp" | "gemini-2.0-pro-exp-02-05" | "gemini-2.0-flash" | "gemini-2.0-flash-lite" | "gemini-2.0-flash-exp" | "gemini-2.0-flash-realtime-exp" | "gemini-1.5-flash" | "gemini-1.5-flash-002" | "gemini-1.5-pro" | "gemini-1.5-pro-002" | "gemini-1.0-pro"; /** * This is the temperature of the model. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * @min 50 * @max 10000 */ maxTokens?: number; } export interface WorkflowCustomModel { /** This is the provider of the model (`custom-llm`). */ provider: "custom-llm"; /** * This determines whether metadata is sent in requests to the custom provider. * * - `off` will not send any metadata. payload will look like `{ messages }` * - `variable` will send `assistant.metadata` as a variable on the payload. 
payload will look like `{ messages, metadata }` * - `destructured` will send `assistant.metadata` fields directly on the payload. payload will look like `{ messages, ...metadata }` * * Further, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload. * * Default is `variable`. */ metadataSendMode?: "off" | "variable" | "destructured"; /** These is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1 */ url: string; /** These are the headers we'll use for the OpenAI client's `headers`. */ headers?: object; /** * This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds. * @min 20 * @max 600 */ timeoutSeconds?: number; /** * This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b * @maxLength 100 */ model: string; /** * This is the temperature of the model. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * @min 50 * @max 10000 */ maxTokens?: number; } export interface GlobalNodePlan { /** * This is the flag to determine if this node is a global node * * @default false * @default false */ enabled?: boolean; /** * This is the condition that will be checked to determine if the global node should be executed. * * @default '' * @maxLength 1000 * @default "" */ enterCondition?: string; } export interface ConversationNode { /** * This is the Conversation node. This can be used to start a conversation with the customer. * * The flow is: * - Workflow starts the conversation node * - Model is active with the `prompt` and global context. * - Model will call a tool to exit this node. * - Workflow will extract variables from the conversation. * - Workflow continues. */ type: "conversation"; /** * This is the model for the node. * * This overrides `workflow.model`. 
*/ model?: | WorkflowOpenAIModel | WorkflowAnthropicModel | WorkflowGoogleModel | WorkflowCustomModel; /** * This is the transcriber for the node. * * This overrides `workflow.transcriber`. */ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** * This is the voice for the node. * * This overrides `workflow.voice`. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * These are the tools that the conversation node can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the conversation node can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** @maxLength 5000 */ prompt?: string; /** This is the plan for the global node. */ globalNodePlan?: GlobalNodePlan; /** * This is the plan that controls the variable extraction from the user's responses. 
* * Usage: * Use `schema` to specify what you want to extract from the user's responses. * ```json * { * "schema": { * "type": "object", * "properties": { * "user": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * } * } * } * } * } * } * ``` * * This will be extracted as `{{ user.name }}` and `{{ user.age }}` respectively. * * (Optional) Use `aliases` to create new variables. * * ```json * { * "aliases": [ * { * "key": "userAge", * "value": "{{user.age}}" * }, * { * "key": "userName", * "value": "{{user.name}}" * } * ] * } * ``` * * This will be extracted as `{{ userAge }}` and `{{ userName }}` respectively. * * Note: The `schema` field is required for Conversation nodes if you want to extract variables from the user's responses. `aliases` is just a convenience. */ variableExtractionPlan?: VariableExtractionPlan; /** @maxLength 80 */ name: string; /** This is whether or not the node is the start of the workflow. */ isStart?: boolean; /** This is for metadata you want to store on the task. */ metadata?: object; } export interface ToolNode { /** * This is the Tool node. This can be used to call a tool in your workflow. * * The flow is: * - Workflow starts the tool node * - Model is called to extract parameters needed by the tool from the conversation history * - Tool is called with the parameters * - Server returns a response * - Workflow continues with the response */ type: "tool"; /** This is the tool to call. To use an existing tool, send `toolId` instead. 
*/ tool?: | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO; /** This is the tool to call. To use a transient tool, send `tool` instead. */ toolId?: string; /** @maxLength 80 */ name: string; /** This is whether or not the node is the start of the workflow. */ isStart?: boolean; /** This is for metadata you want to store on the task. */ metadata?: object; } export interface VoicemailDetectionBackoffPlan { /** * This is the number of seconds to wait before starting the first retry attempt. * @min 0 * @default 5 */ startAtSeconds?: number; /** * This is the interval in seconds between retry attempts. * @min 2.5 * @default 5 */ frequencySeconds?: number; /** * This is the maximum number of retry attempts before giving up. * @min 1 * @max 10 * @default 6 */ maxRetries?: number; } export interface GoogleVoicemailDetectionPlan { /** * This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message * * - If we detect a voicemail beep before this, we will speak the message at that point. * * - Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case. * * @default 30 * @min 0 * @max 60 * @min 0 * @max 30 * @default 30 */ beepMaxAwaitSeconds?: number; /** This is the provider to use for voicemail detection. 
*/ provider: "google"; /** This is the backoff plan for the voicemail detection. */ backoffPlan?: VoicemailDetectionBackoffPlan; /** * This is the detection type to use for voicemail detection. * - 'audio': Uses native audio models (default) * - 'transcript': Uses ASR/transcript-based detection * @default 'audio' (audio detection) */ type?: "audio" | "transcript"; } export interface OpenAIVoicemailDetectionPlan { /** * This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message * * - If we detect a voicemail beep before this, we will speak the message at that point. * * - Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case. * * @default 30 * @min 0 * @max 60 * @min 0 * @max 30 * @default 30 */ beepMaxAwaitSeconds?: number; /** This is the provider to use for voicemail detection. */ provider: "openai"; /** This is the backoff plan for the voicemail detection. */ backoffPlan?: VoicemailDetectionBackoffPlan; /** * This is the detection type to use for voicemail detection. * - 'audio': Uses native audio models (default) * - 'transcript': Uses ASR/transcript-based detection * @default 'audio' (audio detection) */ type?: "audio" | "transcript"; } export interface TwilioVoicemailDetectionPlan { /** This is the provider to use for voicemail detection. */ provider: "twilio"; /** * These are the AMD messages from Twilio that are considered as voicemail. Default is ['machine_end_beep', 'machine_end_silence']. * * @default {Array} ['machine_end_beep', 'machine_end_silence'] * @example ["machine_end_beep","machine_end_silence"] */ voicemailDetectionTypes?: | "machine_start" | "human" | "fax" | "unknown" | "machine_end_beep" | "machine_end_silence" | "machine_end_other"; /** * This sets whether the assistant should detect voicemail. Defaults to true. 
* * @default true */ enabled?: boolean; /** * The number of seconds that Twilio should attempt to perform answering machine detection before timing out and returning AnsweredBy as unknown. Default is 30 seconds. * * Increasing this value will provide the engine more time to make a determination. This can be useful when DetectMessageEnd is provided in the MachineDetection parameter and there is an expectation of long answering machine greetings that can exceed 30 seconds. * * Decreasing this value will reduce the amount of time the engine has to make a determination. This can be particularly useful when the Enable option is provided in the MachineDetection parameter and you want to limit the time for initial detection. * * Check the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info. * * @default 30 * @min 3 * @max 59 */ machineDetectionTimeout?: number; /** * The number of milliseconds that is used as the measuring stick for the length of the speech activity. Durations lower than this value will be interpreted as a human, longer as a machine. Default is 2400 milliseconds. * * Increasing this value will reduce the chance of a False Machine (detected machine, actually human) for a long human greeting (e.g., a business greeting) but increase the time it takes to detect a machine. * * Decreasing this value will reduce the chances of a False Human (detected human, actually machine) for short voicemail greetings. The value of this parameter may need to be reduced by more than 1000ms to detect very short voicemail greetings. A reduction of that significance can result in increased False Machine detections. Adjusting the MachineDetectionSpeechEndThreshold is likely the better approach for short voicemails. Decreasing MachineDetectionSpeechThreshold will also reduce the time it takes to detect a machine. 
* * Check the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info. * * @default 2400 * @min 1000 * @max 6000 */ machineDetectionSpeechThreshold?: number; /** * The number of milliseconds of silence after speech activity at which point the speech activity is considered complete. Default is 1200 milliseconds. * * Increasing this value will typically be used to better address the short voicemail greeting scenarios. For short voicemails, there is typically 1000-2000ms of audio followed by 1200-2400ms of silence and then additional audio before the beep. Increasing the MachineDetectionSpeechEndThreshold to ~2500ms will treat the 1200-2400ms of silence as a gap in the greeting but not the end of the greeting and will result in a machine detection. The downsides of such a change include: * - Increasing the delay for human detection by the amount you increase this parameter, e.g., a change of 1200ms to 2500ms increases human detection delay by 1300ms. * - Cases where a human has two utterances separated by a period of silence (e.g. a "Hello", then 2000ms of silence, and another "Hello") may be interpreted as a machine. * * Decreasing this value will result in faster human detection. The consequence is that it can lead to increased False Human (detected human, actually machine) detections because a silence gap in a voicemail greeting (not necessarily just in short voicemail scenarios) can be incorrectly interpreted as the end of speech. * * Check the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info. * * @default 1200 * @min 500 * @max 5000 */ machineDetectionSpeechEndThreshold?: number; /** * The number of milliseconds of initial silence after which an unknown AnsweredBy result will be returned. Default is 5000 milliseconds. 
* * Increasing this value will result in waiting for a longer period of initial silence before returning an 'unknown' AMD result. * * Decreasing this value will result in waiting for a shorter period of initial silence before returning an 'unknown' AMD result. * * Check the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info. * * @default 5000 * @min 2000 * @max 10000 */ machineDetectionSilenceTimeout?: number; } export interface VapiVoicemailDetectionPlan { /** * This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message * * - If we detect a voicemail beep before this, we will speak the message at that point. * * - Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case. * * @default 30 * @min 0 * @max 60 * @min 0 * @max 30 * @default 30 */ beepMaxAwaitSeconds?: number; /** This is the provider to use for voicemail detection. */ provider: "vapi"; /** This is the backoff plan for the voicemail detection. */ backoffPlan?: VoicemailDetectionBackoffPlan; /** * This is the detection type to use for voicemail detection. * - 'audio': Uses native audio models (default) * - 'transcript': Uses ASR/transcript-based detection * @default 'audio' (audio detection) */ type?: "audio" | "transcript"; } export interface TransferHookAction { /** This is the type of action - must be "transfer" */ type: "transfer"; /** This is the destination details for the transfer - can be a phone number or SIP URI */ destination?: TransferDestinationNumber | TransferDestinationSip; } export interface FunctionCallHookAction { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. 
For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "function" for Function tool. */ type: "function"; /** * This determines if the tool is async. * * If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server. * * If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server. * * Defaults to synchronous (`false`). * @example false */ async?: boolean; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** This is the function definition of the tool. */ function?: OpenAIFunction; } export interface SayHookAction { /** This is the type of action - must be "say" */ type: "say"; /** * This is the prompt for the assistant to generate a response based on existing conversation. * Can be a string or an array of chat messages. */ prompt?: | string | ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** This is the message to say */ exact?: object; } export interface CallHookFilter { /** * This is the type of filter - currently only "oneOf" is supported * @maxLength 1000 */ type: "oneOf"; /** * This is the key to filter on (e.g. "call.endedReason") * @maxLength 1000 */ key: string; /** This is the array of possible values to match against */ oneOf: string[]; } export interface CallHookCallEnding { /** * This is the event that triggers this hook * @maxLength 1000 */ on: "call.ending"; /** This is the set of actions to perform when the hook triggers */ do: ToolCallHookAction[]; /** This is the set of filters that must match for the hook to trigger */ filters?: CallHookFilter[]; } export interface CallHookAssistantSpeechInterrupted { /** * This is the event that triggers this hook * @maxLength 1000 */ on: "assistant.speech.interrupted"; /** This is the set of actions to perform when the hook triggers */ do: (SayHookAction | ToolCallHookAction)[]; } export interface CallHookCustomerSpeechInterrupted { /** * This is the event that triggers this hook * @maxLength 1000 */ on: "customer.speech.interrupted"; /** This is the set of actions to perform when the hook triggers */ do: (SayHookAction | ToolCallHookAction)[]; } export interface ToolCallHookAction { /** This is the type of action - must be "tool" */ type: "tool"; /** This is the tool to call. 
To use an existing tool, send `toolId` instead. */ tool?: | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO; /** This is the tool to call. To use a transient tool, send `tool` instead. */ toolId?: string; } export interface CustomerSpeechTimeoutOptions { /** * This is the timeout in seconds before action is triggered. * The clock starts when the assistant finishes speaking and remains active until the user speaks. * * @default 7.5 * @min 1 * @max 1000 */ timeoutSeconds: number; /** * This is the maximum number of times the hook will trigger in a call. * * @default 3 * @min 1 * @max 10 */ triggerMaxCount?: number; /** * This is whether the counter for hook trigger resets the user speaks. * * @default never */ triggerResetMode?: object; } export interface CallHookCustomerSpeechTimeout { /** * Must be either "customer.speech.timeout" or match the pattern "customer.speech.timeout[property=value]" * @maxLength 1000 */ on: string; /** This is the set of actions to perform when the hook triggers */ do: (SayHookAction | ToolCallHookAction)[]; /** This is the set of filters that must match for the hook to trigger */ options?: CustomerSpeechTimeoutOptions; /** * This is the name of the hook, it can be set by the user to identify the hook. * If no name is provided, the hook will be auto generated as UUID. 
* * @default UUID * @maxLength 1000 */ name?: string; } export interface CallHookModelResponseTimeout { /** * This is the event that triggers this hook * @maxLength 1000 */ on: "model.response.timeout"; /** This is the set of actions to perform when the hook triggers */ do: (SayHookAction | ToolCallHookAction)[]; } export interface AIEdgeCondition { type: "ai"; /** * This is the prompt for the AI edge condition. It should evaluate to a boolean. * @maxLength 1000 */ prompt: string; } export interface Edge { condition?: AIEdgeCondition; /** @maxLength 80 */ from: string; /** @maxLength 80 */ to: string; /** This is for metadata you want to store on the edge. */ metadata?: object; } export interface RecordingConsentPlanStayOnLine { /** * This is the message asking for consent to record the call. * If the type is `stay-on-line`, the message should ask the user to hang up if they do not consent. * If the type is `verbal`, the message should ask the user to verbally consent or decline. * @maxLength 1000 */ message: string; /** * This is the voice to use for the consent message. If not specified, inherits from the assistant's voice. * Use a different voice for the consent message for a better user experience. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the type of recording consent plan. This type assumes consent is granted if the user stays on the line. * @example "stay-on-line" */ type: "stay-on-line"; /** * Number of seconds to wait before transferring to the assistant if user stays on the call * @min 1 * @max 6 * @default 3 * @example 3 */ waitSeconds?: number; } export interface RecordingConsentPlanVerbal { /** * This is the message asking for consent to record the call. 
* If the type is `stay-on-line`, the message should ask the user to hang up if they do not consent. * If the type is `verbal`, the message should ask the user to verbally consent or decline. * @maxLength 1000 */ message: string; /** * This is the voice to use for the consent message. If not specified, inherits from the assistant's voice. * Use a different voice for the consent message for a better user experience. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the type of recording consent plan. This type assumes consent is granted if the user verbally consents or declines. * @example "verbal" */ type: "verbal"; /** Tool to execute if user verbally declines recording consent */ declineTool?: object; /** ID of existing tool to execute if user verbally declines recording consent */ declineToolId?: string; } export type SecurityFilterBase = object; export interface SecurityFilterPlan { /** * Whether the security filter is enabled. * @default false * @default false */ enabled?: boolean; /** * Array of security filter types to apply. * If array is not empty, only those security filters are run. * @example "[{ type: "sql-injection" }, { type: "xss" }]" */ filters?: SecurityFilterBase[]; /** * Mode of operation when a security threat is detected. * - 'sanitize': Remove or replace the threatening content * - 'reject': Replace the entire transcript with replacement text * - 'replace': Replace threatening patterns with replacement text * @default 'sanitize' * @default "sanitize" */ mode?: "sanitize" | "reject" | "replace"; /** * Text to use when replacing filtered content. 
* @default '[FILTERED]' * @default "[FILTERED]" */ replacementText?: string; } export interface CompliancePlan { /** * When this is enabled, no logs, recordings, or transcriptions will be stored. * At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false. * @example {"hipaaEnabled":false} */ hipaaEnabled?: boolean; /** * When this is enabled, the user will be restricted to use PCI-compliant providers, and no logs or transcripts are stored. * At the end of the call, you will receive an end-of-call-report message to store on your server. Defaults to false. * @example {"pciEnabled":false} */ pciEnabled?: boolean; /** This is the security filter plan for the assistant. It allows filtering of transcripts for security threats before sending to LLM. */ securityFilterPlan?: SecurityFilterPlan; recordingConsentPlan?: | ({ type: "stay-on-line"; } & RecordingConsentPlanStayOnLine) | ({ type: "verbal"; } & RecordingConsentPlanVerbal); } export interface StructuredDataPlan { /** * These are the messages used to generate the structured data. * * @default: ``` * [ * { * "role": "system", * "content": "You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema. DO NOT return anything except the structured data.\n\nJson Schema:\\n{{schema}}\n\nOnly respond with the JSON." * }, * { * "role": "user", * "content": "Here is the transcript:\n\n{{transcript}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" * } * ]``` * * You can customize by providing any messages you want. 
* * Here are the template variables available: * - {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{schema}}: the schema of the structured data from `structuredDataPlan.schema`- {{endedReason}}: the ended reason of the call from `call.endedReason` */ messages?: object[]; /** * This determines whether structured data is generated and stored in `call.analysis.structuredData`. Defaults to false. * * Usage: * - If you want to extract structured data, set this to true and provide a `schema`. * * @default false */ enabled?: boolean; /** * This is the schema of the structured data. The output is stored in `call.analysis.structuredData`. * * Complete guide on JSON Schema can be found [here](https://ajv.js.org/json-schema.html#json-data-type). */ schema?: JsonSchema; /** * This is how long the request is tried before giving up. When request times out, `call.analysis.structuredData` will be empty. * * Usage: * - To guarantee the structured data is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond. * * @default 5 seconds * @min 1 * @max 60 */ timeoutSeconds?: number; } export interface StructuredDataMultiPlan { /** This is the key of the structured data plan in the catalog. */ key: string; /** This is an individual structured data plan in the catalog. */ plan: StructuredDataPlan; } export interface SuccessEvaluationPlan { /** * This enforces the rubric of the evaluation. The output is stored in `call.analysis.successEvaluation`. * * Options include: * - 'NumericScale': A scale of 1 to 10. * - 'DescriptiveScale': A scale of Excellent, Good, Fair, Poor. * - 'Checklist': A checklist of criteria and their status. * - 'Matrix': A grid that evaluates multiple criteria across different performance levels. 
* - 'PercentageScale': A scale of 0% to 100%. * - 'LikertScale': A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree. * - 'AutomaticRubric': Automatically break down evaluation into several criteria, each with its own score. * - 'PassFail': A simple 'true' if call passed, 'false' if not. * * Default is 'PassFail'. */ rubric?: | "NumericScale" | "DescriptiveScale" | "Checklist" | "Matrix" | "PercentageScale" | "LikertScale" | "AutomaticRubric" | "PassFail"; /** * These are the messages used to generate the success evaluation. * * @default: ``` * [ * { * "role": "system", * "content": "You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt. DO NOT return anything except the result.\n\nRubric:\\n{{rubric}}\n\nOnly respond with the result." * }, * { * "role": "user", * "content": "Here is the transcript:\n\n{{transcript}}\n\n" * }, * { * "role": "user", * "content": "Here was the system prompt of the call:\n\n{{systemPrompt}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" * } * ]``` * * You can customize by providing any messages you want. * * Here are the template variables available: * - {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{rubric}}: the rubric of the success evaluation from `successEvaluationPlan.rubric`- {{endedReason}}: the ended reason of the call from `call.endedReason` */ messages?: object[]; /** * This determines whether a success evaluation is generated and stored in `call.analysis.successEvaluation`. Defaults to true. * * Usage: * - If you want to disable the success evaluation, set this to false. 
* * @default true */ enabled?: boolean; /** * This is how long the request is tried before giving up. When request times out, `call.analysis.successEvaluation` will be empty. * * Usage: * - To guarantee the success evaluation is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond. * * @default 5 seconds * @min 1 * @max 60 */ timeoutSeconds?: number; } export interface AnalysisPlan { /** * The minimum number of messages required to run the analysis plan. * If the number of messages is less than this, analysis will be skipped. * @default 2 * @min 0 */ minMessagesThreshold?: number; /** This is the plan for generating the summary of the call. This outputs to `call.analysis.summary`. */ summaryPlan?: SummaryPlan; /** This is the plan for generating the structured data from the call. This outputs to `call.analysis.structuredData`. */ structuredDataPlan?: StructuredDataPlan; /** This is an array of structured data plan catalogs. Each entry includes a `key` and a `plan` for generating the structured data from the call. This outputs to `call.analysis.structuredDataMulti`. */ structuredDataMultiPlan?: StructuredDataMultiPlan[]; /** This is the plan for generating the success evaluation of the call. This outputs to `call.analysis.successEvaluation`. */ successEvaluationPlan?: SuccessEvaluationPlan; /** * This is an array of outcome UUIDs to be calculated during analysis. * The outcomes will be calculated and stored in `call.analysis.outcomes`. */ outcomeIds?: string[]; } export interface TranscriptPlan { /** * This determines whether the transcript is stored in `call.artifact.transcript`. Defaults to true. * * @default true * @example true */ enabled?: boolean; /** * This is the name of the assistant in the transcript. Defaults to 'AI'. * * Usage: * - If you want to change the name of the assistant in the transcript, set this. 
Example, here is what the transcript would look like with `assistantName` set to 'Buyer': * ``` * User: Hello, how are you? * Buyer: I'm fine. * User: Do you want to buy a car? * Buyer: No. * ``` * * @default 'AI' */ assistantName?: string; /** * This is the name of the user in the transcript. Defaults to 'User'. * * Usage: * - If you want to change the name of the user in the transcript, set this. Example, here is what the transcript would look like with `userName` set to 'Seller': * ``` * Seller: Hello, how are you? * AI: I'm fine. * Seller: Do you want to buy a car? * AI: No. * ``` * * @default 'User' */ userName?: string; } export interface ScorecardMetric { /** * This is the unique identifier for the structured output that will be used to evaluate the scorecard. * The structured output must be of type number or boolean only for now. */ structuredOutputId: string; /** * These are the conditions that will be used to evaluate the scorecard. * Each condition will have a comparator, value, and points that will be used to calculate the final score. * The points will be added to the overall score if the condition is met. * The overall score will be normalized to a 100 point scale to ensure uniformity across different scorecards. */ conditions: object[]; } export interface CreateScorecardDTO { /** * This is the name of the scorecard. It is only for user reference and will not be used for any evaluation. * @maxLength 80 */ name?: string; /** * This is the description of the scorecard. It is only for user reference and will not be used for any evaluation. * @maxLength 500 */ description?: string; /** * These are the metrics that will be used to evaluate the scorecard. * Each metric will have a set of conditions and points that will be used to generate the score. */ metrics: ScorecardMetric[]; /** * These are the assistant IDs that this scorecard is linked to. * When linked to assistants, this scorecard will be available for evaluation during those assistants' calls. 
*/ assistantIds?: string[]; } export interface ArtifactPlan { /** * This determines whether assistant's calls are recorded. Defaults to true. * * Usage: * - If you don't want to record the calls, set this to false. * - If you want to record the calls when `assistant.hipaaEnabled` (deprecated) or `assistant.compliancePlan.hipaaEnabled` explicity set this to true and make sure to provide S3 or GCP credentials on the Provider Credentials page in the Dashboard. * * You can find the recording at `call.artifact.recordingUrl` and `call.artifact.stereoRecordingUrl` after the call is ended. * * @default true * @example true */ recordingEnabled?: boolean; /** * This determines the format of the recording. Defaults to `wav;l16`. * * @default 'wav;l16' */ recordingFormat?: "wav;l16" | "mp3"; /** * This determines whether to use custom storage (S3 or GCP) for call recordings when storage credentials are configured. * * When set to false, recordings will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured. * * Usage: * - Set to false if you have custom storage configured but want to store recordings on Vapi's storage for this assistant. * - Set to true (or leave unset) to use your custom storage for recordings when available. * * @default true * @example true */ recordingUseCustomStorageEnabled?: boolean; /** * This determines whether the video is recorded during the call. Defaults to false. Only relevant for `webCall` type. * * You can find the video recording at `call.artifact.videoRecordingUrl` after the call is ended. * * @default false * @example false */ videoRecordingEnabled?: boolean; /** * This determines whether the artifact contains the full message history, even after handoff context engineering. Defaults to false. * @example false */ fullMessageHistoryEnabled?: boolean; /** * This determines whether the SIP packet capture is enabled. Defaults to true. 
Only relevant for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`. * * You can find the packet capture at `call.artifact.pcapUrl` after the call is ended. * * @default true * @example true */ pcapEnabled?: boolean; /** * This is the path where the SIP packet capture will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard. * * If credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it. * * Usage: * - If you want to upload the packet capture to a specific path, set this to the path. Example: `/my-assistant-captures`. * - If you want to upload the packet capture to the root of the bucket, set this to `/`. * * @default '/' * @example "/pcaps" */ pcapS3PathPrefix?: string; /** * This determines whether to use custom storage (S3 or GCP) for SIP packet captures when storage credentials are configured. * * When set to false, packet captures will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured. * * Usage: * - Set to false if you have custom storage configured but want to store packet captures on Vapi's storage for this assistant. * - Set to true (or leave unset) to use your custom storage for packet captures when available. * * @default true * @example true */ pcapUseCustomStorageEnabled?: boolean; /** * This determines whether the call logs are enabled. Defaults to true. * * @default true * @example true */ loggingEnabled?: boolean; /** * This determines whether to use custom storage (S3 or GCP) for call logs when storage credentials are configured. * * When set to false, logs will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured. * * Usage: * - Set to false if you have custom storage configured but want to store logs on Vapi's storage for this assistant. 
* - Set to true (or leave unset) to use your custom storage for logs when available. * * @default true * @example true */ loggingUseCustomStorageEnabled?: boolean; /** This is the plan for `call.artifact.transcript`. To disable, set `transcriptPlan.enabled` to false. */ transcriptPlan?: TranscriptPlan; /** * This is the path where the recording will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard. * * If credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it. * * Usage: * - If you want to upload the recording to a specific path, set this to the path. Example: `/my-assistant-recordings`. * - If you want to upload the recording to the root of the bucket, set this to `/`. * * @default '/' */ recordingPath?: string; /** * This is an array of structured output IDs to be calculated during the call. * The outputs will be extracted and stored in `call.artifact.structuredOutputs` after the call is ended. */ structuredOutputIds?: string[]; /** * This is an array of scorecard IDs that will be evaluated based on the structured outputs extracted during the call. * The scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended. */ scorecardIds?: string[]; /** * This is the array of scorecards that will be evaluated based on the structured outputs extracted during the call. * The scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended. */ scorecards?: CreateScorecardDTO[]; /** * This is the path where the call logs will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard. * * If credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it. * * Usage: * - If you want to upload the call logs to a specific path, set this to the path. Example: `/my-assistant-logs`. 
* - If you want to upload the call logs to the root of the bucket, set this to `/`. * * @default '/' */ loggingPath?: string; } export interface StopSpeakingPlan { /** * This is the number of words that the customer has to say before the assistant will stop talking. * * Words like "stop", "actually", "no", etc. will always interrupt immediately regardless of this value. * * Words like "okay", "yeah", "right" will never interrupt. * * When set to 0, `voiceSeconds` is used in addition to the transcriptions to determine the customer has started speaking. * * Defaults to 0. * * @default 0 * @min 0 * @max 10 * @example 0 */ numWords?: number; /** * This is the seconds customer has to speak before the assistant stops talking. This uses the VAD (Voice Activity Detection) spike to determine if the customer has started speaking. * * Considerations: * - A lower value might be more responsive but could potentially pick up non-speech sounds. * - A higher value reduces false positives but might slightly delay the detection of speech onset. * * This is only used if `numWords` is set to 0. * * Defaults to 0.2 * * @default 0.2 * @min 0 * @max 0.5 * @example 0.2 */ voiceSeconds?: number; /** * This is the seconds to wait before the assistant will start talking again after being interrupted. * * Defaults to 1. * * @default 1 * @min 0 * @max 10 * @example 1 */ backoffSeconds?: number; /** * These are the phrases that will never interrupt the assistant, even if numWords threshold is met. * These are typically acknowledgement or backchanneling phrases. 
* @default ["i understand","i see","i got it","i hear you","im listening","im with you","right","okay","ok","sure","alright","got it","understood","yeah","yes","uh-huh","mm-hmm","gotcha","mhmm","ah","yeah okay","yeah sure"] * @example ["i understand","i see","i got it","i hear you","im listening","im with you","right","okay","ok","sure","alright","got it","understood","yeah","yes","uh-huh","mm-hmm","gotcha","mhmm","ah","yeah okay","yeah sure"] */ acknowledgementPhrases?: string[]; /** * These are the phrases that will always interrupt the assistant immediately, regardless of numWords. * These are typically phrases indicating disagreement or desire to stop. * @default ["stop","shut","up","enough","quiet","silence","but","dont","not","no","hold","wait","cut","pause","nope","nah","nevermind","never","bad","actually"] * @example ["stop","shut","up","enough","quiet","silence","but","dont","not","no","hold","wait","cut","pause","nope","nah","nevermind","never","bad","actually"] */ interruptionPhrases?: string[]; } export interface MonitorPlan { /** * This determines whether the assistant's calls allow live listening. Defaults to true. * * Fetch `call.monitor.listenUrl` to get the live listening URL. * * @default true * @example false */ listenEnabled?: boolean; /** * This enables authentication on the `call.monitor.listenUrl`. * * If `listenAuthenticationEnabled` is `true`, the `call.monitor.listenUrl` will require an `Authorization: Bearer ` header. * * @default false * @example false */ listenAuthenticationEnabled?: boolean; /** * This determines whether the assistant's calls allow live control. Defaults to true. * * Fetch `call.monitor.controlUrl` to get the live control URL. * * To use, send any control message via a POST request to `call.monitor.controlUrl`. 
Here are the types of controls supported: https://docs.vapi.ai/api-reference/messages/client-inbound-message * * @default true * @example false */ controlEnabled?: boolean; /** * This enables authentication on the `call.monitor.controlUrl`. * * If `controlAuthenticationEnabled` is `true`, the `call.monitor.controlUrl` will require an `Authorization: Bearer ` header. * * @default false * @example false */ controlAuthenticationEnabled?: boolean; } export interface SmartDenoisingPlan { /** * Whether smart denoising using Krisp is enabled. * @default false */ enabled?: boolean; } export interface FourierDenoisingPlan { /** * Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected. * @default false */ enabled?: boolean; /** * Whether automatic media detection is enabled. When enabled, the filter will automatically * detect consistent background TV/music/radio and switch to more aggressive filtering settings. * Only applies when enabled is true. * @default true * @example true */ mediaDetectionEnabled?: boolean; /** * Static threshold in dB used as fallback when no baseline is established. * @min -80 * @max 0 * @default -35 * @example -35 */ staticThreshold?: number; /** * How far below the rolling baseline to filter audio, in dB. * Lower values (e.g., -10) are more aggressive, higher values (e.g., -20) are more conservative. * @min -30 * @max -5 * @default -15 * @example -15 */ baselineOffsetDb?: number; /** * Rolling window size in milliseconds for calculating the audio baseline. * Larger windows adapt more slowly but are more stable. * @min 1000 * @max 30000 * @default 3000 * @example 3000 */ windowSizeMs?: number; /** * Percentile to use for baseline calculation (1-99). * Higher percentiles (e.g., 85) focus on louder speech, lower percentiles (e.g., 50) include quieter speech. 
* @min 1
* @max 99
* @default 85
* @example 85
*/
baselinePercentile?: number;
}

export interface BackgroundSpeechDenoisingPlan {
  /** Whether smart denoising using Krisp is enabled. */
  smartDenoisingPlan?: SmartDenoisingPlan;
  /**
   * Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected.
   *
   * This can be combined with smart denoising, and will be run afterwards.
   */
  fourierDenoisingPlan?: FourierDenoisingPlan;
}

export interface KeypadInputPlan {
  /**
   * This keeps track of whether the user has enabled keypad input.
   * By default, it is off.
   *
   * @default false
   */
  enabled?: boolean;
  /**
   * This is the time in seconds to wait before processing the input.
   * If the input is not received within this time, the input will be ignored.
   * If set to "off", the input will be processed when the user enters a delimiter or immediately if no delimiter is used.
   * NOTE(review): this field is typed as `number`, so the `"off"` sentinel described above is not representable here — confirm against the API schema.
   *
   * @default 2
   * @min 0
   * @max 10
   */
  timeoutSeconds?: number;
  /**
   * This is the delimiter(s) that will be used to process the input.
   * Can be '#', '*', or an empty string ('').
   */
  delimiters?: "#" | "*" | "";
}

export interface WorkflowUserEditable {
  nodes: (ConversationNode | ToolNode)[];
  /**
   * This is the model for the workflow.
   *
   * This can be overridden at node level using `nodes[n].model`.
   */
  model?:
    | WorkflowOpenAIModel
    | WorkflowAnthropicModel
    | WorkflowGoogleModel
    | WorkflowCustomModel;
  /**
   * This is the transcriber for the workflow.
   *
   * This can be overridden at node level using `nodes[n].transcriber`.
   */
  transcriber?:
    | AssemblyAITranscriber
    | AzureSpeechTranscriber
    | CustomTranscriber
    | DeepgramTranscriber
    | ElevenLabsTranscriber
    | GladiaTranscriber
    | GoogleTranscriber
    | SpeechmaticsTranscriber
    | TalkscriberTranscriber
    | OpenAITranscriber
    | CartesiaTranscriber;
  /**
   * This is the voice for the workflow.
   *
   * This can be overridden at node level using `nodes[n].voice`.
*/ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the plan for observability of workflow's calls. * * Currently, only Langfuse is supported. */ observabilityPlan?: LangfuseObservabilityPlan; /** * This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** This is a set of actions that will be performed on certain events. */ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout | CallHookModelResponseTimeout )[]; /** These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. 
*/ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: 
"together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is the voicemail detection plan for the workflow. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * This is the maximum duration of the call in seconds. * * After this duration, the call will automatically end. * * Default is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds. * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; /** @maxLength 80 */ name: string; edges: Edge[]; /** @maxLength 5000 */ globalPrompt?: string; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. 
tool.server * 2. workflow.server / assistant.server * 3. phoneNumber.server * 4. org.server */ server?: Server; /** This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings. */ compliancePlan?: CompliancePlan; /** This is the plan for analysis of workflow's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the workflow nodes should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when workflow nodes should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. * - The assistant is too fast to recognize customer's interruption. * - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the workflow's calls. * * Usage: * - To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`. * - To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`. */ monitorPlan?: MonitorPlan; /** * This enables filtering of noise and background speech while the user is talking. 
* * Features: * - Smart denoising using Krisp * - Fourier denoising * * Both can be used together. Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */ credentialIds?: string[]; /** This is the plan for keypad input handling during workflow calls. */ keypadInputPlan?: KeypadInputPlan; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; } export interface VapiModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. */ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; provider: "vapi"; /** This is the workflow that will be used for the call. 
To use a transient workflow, use `workflow` instead. */ workflowId?: string; /** This is the workflow that will be used for the call. To use an existing workflow, use `workflowId` instead. */ workflow?: WorkflowUserEditable; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: string; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. * * @default 0 * @min 0 */ numFastTurns?: number; } export interface XaiModel { /** This is the starting state for the conversation. */ messages?: OpenAIMessage[]; /** * These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. * * Both `tools` and `toolIds` can be used together. 
*/ tools?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are the tools that the assistant can use during the call. To use transient tools, use `tools`. * * Both `tools` and `toolIds` can be used together. */ toolIds?: string[]; /** These are the options for the knowledge base. */ knowledgeBase?: CreateCustomKnowledgeBaseDTO; /** This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b */ model: | "grok-beta" | "grok-2" | "grok-3" | "grok-4-fast-reasoning" | "grok-4-fast-non-reasoning"; provider: "xai"; /** * This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency. * @min 0 * @max 2 */ temperature?: number; /** * This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250. * @min 50 * @max 10000 */ maxTokens?: number; /** * This determines whether we detect user's emotion while they speak and send it as an additional info to model. * * Default `false` because the model is usually are good at understanding the user's emotion from text. * * @default false */ emotionRecognitionEnabled?: boolean; /** * This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai. * * Default is 0. 
*
* @default 0
* @min 0
*/
numFastTurns?: number;
}

export interface ExactReplacement {
  /**
   * This is the exact replacement type. You can use this to replace a specific word or phrase with a different word or phrase.
   *
   * Usage:
   * - Replace "hello" with "hi": { type: 'exact', key: 'hello', value: 'hi' }
   * - Replace "good morning" with "good day": { type: 'exact', key: 'good morning', value: 'good day' }
   * - Replace a specific name: { type: 'exact', key: 'John Doe', value: 'Jane Smith' }
   * - Replace an acronym: { type: 'exact', key: 'AI', value: 'Artificial Intelligence' }
   * - Replace a company name with its phonetic pronunciation: { type: 'exact', key: 'Vapi', value: 'Vappy' }
   */
  type: "exact";
  /**
   * This option lets you control whether to replace all instances of the key or only the first one. By default, it only replaces the first instance.
   * Examples:
   * - For { type: 'exact', key: 'hello', value: 'hi', replaceAllEnabled: false }. Before: "hello world, hello universe" | After: "hi world, hello universe"
   * - For { type: 'exact', key: 'hello', value: 'hi', replaceAllEnabled: true }. Before: "hello world, hello universe" | After: "hi world, hi universe"
   * @default false
   */
  replaceAllEnabled?: boolean;
  /** This is the key to replace. */
  key: string;
  /**
   * This is the value that will replace the match.
   * @maxLength 1000
   */
  value: string;
}

export interface RegexReplacement {
  /**
   * This is the regex replacement type. You can use this to replace a word or phrase that matches a pattern.
* * Usage: * - Replace all numbers with "some number": { type: 'regex', regex: '\\d+', value: 'some number' } * - Replace email addresses with "[EMAIL]": { type: 'regex', regex: '\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b', value: '[EMAIL]' } * - Replace phone numbers with a formatted version: { type: 'regex', regex: '(\\d{3})(\\d{3})(\\d{4})', value: '($1) $2-$3' } * - Replace all instances of "color" or "colour" with "hue": { type: 'regex', regex: 'colou?r', value: 'hue' } * - Capitalize the first letter of every sentence: { type: 'regex', regex: '(?<=\\. |^)[a-z]', value: (match) => match.toUpperCase() } */ type: "regex"; /** * This is the regex pattern to replace. * * Note: * - This works by using the `string.replace` method in Node.JS. Eg. `"hello there".replace(/hello/g, "hi")` will return `"hi there"`. * * Hot tip: * - In JavaScript, escape `\` when sending the regex pattern. Eg. `"hello\sthere"` will be sent over the wire as `"hellosthere"`. Send `"hello\\sthere"` instead. */ regex: string; /** * These are the options for the regex replacement. Defaults to all disabled. * * @default [] */ options?: RegexOption[]; /** * This is the value that will replace the match. * @maxLength 1000 */ value: string; } export interface FormatPlan { /** * This determines whether the chunk is formatted before being sent to the voice provider. This helps with enunciation. This includes phone numbers, emails and addresses. Default `true`. * * Usage: * - To rely on the voice provider's formatting logic, set this to `false`. * * If `voice.chunkPlan.enabled` is `false`, this is automatically `false` since there's no chunk to format. * * @default true * @example true */ enabled?: boolean; /** * This is the cutoff after which a number is converted to individual digits instead of being spoken as words. * * Example: * - If cutoff 2025, "12345" is converted to "1 2 3 4 5" while "1200" is converted to "twelve hundred". 
*
* Usage:
* - If your use case doesn't involve IDs like zip codes, set this to a high value.
* - If your use case involves IDs that are shorter than 5 digits, set this to a lower value.
*
* @default 2025
* @min 0
* @example 2025
*/
numberToDigitsCutoff?: number;
/**
 * These are the custom replacements you can make to the chunk before it is sent to the voice provider.
 *
 * Usage:
 * - To replace a specific word or phrase with a different word or phrase, use the `ExactReplacement` type. Eg. `{ type: 'exact', key: 'hello', value: 'hi' }`
 * - To replace a word or phrase that matches a pattern, use the `RegexReplacement` type. Eg. `{ type: 'regex', regex: '\\b[a-zA-Z]{5}\\b', value: 'hi' }`
 *
 * @default []
 */
replacements?: (ExactReplacement | RegexReplacement)[];
/**
 * List of formatters to apply. If not provided, all default formatters will be applied.
 * If provided, only the specified formatters will be applied.
 * Note: Some essential formatters like angle bracket removal will always be applied.
 * NOTE(review): typed as a single literal, not an array, despite "List of formatters" — looks like a codegen artifact; confirm against the API schema before relying on multiple formatters here.
 * @default undefined
 */
formattersEnabled?:
  | "markdown"
  | "asterisk"
  | "quote"
  | "dash"
  | "newline"
  | "colon"
  | "acronym"
  | "dollarAmount"
  | "email"
  | "date"
  | "time"
  | "distance"
  | "unit"
  | "percentage"
  | "phoneNumber"
  | "number"
  | "stripAsterisk";
}

export interface ChunkPlan {
  /**
   * This determines whether the model output is chunked before being sent to the voice provider. Default `true`.
   *
   * Usage:
   * - To rely on the voice provider's audio generation logic, set this to `false`.
   * - If seeing issues with quality, set this to `true`.
   *
   * If disabled, Vapi-provided audio control tokens will not work.
   * NOTE(review): the token example that originally followed "tokens" appears to have been stripped during codegen (likely an angle-bracketed tag) — confirm against the Vapi docs.
   *
   * @default true
   * @example true
   */
  enabled?: boolean;
  /**
   * This is the minimum number of characters in a chunk.
   *
   * Usage:
   * - To increase quality, set this to a higher value.
   * - To decrease latency, set this to a lower value.
* * @default 30 * @min 1 * @max 80 * @example 30 */ minCharacters?: number; /** * These are the punctuations that are considered valid boundaries for a chunk to be created. * * Usage: * - To increase quality, constrain to fewer boundaries. * - To decrease latency, enable all. * * Default is automatically set to balance the trade-off between quality and latency based on the provider. * @example ["。",",",".","!","?",";","،","۔","।","॥","|","||",",",":"] */ punctuationBoundaries?: | "。" | "," | "." | "!" | "?" | ";" | ")" | "،" | "۔" | "।" | "॥" | "|" | "||" | "," | ":"; /** This is the plan for formatting the chunk before it is sent to the voice provider. */ formatPlan?: FormatPlan; } export interface FallbackPlan { /** This is the list of voices to fallback to in the event that the primary voice provider fails. */ voices: ( | FallbackAzureVoice | FallbackCartesiaVoice | FallbackHumeVoice | FallbackCustomVoice | FallbackDeepgramVoice | FallbackElevenLabsVoice | FallbackVapiVoice | FallbackLMNTVoice | FallbackOpenAIVoice | FallbackPlayHTVoice | FallbackRimeAIVoice | FallbackSmallestAIVoice | FallbackTavusVoice | FallbackNeuphonicVoice | FallbackSesameVoice | FallbackInworldVoice )[]; } export interface AzureVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "azure"; /** This is the provider-specific ID that will be used. */ voiceId: "andrew" | "brian" | "emma" | string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** * This is the speed multiplier that will be used. * @min 0.5 * @max 2 */ speed?: number; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. 
*/ fallbackPlan?: FallbackPlan; } export interface CartesiaExperimentalControls { speed?: "slowest" | "slow" | "normal" | "fast" | "fastest" | number; /** @example ["happiness:high"] */ emotion?: | "anger:lowest" | "anger:low" | "anger:high" | "anger:highest" | "positivity:lowest" | "positivity:low" | "positivity:high" | "positivity:highest" | "surprise:lowest" | "surprise:low" | "surprise:high" | "surprise:highest" | "sadness:lowest" | "sadness:low" | "sadness:high" | "sadness:highest" | "curiosity:lowest" | "curiosity:low" | "curiosity:high" | "curiosity:highest"; } export interface CartesiaGenerationConfigExperimental { /** * Toggle accent localization for sonic-3: 0 (disabled, default) or 1 (enabled). When enabled, the voice adapts to match the transcript language accent while preserving vocal characteristics. * @min 0 * @max 1 * @default 0 * @example 0 */ accentLocalization?: number; } export interface CartesiaGenerationConfig { /** * Fine-grained speed control for sonic-3. Only available for sonic-3 model. * @min 0.6 * @max 1.5 * @default 1 * @example 1 */ speed?: number; /** * Fine-grained volume control for sonic-3. Only available for sonic-3 model. * @min 0.5 * @max 2 * @default 1 * @example 1 */ volume?: number; /** Experimental model controls for sonic-3. These are subject to breaking changes. */ experimental?: CartesiaGenerationConfigExperimental; } export interface CartesiaVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "cartesia"; /** The ID of the particular voice you want to use. */ voiceId: string; /** * This is the model that will be used. This is optional and will default to the correct model for the voiceId. * @example "sonic-english" */ model?: | "sonic-3" | "sonic-2" | "sonic-english" | "sonic-multilingual" | "sonic-preview" | "sonic"; /** * This is the language that will be used. 
This is optional and will default to the correct language for the voiceId. * @example "en" */ language?: | "ar" | "bg" | "bn" | "cs" | "da" | "de" | "el" | "en" | "es" | "fi" | "fr" | "gu" | "he" | "hi" | "hr" | "hu" | "id" | "it" | "ja" | "ka" | "kn" | "ko" | "ml" | "mr" | "ms" | "nl" | "no" | "pa" | "pl" | "pt" | "ro" | "ru" | "sk" | "sv" | "ta" | "te" | "th" | "tl" | "tr" | "uk" | "vi" | "zh"; /** Experimental controls for Cartesia voice generation */ experimentalControls?: CartesiaExperimentalControls; /** Generation config for fine-grained control of sonic-3 voice output (speed, volume, and experimental controls). Only available for sonic-3 model. */ generationConfig?: CartesiaGenerationConfig; /** * Pronunciation dictionary ID for sonic-3. Allows custom pronunciations for specific words. Only available for sonic-3 model. * @example "dict_abc123" */ pronunciationDictId?: string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface CustomVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported. */ provider: "custom-voice"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** * This is where the voice request will be sent. * * Request Example: * * POST https://{server.url} * Content-Type: application/json * * { * "message": { * "type": "voice-request", * "text": "Hello, world!", * "sampleRate": 24000, * ...other metadata about the call... * } * } * * Response Expected: 1-channel 16-bit raw PCM audio at the sample rate specified in the request. 
Here is how the response will be piped to the transport:
 * ```
 * response.on('data', (chunk: Buffer) => {
 *   outputStream.write(chunk);
 * });
 * ```
 */
server: Server;
/** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */
fallbackPlan?: FallbackPlan;
}

export interface DeepgramVoice {
  /**
   * This is the flag to toggle voice caching for the assistant.
   * @default true
   * @example true
   */
  cachingEnabled?: boolean;
  /** This is the voice provider that will be used. */
  provider: "deepgram";
  /**
   * This is the Deepgram Voice ID
   * This is the provider-specific ID that will be used.
   */
  voiceId:
    | "asteria"
    | "luna"
    | "stella"
    | "athena"
    | "hera"
    | "orion"
    | "arcas"
    | "perseus"
    | "angus"
    | "orpheus"
    | "helios"
    | "zeus"
    | "thalia"
    | "andromeda"
    | "helena"
    | "apollo"
    | "aries"
    | "amalthea"
    | "atlas"
    | "aurora"
    | "callista"
    | "cora"
    | "cordelia"
    | "delia"
    | "draco"
    | "electra"
    | "harmonia"
    | "hermes"
    | "hyperion"
    | "iris"
    | "janus"
    | "juno"
    | "jupiter"
    | "mars"
    | "minerva"
    | "neptune"
    | "odysseus"
    | "ophelia"
    | "pandora"
    | "phoebe"
    | "pluto"
    | "saturn"
    | "selene"
    | "theia"
    | "vesta"
    | "celeste"
    | "estrella"
    | "nestor"
    | "sirio"
    | "carina"
    | "alvaro"
    | "diana"
    | "aquila"
    | "selena"
    | "javier";
  /**
   * This is the model that will be used. Defaults to 'aura-2' when not specified.
   * @example "aura-2"
   */
  model?: "aura" | "aura-2";
  /**
   * If set to true, this will add mip_opt_out=true as a query parameter of all API requests. See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out
   *
   * This will only be used if you are using your own Deepgram API key.
   *
   * @default false
   * @example false
   */
  mipOptOut?: boolean;
  /** This is the plan for chunking the model output before it is sent to the voice provider. */
  chunkPlan?: ChunkPlan;
  /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails.
*/ fallbackPlan?: FallbackPlan; } export interface ElevenLabsPronunciationDictionaryLocator { /** * This is the ElevenLabs Pronunciation Dictionary ID * This is the ID of the pronunciation dictionary to use. */ pronunciationDictionaryId: string; /** * This is the ElevenLabs Pronunciation Dictionary Version ID * This is the version ID of the pronunciation dictionary to use. */ versionId: string; } export interface ElevenLabsVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "11labs"; /** This is the provider-specific ID that will be used. Ensure the Voice is present in your 11Labs Voice Library. */ voiceId: | "burt" | "marissa" | "andrea" | "sarah" | "phillip" | "steve" | "joseph" | "myra" | "paula" | "ryan" | "drew" | "paul" | "mrb" | "matilda" | "mark" | string; /** * Defines the stability for voice settings. * @min 0 * @max 1 * @example 0.5 */ stability?: number; /** * Defines the similarity boost for voice settings. * @min 0 * @max 1 * @example 0.75 */ similarityBoost?: number; /** * Defines the style for voice settings. * @min 0 * @max 1 * @example 0 */ style?: number; /** * Defines the use speaker boost for voice settings. * @example false */ useSpeakerBoost?: boolean; /** * Defines the speed for voice settings. * @min 0.7 * @max 1.2 * @example 0.9 */ speed?: number; /** * Defines the optimize streaming latency for voice settings. Defaults to 3. * @min 0 * @max 4 * @example 3 */ optimizeStreamingLatency?: number; /** * This enables the use of https://elevenlabs.io/docs/speech-synthesis/prompting#pronunciation. Defaults to false to save latency. * * @default false * @example false */ enableSsmlParsing?: boolean; /** * Defines the auto mode for voice settings. Defaults to false. * @example false */ autoMode?: boolean; /** * This is the model that will be used. Defaults to 'eleven_turbo_v2' if not specified. 
* @example "eleven_turbo_v2_5" */ model?: | "eleven_multilingual_v2" | "eleven_turbo_v2" | "eleven_turbo_v2_5" | "eleven_flash_v2" | "eleven_flash_v2_5" | "eleven_monolingual_v1"; /** This is the language (ISO 639-1) that is enforced for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided. */ language?: string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the pronunciation dictionary locators to use. */ pronunciationDictionaryLocators?: ElevenLabsPronunciationDictionaryLocator[]; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface HumeVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "hume"; /** * This is the model that will be used. * @example "octave2" */ model?: "octave" | "octave2"; /** The ID of the particular voice you want to use. */ voiceId: string; /** * Indicates whether the chosen voice is a preset Hume AI voice or a custom voice. * @example false */ isCustomHumeVoice?: boolean; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** * Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent (e.g., 'a soft, gentle voice with a strong British accent'). * * If a Voice is specified in the request, this description serves as acting instructions. * If no Voice is specified, a new voice is generated based on this description. */ description?: string; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. 
*/ fallbackPlan?: FallbackPlan; } export interface LMNTVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "lmnt"; /** This is the provider-specific ID that will be used. */ voiceId: | "amy" | "ansel" | "autumn" | "ava" | "brandon" | "caleb" | "cassian" | "chloe" | "dalton" | "daniel" | "dustin" | "elowen" | "evander" | "huxley" | "james" | "juniper" | "kennedy" | "lauren" | "leah" | "lily" | "lucas" | "magnus" | "miles" | "morgan" | "natalie" | "nathan" | "noah" | "nyssa" | "oliver" | "paige" | "ryan" | "sadie" | "sophie" | "stella" | "terrence" | "tyler" | "vesper" | "violet" | "warrick" | "zain" | "zeke" | "zoe" | string; /** * This is the speed multiplier that will be used. * @min 0.25 * @max 2 * @example null */ speed?: number; /** * Two letter ISO 639-1 language code. Use "auto" for auto-detection. * @example "en" */ language?: | "aa" | "ab" | "ae" | "af" | "ak" | "am" | "an" | "ar" | "as" | "av" | "ay" | "az" | "ba" | "be" | "bg" | "bh" | "bi" | "bm" | "bn" | "bo" | "br" | "bs" | "ca" | "ce" | "ch" | "co" | "cr" | "cs" | "cu" | "cv" | "cy" | "da" | "de" | "dv" | "dz" | "ee" | "el" | "en" | "eo" | "es" | "et" | "eu" | "fa" | "ff" | "fi" | "fj" | "fo" | "fr" | "fy" | "ga" | "gd" | "gl" | "gn" | "gu" | "gv" | "ha" | "he" | "hi" | "ho" | "hr" | "ht" | "hu" | "hy" | "hz" | "ia" | "id" | "ie" | "ig" | "ii" | "ik" | "io" | "is" | "it" | "iu" | "ja" | "jv" | "ka" | "kg" | "ki" | "kj" | "kk" | "kl" | "km" | "kn" | "ko" | "kr" | "ks" | "ku" | "kv" | "kw" | "ky" | "la" | "lb" | "lg" | "li" | "ln" | "lo" | "lt" | "lu" | "lv" | "mg" | "mh" | "mi" | "mk" | "ml" | "mn" | "mr" | "ms" | "mt" | "my" | "na" | "nb" | "nd" | "ne" | "ng" | "nl" | "nn" | "no" | "nr" | "nv" | "ny" | "oc" | "oj" | "om" | "or" | "os" | "pa" | "pi" | "pl" | "ps" | "pt" | "qu" | "rm" | "rn" | "ro" | "ru" | "rw" | "sa" | "sc" | "sd" | "se" | "sg" | "si" | "sk" 
| "sl" | "sm" | "sn" | "so" | "sq" | "sr" | "ss" | "st" | "su" | "sv" | "sw" | "ta" | "te" | "tg" | "th" | "ti" | "tk" | "tl" | "tn" | "to" | "tr" | "ts" | "tt" | "tw" | "ty" | "ug" | "uk" | "ur" | "uz" | "ve" | "vi" | "vo" | "wa" | "wo" | "xh" | "yi" | "yue" | "yo" | "za" | "zh" | "zu" | "auto"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface NeuphonicVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "neuphonic"; /** This is the provider-specific ID that will be used. */ voiceId: string; /** * This is the model that will be used. Defaults to 'neu_fast' if not specified. * @example "neu_fast" */ model?: "neu_hq" | "neu_fast"; /** * This is the language (ISO 639-1) that is enforced for the model. * NOTE(review): generated type is `object` although the description and example suggest an ISO 639-1 string code — confirm against the upstream OpenAPI schema before relying on this shape. * @example "en" */ language: object; /** * This is the speed multiplier that will be used. * @min 0.25 * @max 2 * @example null */ speed?: number; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface OpenAIVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "openai"; /** * This is the provider-specific ID that will be used. * Please note that ash, ballad, coral, sage, and verse may only be used with realtime models. 
*/ voiceId: | "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | "marin" | "cedar" | string; /** This is the model that will be used for text-to-speech. */ model?: "tts-1" | "tts-1-hd" | "gpt-4o-mini-tts"; /** * This is a prompt that allows you to control the voice of your generated audio. * Does not work with 'tts-1' or 'tts-1-hd' models. * @maxLength 10000 */ instructions?: string; /** * This is the speed multiplier that will be used. * @min 0.25 * @max 4 * @example null */ speed?: number; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface PlayHTVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "playht"; /** This is the provider-specific ID that will be used. */ voiceId: | "jennifer" | "melissa" | "will" | "chris" | "matt" | "jack" | "ruby" | "davis" | "donna" | "michael" | string; /** * This is the speed multiplier that will be used. * @min 0.1 * @max 5 * @example null */ speed?: number; /** * A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature will be used. The temperature parameter controls variance. Lower temperatures result in more predictable results, higher temperatures allow each run to vary more, so the voice may sound less like the baseline voice. * @min 0.1 * @max 2 * @example null */ temperature?: number; /** * An emotion to be applied to the speech. 
* @example null */ emotion?: | "female_happy" | "female_sad" | "female_angry" | "female_fearful" | "female_disgust" | "female_surprised" | "male_happy" | "male_sad" | "male_angry" | "male_fearful" | "male_disgust" | "male_surprised"; /** * A number between 1 and 6. Use lower numbers to reduce how unique your chosen voice will be compared to other voices. * @min 1 * @max 6 * @example null */ voiceGuidance?: number; /** * A number between 1 and 30. Use lower numbers to reduce how strong your chosen emotion will be. Higher numbers will create a very emotional performance. * @min 1 * @max 30 * @example null */ styleGuidance?: number; /** * A number between 1 and 2. This number influences how closely the generated speech adheres to the input text. Use lower values to create more fluid speech, but with a higher chance of deviating from the input text. Higher numbers will make the generated speech more accurate to the input text, ensuring that the words spoken align closely with the provided text. * @min 1 * @max 2 * @example null */ textGuidance?: number; /** Playht voice model/engine to use. */ model?: "PlayHT2.0" | "PlayHT2.0-turbo" | "Play3.0-mini" | "PlayDialog"; /** The language to use for the speech. */ language?: | "afrikaans" | "albanian" | "amharic" | "arabic" | "bengali" | "bulgarian" | "catalan" | "croatian" | "czech" | "danish" | "dutch" | "english" | "french" | "galician" | "german" | "greek" | "hebrew" | "hindi" | "hungarian" | "indonesian" | "italian" | "japanese" | "korean" | "malay" | "mandarin" | "polish" | "portuguese" | "russian" | "serbian" | "spanish" | "swedish" | "tagalog" | "thai" | "turkish" | "ukrainian" | "urdu" | "xhosa"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. 
*/ fallbackPlan?: FallbackPlan; } export interface RimeAIVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "rime-ai"; /** This is the provider-specific ID that will be used. */ voiceId: | "abbie" | "allison" | "ally" | "alona" | "amber" | "ana" | "antoine" | "armon" | "brenda" | "brittany" | "carol" | "colin" | "courtney" | "elena" | "elliot" | "eva" | "geoff" | "gerald" | "hank" | "helen" | "hera" | "jen" | "joe" | "joy" | "juan" | "kendra" | "kendrick" | "kenneth" | "kevin" | "kris" | "linda" | "madison" | "marge" | "marina" | "marissa" | "marta" | "maya" | "nicholas" | "nyles" | "phil" | "reba" | "rex" | "rick" | "ritu" | "rob" | "rodney" | "rohan" | "rosco" | "samantha" | "sandy" | "selena" | "seth" | "sharon" | "stan" | "tamra" | "tanya" | "tibur" | "tj" | "tyler" | "viv" | "yadira" | "marsh" | "bayou" | "creek" | "brook" | "flower" | "spore" | "glacier" | "gulch" | "alpine" | "cove" | "lagoon" | "tundra" | "steppe" | "mesa" | "grove" | "rainforest" | "moraine" | "wildflower" | "peak" | "boulder" | "gypsum" | "zest" | "luna" | "celeste" | "orion" | "ursa" | "astra" | "esther" | "estelle" | "andromeda" | string; /** * This is the model that will be used. Defaults to 'arcana' when not specified. * @example "arcana" */ model?: "arcana" | "mistv2" | "mist"; /** * This is the speed multiplier that will be used. * @min 0.1 * @example null */ speed?: number; /** * This is a flag that controls whether to add slight pauses using angle brackets. Example: "Hi. <200> I'd love to have a conversation with you." adds a 200ms pause between the first and second sentences. * @example false */ pauseBetweenBrackets?: boolean; /** * This is a flag that controls whether text inside brackets should be phonemized (converted to phonetic pronunciation) - Example: "{h'El.o} World" will pronounce "Hello" as expected. 
* @example false */ phonemizeBetweenBrackets?: boolean; /** * This is a flag that controls whether to optimize for reduced latency in streaming. https://docs.rime.ai/api-reference/endpoint/websockets#param-reduce-latency * @example false */ reduceLatency?: boolean; /** * This is a string that allows inline speed control using alpha notation. https://docs.rime.ai/api-reference/endpoint/websockets#param-inline-speed-alpha * @example null */ inlineSpeedAlpha?: string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface SesameVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "sesame"; /** * Sesame Voice ID. This should be either a name (a built-in voice) or a UUID (a custom voice). * This is the provider-specific ID that will be used. */ voiceId: string; /** This is the model that will be used. */ model: "csm-1b"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface SmallestAIVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "smallest-ai"; /** This is the provider-specific ID that will be used. 
*/ voiceId: | "emily" | "jasmine" | "arman" | "james" | "mithali" | "aravind" | "raj" | "diya" | "raman" | "ananya" | "isha" | "william" | "aarav" | "monika" | "niharika" | "deepika" | "raghav" | "kajal" | "radhika" | "mansi" | "nisha" | "saurabh" | "pooja" | "saina" | "sanya" | string; /** Smallest AI voice model to use. Defaults to 'lightning' when not specified. */ model?: "lightning"; /** * This is the speed multiplier that will be used. * @example null */ speed?: number; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface TavusConversationProperties { /** * The maximum duration of the call in seconds. The default `maxCallDuration` is 3600 seconds (1 hour). * Once the time limit specified by this parameter has been reached, the conversation will automatically shut down. */ maxCallDuration?: number; /** The duration in seconds after which the call will be automatically shut down once the last participant leaves. */ participantLeftTimeout?: number; /** * Starting from conversation creation, the duration in seconds after which the call will be automatically shut down if no participant joins the call. * Default is 300 seconds (5 minutes). */ participantAbsentTimeout?: number; /** If true, the user will be able to record the conversation. */ enableRecording?: boolean; /** * If true, the user will be able to transcribe the conversation. * You can find more instructions on displaying transcriptions if you are using your custom DailyJS components here. * You need to have an event listener on Daily that listens for `app-messages`. */ enableTranscription?: boolean; /** * If true, the background will be replaced with a greenscreen (RGB values: `[0, 255, 155]`). * You can use WebGL on the frontend to make the greenscreen transparent or change its color. 
*/ applyGreenscreen?: boolean; /** * The language of the conversation. Please provide the **full language name**, not the two-letter code. * If you are using your own TTS voice, please ensure it supports the language you provide. * If you are using a stock replica or default persona, please note that only ElevenLabs and Cartesia supported languages are available. * You can find a full list of supported languages for Cartesia here, for ElevenLabs here, and for PlayHT here. */ language?: string; /** The name of the S3 bucket where the recording will be stored. */ recordingS3BucketName?: string; /** The region of the S3 bucket where the recording will be stored. */ recordingS3BucketRegion?: string; /** The ARN of the role that will be assumed to access the S3 bucket. */ awsAssumeRoleArn?: string; } export interface TavusVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "tavus"; /** This is the provider-specific ID that will be used. */ voiceId: "r52da2535a" | string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the unique identifier for the persona that the replica will use in the conversation. */ personaId?: string; /** This is the url that will receive webhooks with updates regarding the conversation state. */ callbackUrl?: string; /** This is the name for the conversation. */ conversationName?: string; /** This is the context that will be appended to any context provided in the persona, if one is provided. */ conversationalContext?: string; /** This is the custom greeting that the replica will give once a participant joins the conversation. */ customGreeting?: string; /** These are optional properties used to customize the conversation. 
*/ properties?: TavusConversationProperties; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface VapiVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "vapi"; /** The voices provided by Vapi */ voiceId: | "Elliot" | "Kylie" | "Rohan" | "Lily" | "Savannah" | "Hana" | "Neha" | "Cole" | "Harry" | "Paige" | "Spencer" | "Leah" | "Tara"; /** * This is the speed multiplier that will be used. * * @min 0.25 * @max 2 * @default 1 */ speed?: number; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface InworldVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. 
*/ provider: "inworld"; /** * Inworld Voice ID * Available voices by language: * • en: Alex, Ashley, Craig, Deborah, Dennis, Edward, Elizabeth, Hades, Julia, Pixie, Mark, Olivia, Priya, Ronald, Sarah, Shaun, Theodore, Timothy, Wendy, Dominus * • zh: Yichen, Xiaoyin, Xinyi, Jing * • nl: Erik, Katrien, Lennart, Lore * • fr: Alain, Hélène, Mathieu, Étienne * • de: Johanna, Josef * • it: Gianni, Orietta * • ja: Asuka, Satoshi * • ko: Hyunwoo, Minji, Seojun, Yoona * • pl: Szymon, Wojciech * • pt: Heitor, Maitê * • es: Diego, Lupita, Miguel, Rafael * @maxLength 120 * @example "Alex" */ voiceId: | "Alex" | "Ashley" | "Craig" | "Deborah" | "Dennis" | "Edward" | "Elizabeth" | "Hades" | "Julia" | "Pixie" | "Mark" | "Olivia" | "Priya" | "Ronald" | "Sarah" | "Shaun" | "Theodore" | "Timothy" | "Wendy" | "Dominus" | "Yichen" | "Xiaoyin" | "Xinyi" | "Jing" | "Erik" | "Katrien" | "Lennart" | "Lore" | "Alain" | "Hélène" | "Mathieu" | "Étienne" | "Johanna" | "Josef" | "Gianni" | "Orietta" | "Asuka" | "Satoshi" | "Hyunwoo" | "Minji" | "Seojun" | "Yoona" | "Szymon" | "Wojciech" | "Heitor" | "Maitê" | "Diego" | "Lupita" | "Miguel" | "Rafael"; /** * This is the model that will be used. * @default "inworld-tts-1" */ model?: "inworld-tts-1"; /** * Language code for Inworld TTS synthesis * @default "en" */ languageCode?: | "en" | "zh" | "ko" | "nl" | "fr" | "es" | "ja" | "de" | "it" | "pl" | "pt"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface MinimaxVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "minimax"; /** * This is the Minimax Voice ID * This is the provider-specific ID that will be used. 
Use a voice from MINIMAX_PREDEFINED_VOICES or a custom cloned voice ID. */ voiceId: string; /** * This is the model that will be used. Options are 'speech-02-hd' and 'speech-02-turbo'. * speech-02-hd is optimized for high-fidelity applications like voiceovers and audiobooks. * speech-02-turbo is designed for real-time applications with low latency. * * @default "speech-02-turbo" * @example "speech-02-turbo" */ model?: "speech-02-hd" | "speech-02-turbo" | "speech-2.5-turbo-preview"; /** * The emotion to use for the voice. If not provided, will use auto-detect mode. * Options include: 'happy', 'sad', 'angry', 'fearful', 'surprised', 'disgusted', 'neutral' * @example "happy" */ emotion?: string; /** * Voice pitch adjustment. Range from -12 to 12 semitones. * @min -12 * @max 12 * @default 0 * @example 0 */ pitch?: number; /** * Voice speed adjustment. Range from 0.5 to 2.0. * @min 0.5 * @max 2 * @default 1 * @example 1 */ speed?: number; /** * Voice volume adjustment. Range from 0.5 to 2.0. * @min 0.5 * @max 2 * @default 1 * @example 1 */ volume?: number; /** * The region for Minimax API. Defaults to "worldwide". * @default "worldwide" */ region?: "worldwide" | "china"; /** Language hint for MiniMax T2A. Example: yue (Cantonese), zh (Chinese), en (English). */ languageBoost?: | "Chinese" | "Chinese,Yue" | "English" | "Arabic" | "Russian" | "Spanish" | "French" | "Portuguese" | "German" | "Turkish" | "Dutch" | "Ukrainian" | "Vietnamese" | "Indonesian" | "Japanese" | "Italian" | "Korean" | "Thai" | "Polish" | "Romanian" | "Greek" | "Czech" | "Finnish" | "Hindi" | "Bulgarian" | "Danish" | "Hebrew" | "Malay" | "Persian" | "Slovak" | "Swedish" | "Croatian" | "Filipino" | "Hungarian" | "Norwegian" | "Slovenian" | "Catalan" | "Nynorsk" | "Tamil" | "Afrikaans" | "auto"; /** * Enable MiniMax text normalization to improve number reading and formatting. 
* @default true */ textNormalizationEnabled?: boolean; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; /** This is the plan for voice provider fallbacks in the event that the primary voice provider fails. */ fallbackPlan?: FallbackPlan; } export interface FallbackAzureVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "azure"; /** This is the provider-specific ID that will be used. */ voiceId: "andrew" | "brian" | "emma" | string; /** * This is the speed multiplier that will be used. * @min 0.5 * @max 2 */ speed?: number; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackCartesiaVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "cartesia"; /** The ID of the particular voice you want to use. */ voiceId: string; /** * This is the model that will be used. This is optional and will default to the correct model for the voiceId. * @example "sonic-english" */ model?: | "sonic-3" | "sonic-2" | "sonic-english" | "sonic-multilingual" | "sonic-preview" | "sonic"; /** * This is the language that will be used. This is optional and will default to the correct language for the voiceId. 
* @example "en" */ language?: | "ar" | "bg" | "bn" | "cs" | "da" | "de" | "el" | "en" | "es" | "fi" | "fr" | "gu" | "he" | "hi" | "hr" | "hu" | "id" | "it" | "ja" | "ka" | "kn" | "ko" | "ml" | "mr" | "ms" | "nl" | "no" | "pa" | "pl" | "pt" | "ro" | "ru" | "sk" | "sv" | "ta" | "te" | "th" | "tl" | "tr" | "uk" | "vi" | "zh"; /** Experimental controls for Cartesia voice generation */ experimentalControls?: CartesiaExperimentalControls; /** Generation config for fine-grained control of sonic-3 voice output (speed, volume, and experimental controls). Only available for sonic-3 model. */ generationConfig?: CartesiaGenerationConfig; /** * Pronunciation dictionary ID for sonic-3. Allows custom pronunciations for specific words. Only available for sonic-3 model. * @example "dict_abc123" */ pronunciationDictId?: string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackCustomVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported. */ provider: "custom-voice"; /** * This is where the voice request will be sent. * * Request Example: * * POST https://{server.url} * Content-Type: application/json * * { * "message": { * "type": "voice-request", * "text": "Hello, world!", * "sampleRate": 24000, * ...other metadata about the call... * } * } * * Response Expected: 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport: * ``` * response.on('data', (chunk: Buffer) => { * outputStream.write(chunk); * }); * ``` */ server: Server; /** This is the plan for chunking the model output before it is sent to the voice provider. 
*/ chunkPlan?: ChunkPlan; } export interface FallbackDeepgramVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "deepgram"; /** * This is the Deepgram Voice ID * This is the provider-specific ID that will be used. */ voiceId: | "asteria" | "luna" | "stella" | "athena" | "hera" | "orion" | "arcas" | "perseus" | "angus" | "orpheus" | "helios" | "zeus" | "thalia" | "andromeda" | "helena" | "apollo" | "aries" | "amalthea" | "atlas" | "aurora" | "callista" | "cora" | "cordelia" | "delia" | "draco" | "electra" | "harmonia" | "hermes" | "hyperion" | "iris" | "janus" | "juno" | "jupiter" | "mars" | "minerva" | "neptune" | "odysseus" | "ophelia" | "pandora" | "phoebe" | "pluto" | "saturn" | "selene" | "theia" | "vesta" | "celeste" | "estrella" | "nestor" | "sirio" | "carina" | "alvaro" | "diana" | "aquila" | "selena" | "javier"; /** * This is the model that will be used. Defaults to 'aura-2' when not specified. * @example "aura-2" */ model?: "aura" | "aura-2"; /** * If set to true, this will add mip_opt_out=true as a query parameter of all API requests. See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out * * This will only be used if you are using your own Deepgram API key. * * @default false * @example false */ mipOptOut?: boolean; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackElevenLabsVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "11labs"; /** This is the provider-specific ID that will be used. Ensure the Voice is present in your 11Labs Voice Library. 
*/ voiceId: | "burt" | "marissa" | "andrea" | "sarah" | "phillip" | "steve" | "joseph" | "myra" | "paula" | "ryan" | "drew" | "paul" | "mrb" | "matilda" | "mark" | string; /** * Defines the stability for voice settings. * @min 0 * @max 1 * @example 0.5 */ stability?: number; /** * Defines the similarity boost for voice settings. * @min 0 * @max 1 * @example 0.75 */ similarityBoost?: number; /** * Defines the style for voice settings. * @min 0 * @max 1 * @example 0 */ style?: number; /** * Defines the use speaker boost for voice settings. * @example false */ useSpeakerBoost?: boolean; /** * Defines the speed for voice settings. * @min 0.7 * @max 1.2 * @example 0.9 */ speed?: number; /** * Defines the optimize streaming latency for voice settings. Defaults to 3. * @min 0 * @max 4 * @example 3 */ optimizeStreamingLatency?: number; /** * This enables the use of https://elevenlabs.io/docs/speech-synthesis/prompting#pronunciation. Defaults to false to save latency. * * @default false * @example false */ enableSsmlParsing?: boolean; /** * Defines the auto mode for voice settings. Defaults to false. * @example false */ autoMode?: boolean; /** * This is the model that will be used. Defaults to 'eleven_turbo_v2' if not specified. * @example "eleven_turbo_v2_5" */ model?: | "eleven_multilingual_v2" | "eleven_turbo_v2" | "eleven_turbo_v2_5" | "eleven_flash_v2" | "eleven_flash_v2_5" | "eleven_monolingual_v1"; /** This is the language (ISO 639-1) that is enforced for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided. */ language?: string; /** This is the pronunciation dictionary locators to use. */ pronunciationDictionaryLocators?: ElevenLabsPronunciationDictionaryLocator[]; /** This is the plan for chunking the model output before it is sent to the voice provider. 
*/ chunkPlan?: ChunkPlan; } export interface FallbackHumeVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "hume"; /** * This is the model that will be used. * @example "octave2" */ model?: "octave" | "octave2"; /** The ID of the particular voice you want to use. */ voiceId: string; /** * Indicates whether the chosen voice is a preset Hume AI voice or a custom voice. * @example false */ isCustomHumeVoice?: boolean; /** * Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent (e.g., 'a soft, gentle voice with a strong British accent'). * * If a Voice is specified in the request, this description serves as acting instructions. * If no Voice is specified, a new voice is generated based on this description. */ description?: string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackLMNTVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "lmnt"; /** This is the provider-specific ID that will be used. */ voiceId: | "amy" | "ansel" | "autumn" | "ava" | "brandon" | "caleb" | "cassian" | "chloe" | "dalton" | "daniel" | "dustin" | "elowen" | "evander" | "huxley" | "james" | "juniper" | "kennedy" | "lauren" | "leah" | "lily" | "lucas" | "magnus" | "miles" | "morgan" | "natalie" | "nathan" | "noah" | "nyssa" | "oliver" | "paige" | "ryan" | "sadie" | "sophie" | "stella" | "terrence" | "tyler" | "vesper" | "violet" | "warrick" | "zain" | "zeke" | "zoe" | string; /** * This is the speed multiplier that will be used. 
* @min 0.25 * @max 2 * @example null */ speed?: number; /** * Two letter ISO 639-1 language code. Use "auto" for auto-detection. * @example "en" */ language?: | "aa" | "ab" | "ae" | "af" | "ak" | "am" | "an" | "ar" | "as" | "av" | "ay" | "az" | "ba" | "be" | "bg" | "bh" | "bi" | "bm" | "bn" | "bo" | "br" | "bs" | "ca" | "ce" | "ch" | "co" | "cr" | "cs" | "cu" | "cv" | "cy" | "da" | "de" | "dv" | "dz" | "ee" | "el" | "en" | "eo" | "es" | "et" | "eu" | "fa" | "ff" | "fi" | "fj" | "fo" | "fr" | "fy" | "ga" | "gd" | "gl" | "gn" | "gu" | "gv" | "ha" | "he" | "hi" | "ho" | "hr" | "ht" | "hu" | "hy" | "hz" | "ia" | "id" | "ie" | "ig" | "ii" | "ik" | "io" | "is" | "it" | "iu" | "ja" | "jv" | "ka" | "kg" | "ki" | "kj" | "kk" | "kl" | "km" | "kn" | "ko" | "kr" | "ks" | "ku" | "kv" | "kw" | "ky" | "la" | "lb" | "lg" | "li" | "ln" | "lo" | "lt" | "lu" | "lv" | "mg" | "mh" | "mi" | "mk" | "ml" | "mn" | "mr" | "ms" | "mt" | "my" | "na" | "nb" | "nd" | "ne" | "ng" | "nl" | "nn" | "no" | "nr" | "nv" | "ny" | "oc" | "oj" | "om" | "or" | "os" | "pa" | "pi" | "pl" | "ps" | "pt" | "qu" | "rm" | "rn" | "ro" | "ru" | "rw" | "sa" | "sc" | "sd" | "se" | "sg" | "si" | "sk" | "sl" | "sm" | "sn" | "so" | "sq" | "sr" | "ss" | "st" | "su" | "sv" | "sw" | "ta" | "te" | "tg" | "th" | "ti" | "tk" | "tl" | "tn" | "to" | "tr" | "ts" | "tt" | "tw" | "ty" | "ug" | "uk" | "ur" | "uz" | "ve" | "vi" | "vo" | "wa" | "wo" | "xh" | "yi" | "yue" | "yo" | "za" | "zh" | "zu" | "auto"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackNeuphonicVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "neuphonic"; /** This is the provider-specific ID that will be used. */ voiceId: string; /** * This is the model that will be used. Defaults to 'neu_fast' if not specified. 
 * @example "neu_fast"
 */
model?: "neu_hq" | "neu_fast";
/**
 * This is the language (ISO 639-1) that is enforced for the model.
 *
 * NOTE(review): typed `object` but documented as an ISO 639-1 code with
 * `@example "en"` — presumably this should be `string`. Likely a defect in the
 * upstream OpenAPI spec that the generator carried through; confirm against
 * the Neuphonic/Vapi API reference before relying on this type.
 * @example "en"
 */
language: object;
/**
 * This is the speed multiplier that will be used.
 * @min 0.25
 * @max 2
 * @example null
 */
speed?: number;
/** This is the plan for chunking the model output before it is sent to the voice provider. */
chunkPlan?: ChunkPlan;
}

export interface FallbackOpenAIVoice {
  /**
   * This is the flag to toggle voice caching for the assistant.
   * @default true
   * @example true
   */
  cachingEnabled?: boolean;
  /** This is the voice provider that will be used. */
  provider: "openai";
  /**
   * This is the provider-specific ID that will be used.
   * Please note that ash, ballad, coral, sage, and verse may only be used with realtime models.
   */
  voiceId:
    | "alloy"
    | "echo"
    | "fable"
    | "onyx"
    | "nova"
    | "shimmer"
    | "marin"
    | "cedar"
    | string;
  /** This is the model that will be used for text-to-speech. */
  model?: "tts-1" | "tts-1-hd" | "gpt-4o-mini-tts";
  /**
   * This is a prompt that allows you to control the voice of your generated audio.
   * Does not work with 'tts-1' or 'tts-1-hd' models.
   * @maxLength 10000
   */
  instructions?: string;
  /**
   * This is the speed multiplier that will be used.
   * @min 0.25
   * @max 4
   * @example null
   */
  speed?: number;
  /** This is the plan for chunking the model output before it is sent to the voice provider. */
  chunkPlan?: ChunkPlan;
}

export interface FallbackPlayHTVoice {
  /**
   * This is the flag to toggle voice caching for the assistant.
   * @default true
   * @example true
   */
  cachingEnabled?: boolean;
  /** This is the voice provider that will be used. */
  provider: "playht";
  /** This is the provider-specific ID that will be used. */
  voiceId:
    | "jennifer"
    | "melissa"
    | "will"
    | "chris"
    | "matt"
    | "jack"
    | "ruby"
    | "davis"
    | "donna"
    | "michael"
    | string;
  /**
   * This is the speed multiplier that will be used.
 * @min 0.1
 * @max 5
 * @example null
 */
speed?: number;
/**
 * A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature will be used. The temperature parameter controls variance. Lower temperatures result in more predictable results, higher temperatures allow each run to vary more, so the voice may sound less like the baseline voice.
 * @min 0.1
 * @max 2
 * @example null
 */
temperature?: number;
/**
 * An emotion to be applied to the speech.
 * @example null
 */
emotion?:
  | "female_happy"
  | "female_sad"
  | "female_angry"
  | "female_fearful"
  | "female_disgust"
  | "female_surprised"
  | "male_happy"
  | "male_sad"
  | "male_angry"
  | "male_fearful"
  | "male_disgust"
  | "male_surprised";
/**
 * A number between 1 and 6. Use lower numbers to reduce how unique your chosen voice will be compared to other voices.
 * @min 1
 * @max 6
 * @example null
 */
voiceGuidance?: number;
/**
 * A number between 1 and 30. Use lower numbers to reduce how strong your chosen emotion will be. Higher numbers will create a very emotional performance.
 * @min 1
 * @max 30
 * @example null
 */
styleGuidance?: number;
/**
 * A number between 1 and 2. This number influences how closely the generated speech adheres to the input text. Use lower values to create more fluid speech, but with a higher chance of deviating from the input text. Higher numbers will make the generated speech more accurate to the input text, ensuring that the words spoken align closely with the provided text.
 * @min 1
 * @max 2
 * @example null
 */
textGuidance?: number;
/** Playht voice model/engine to use. */
model?: "PlayHT2.0" | "PlayHT2.0-turbo" | "Play3.0-mini" | "PlayDialog";
/** The language to use for the speech.
*/ language?: | "afrikaans" | "albanian" | "amharic" | "arabic" | "bengali" | "bulgarian" | "catalan" | "croatian" | "czech" | "danish" | "dutch" | "english" | "french" | "galician" | "german" | "greek" | "hebrew" | "hindi" | "hungarian" | "indonesian" | "italian" | "japanese" | "korean" | "malay" | "mandarin" | "polish" | "portuguese" | "russian" | "serbian" | "spanish" | "swedish" | "tagalog" | "thai" | "turkish" | "ukrainian" | "urdu" | "xhosa"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackRimeAIVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "rime-ai"; /** This is the provider-specific ID that will be used. */ voiceId: | "abbie" | "allison" | "ally" | "alona" | "amber" | "ana" | "antoine" | "armon" | "brenda" | "brittany" | "carol" | "colin" | "courtney" | "elena" | "elliot" | "eva" | "geoff" | "gerald" | "hank" | "helen" | "hera" | "jen" | "joe" | "joy" | "juan" | "kendra" | "kendrick" | "kenneth" | "kevin" | "kris" | "linda" | "madison" | "marge" | "marina" | "marissa" | "marta" | "maya" | "nicholas" | "nyles" | "phil" | "reba" | "rex" | "rick" | "ritu" | "rob" | "rodney" | "rohan" | "rosco" | "samantha" | "sandy" | "selena" | "seth" | "sharon" | "stan" | "tamra" | "tanya" | "tibur" | "tj" | "tyler" | "viv" | "yadira" | "marsh" | "bayou" | "creek" | "brook" | "flower" | "spore" | "glacier" | "gulch" | "alpine" | "cove" | "lagoon" | "tundra" | "steppe" | "mesa" | "grove" | "rainforest" | "moraine" | "wildflower" | "peak" | "boulder" | "gypsum" | "zest" | "luna" | "celeste" | "orion" | "ursa" | "astra" | "esther" | "estelle" | "andromeda" | string; /** * This is the model that will be used. Defaults to 'arcana' when not specified. 
* @example "arcana" */ model?: "arcana" | "mistv2" | "mist"; /** * This is the speed multiplier that will be used. * @min 0.1 * @example null */ speed?: number; /** * This is a flag that controls whether to add slight pauses using angle brackets. Example: "Hi. <200> I'd love to have a conversation with you." adds a 200ms pause between the first and second sentences. * @example false */ pauseBetweenBrackets?: boolean; /** * This is a flag that controls whether text inside brackets should be phonemized (converted to phonetic pronunciation) - Example: "{h'El.o} World" will pronounce "Hello" as expected. * @example false */ phonemizeBetweenBrackets?: boolean; /** * This is a flag that controls whether to optimize for reduced latency in streaming. https://docs.rime.ai/api-reference/endpoint/websockets#param-reduce-latency * @example false */ reduceLatency?: boolean; /** * This is a string that allows inline speed control using alpha notation. https://docs.rime.ai/api-reference/endpoint/websockets#param-inline-speed-alpha * @example null */ inlineSpeedAlpha?: string; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackSesameVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "sesame"; /** * Sesame Voice ID. This should be either a name (a built-in voice) or a UUID (a custom voice). * This is the provider-specific ID that will be used. */ voiceId: string; /** This is the model that will be used. */ model: "csm-1b"; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackSmallestAIVoice { /** * This is the flag to toggle voice caching for the assistant. 
 * @default true
 * @example true
 */
cachingEnabled?: boolean;
/** This is the voice provider that will be used. */
provider: "smallest-ai";
/** This is the provider-specific ID that will be used. */
voiceId:
  | "emily"
  | "jasmine"
  | "arman"
  | "james"
  | "mithali"
  | "aravind"
  | "raj"
  | "diya"
  | "raman"
  | "ananya"
  | "isha"
  | "william"
  | "aarav"
  | "monika"
  | "niharika"
  | "deepika"
  | "raghav"
  | "kajal"
  | "radhika"
  | "mansi"
  | "nisha"
  | "saurabh"
  | "pooja"
  | "saina"
  | "sanya"
  | string;
/** Smallest AI voice model to use. Defaults to 'lightning' when not specified. */
model?: "lightning";
/**
 * This is the speed multiplier that will be used.
 * @example null
 */
speed?: number;
/** This is the plan for chunking the model output before it is sent to the voice provider. */
chunkPlan?: ChunkPlan;
}

export interface FallbackTavusVoice {
  /**
   * This is the flag to toggle voice caching for the assistant.
   * @default true
   * @example true
   */
  cachingEnabled?: boolean;
  /** This is the voice provider that will be used. */
  provider: "tavus";
  /** This is the provider-specific ID that will be used. */
  voiceId: "r52da2535a" | string;
  /** This is the unique identifier for the persona that the replica will use in the conversation. */
  personaId?: string;
  /** This is the url that will receive webhooks with updates regarding the conversation state. */
  callbackUrl?: string;
  /** This is the name for the conversation. */
  conversationName?: string;
  /** This is the context that will be appended to any context provided in the persona, if one is provided. */
  conversationalContext?: string;
  /** This is the custom greeting that the replica will give once a participant joins the conversation. */
  customGreeting?: string;
  /** These are optional properties used to customize the conversation. */
  properties?: TavusConversationProperties;
  /** This is the plan for chunking the model output before it is sent to the voice provider.
*/ chunkPlan?: ChunkPlan; } export interface FallbackVapiVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "vapi"; /** The voices provided by Vapi */ voiceId: | "Elliot" | "Kylie" | "Rohan" | "Lily" | "Savannah" | "Hana" | "Neha" | "Cole" | "Harry" | "Paige" | "Spencer" | "Leah" | "Tara"; /** * This is the speed multiplier that will be used. * * @default 1 * @min 0.25 * @max 2 * @default 1 */ speed?: number; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface FallbackInworldVoice { /** * This is the flag to toggle voice caching for the assistant. * @default true * @example true */ cachingEnabled?: boolean; /** This is the voice provider that will be used. */ provider: "inworld"; /** * Inworld Voice ID * Available voices by language: * • en: Alex, Ashley, Craig, Deborah, Dennis, Edward, Elizabeth, Hades, Julia, Pixie, Mark, Olivia, Priya, Ronald, Sarah, Shaun, Theodore, Timothy, Wendy, Dominus * • zh: Yichen, Xiaoyin, Xinyi, Jing * • nl: Erik, Katrien, Lennart, Lore * • fr: Alain, Hélène, Mathieu, Étienne * • de: Johanna, Josef * • it: Gianni, Orietta * • ja: Asuka, Satoshi * • ko: Hyunwoo, Minji, Seojun, Yoona * • pl: Szymon, Wojciech * • pt: Heitor, Maitê * • es: Diego, Lupita, Miguel, Rafael * @maxLength 120 * @example "Alex" */ voiceId: | "Alex" | "Ashley" | "Craig" | "Deborah" | "Dennis" | "Edward" | "Elizabeth" | "Hades" | "Julia" | "Pixie" | "Mark" | "Olivia" | "Priya" | "Ronald" | "Sarah" | "Shaun" | "Theodore" | "Timothy" | "Wendy" | "Dominus" | "Yichen" | "Xiaoyin" | "Xinyi" | "Jing" | "Erik" | "Katrien" | "Lennart" | "Lore" | "Alain" | "Hélène" | "Mathieu" | "Étienne" | "Johanna" | "Josef" | "Gianni" | "Orietta" | "Asuka" | "Satoshi" | "Hyunwoo" | "Minji" | "Seojun" | "Yoona" | "Szymon" | "Wojciech" | "Heitor" | "Maitê" | 
"Diego" | "Lupita" | "Miguel" | "Rafael";
/**
 * This is the model that will be used.
 * @default "inworld-tts-1"
 */
model?: "inworld-tts-1";
/**
 * Language code for Inworld TTS synthesis
 * @default "en"
 */
languageCode?:
  | "en"
  | "zh"
  | "ko"
  | "nl"
  | "fr"
  | "es"
  | "ja"
  | "de"
  | "it"
  | "pl"
  | "pt";
/** This is the plan for chunking the model output before it is sent to the voice provider. */
chunkPlan?: ChunkPlan;
}

export interface FallbackMinimaxVoice {
  /**
   * This is the flag to toggle voice caching for the assistant.
   * @default true
   * @example true
   */
  cachingEnabled?: boolean;
  /** This is the voice provider that will be used. */
  provider: "minimax";
  /**
   * This is the Minimax Voice ID
   * This is the provider-specific ID that will be used. Use a voice from MINIMAX_PREDEFINED_VOICES or a custom cloned voice ID.
   */
  voiceId: string;
  /**
   * This is the model that will be used. Options are 'speech-02-hd' and 'speech-02-turbo'.
   * speech-02-hd is optimized for high-fidelity applications like voiceovers and audiobooks.
   * speech-02-turbo is designed for real-time applications with low latency.
   *
   * @default "speech-02-turbo"
   * @example "speech-02-turbo"
   */
  model?: "speech-02-hd" | "speech-02-turbo" | "speech-2.5-turbo-preview";
  /**
   * The emotion to use for the voice. If not provided, will use auto-detect mode.
   * Options include: 'happy', 'sad', 'angry', 'fearful', 'surprised', 'disgusted', 'neutral'
   * @example "happy"
   */
  emotion?: string;
  /**
   * Voice pitch adjustment. Range from -12 to 12 semitones.
   * @default 0
   * @min -12
   * @max 12
   * @example 0
   */
  pitch?: number;
  /**
   * Voice speed adjustment. Range from 0.5 to 2.0.
   * @default 1
   * @min 0.5
   * @max 2
   * @example 1
   */
  speed?: number;
  /**
   * Voice volume adjustment. Range from 0.5 to 2.0.
   * @default 1
   * @min 0.5
   * @max 2
   * @example 1
   */
  volume?: number;
  /**
   * The region for Minimax API. Defaults to "worldwide".
* @default "worldwide" */ region?: "worldwide" | "china"; /** Language hint for MiniMax T2A. Example: yue (Cantonese), zh (Chinese), en (English). */ languageBoost?: | "Chinese" | "Chinese,Yue" | "English" | "Arabic" | "Russian" | "Spanish" | "French" | "Portuguese" | "German" | "Turkish" | "Dutch" | "Ukrainian" | "Vietnamese" | "Indonesian" | "Japanese" | "Italian" | "Korean" | "Thai" | "Polish" | "Romanian" | "Greek" | "Czech" | "Finnish" | "Hindi" | "Bulgarian" | "Danish" | "Hebrew" | "Malay" | "Persian" | "Slovak" | "Swedish" | "Croatian" | "Filipino" | "Hungarian" | "Norwegian" | "Slovenian" | "Catalan" | "Nynorsk" | "Tamil" | "Afrikaans" | "auto"; /** * Enable MiniMax text normalization to improve number reading and formatting. * @default true */ textNormalizationEnabled?: boolean; /** This is the plan for chunking the model output before it is sent to the voice provider. */ chunkPlan?: ChunkPlan; } export interface TransportConfigurationTwilio { provider: "twilio"; /** * The integer number of seconds that we should allow the phone to ring before assuming there is no answer. * The default is `60` seconds and the maximum is `600` seconds. * For some call flows, we will add a 5-second buffer to the timeout value you provide. * For this reason, a timeout value of 10 seconds could result in an actual timeout closer to 15 seconds. * You can set this to a short time, such as `15` seconds, to hang up before reaching an answering machine or voicemail. * * @default 60 * @min 1 * @max 600 * @example 60 */ timeout?: number; /** * Whether to record the call. * Can be `true` to record the phone call, or `false` to not. * The default is `false`. * * @default false * @example false */ record?: boolean; /** * The number of channels in the final recording. * Can be: `mono` or `dual`. * The default is `mono`. * `mono` records both legs of the call in a single channel of the recording file. * `dual` records each leg to a separate channel of the recording file. 
* The first channel of a dual-channel recording contains the parent call and the second channel contains the child call. * * @default 'mono' * @example "mono" */ recordingChannels?: "mono" | "dual"; } export interface CreateAnthropicCredentialDTO { provider: "anthropic"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateAnyscaleCredentialDTO { provider: "anyscale"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateAssemblyAICredentialDTO { provider: "assembly-ai"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface AzureBlobStorageBucketPlan { /** This is the blob storage connection string for the Azure resource. */ connectionString: string; /** This is the container name for the Azure blob storage. */ containerName: string; /** * This is the path where call artifacts will be stored. * * Usage: * - To store call artifacts in a specific folder, set this to the full path. Eg. "/folder-name1/folder-name2". * - To store call artifacts in the root of the bucket, leave this blank. * * @default "/" */ path?: string; } export interface CreateAzureCredentialDTO { provider: "azure"; /** * This is the service being used in Azure. * @default "speech" */ service: "speech" | "blob_storage"; /** This is the region of the Azure resource. 
*/ region?: | "australia" | "canadaeast" | "canadacentral" | "eastus2" | "eastus" | "france" | "india" | "japaneast" | "japanwest" | "uaenorth" | "northcentralus" | "norway" | "southcentralus" | "swedencentral" | "switzerland" | "uk" | "westus" | "westus3"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** This is the bucket plan that can be provided to store call artifacts in Azure Blob Storage. */ bucketPlan?: AzureBlobStorageBucketPlan; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateAzureOpenAICredentialDTO { provider: "azure-openai"; region: | "australia" | "canadaeast" | "canadacentral" | "eastus2" | "eastus" | "france" | "india" | "japaneast" | "japanwest" | "uaenorth" | "northcentralus" | "norway" | "southcentralus" | "swedencentral" | "switzerland" | "uk" | "westus" | "westus3"; /** @example ["gpt-4-0125-preview","gpt-4-0613"] */ models: | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gpt-4.1-2025-04-14" | "gpt-4.1-mini-2025-04-14" | "gpt-4.1-nano-2025-04-14" | "gpt-4o-2024-11-20" | "gpt-4o-2024-08-06" | "gpt-4o-2024-05-13" | "gpt-4o-mini-2024-07-18" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" | "gpt-4-1106-preview" | "gpt-4-0613" | "gpt-35-turbo-0125" | "gpt-35-turbo-1106"; /** * This is not returned in the API. * @maxLength 10000 */ openAIKey: string; /** This is not returned in the API. */ ocpApimSubscriptionKey?: string; /** @maxLength 10000 */ openAIEndpoint: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface SipTrunkGateway { /** This is the address of the gateway. 
It can be an IPv4 address like 1.1.1.1 or a fully qualified domain name like my-sip-trunk.pstn.twilio.com. */
ip: string;
/**
 * This is the port number of the gateway. Default is 5060.
 *
 * @default 5060
 * @min 1
 * @max 65535
 */
port?: number;
/**
 * This is the netmask of the gateway. Defaults to 32.
 *
 * @default 32
 * @min 24
 * @max 32
 */
netmask?: number;
/**
 * This is whether inbound calls are allowed from this gateway. Default is true.
 *
 * @default true
 */
inboundEnabled?: boolean;
/**
 * This is whether outbound calls should be sent to this gateway. Default is true.
 *
 * Note, if netmask is less than 32, it doesn't affect the outbound IPs that are tried. 1 attempt is made to `ip:port`.
 *
 * @default true
 */
outboundEnabled?: boolean;
/**
 * This is the protocol to use for SIP signaling outbound calls. Default is udp.
 *
 * @default "udp"
 */
outboundProtocol?: "tls/srtp" | "tcp" | "tls" | "udp";
/**
 * This is whether to send options ping to the gateway. This can be used to check if the gateway is reachable. Default is false.
 *
 * This is useful for high availability setups where you want to check if the gateway is reachable before routing calls to it. Note, if no gateway for a trunk is reachable, outbound calls will be rejected.
 *
 * @default false
 */
optionsPingEnabled?: boolean;
}

export interface SipTrunkOutboundSipRegisterPlan {
  /** NOTE(review): undocumented in the generated spec — presumably the domain to use in the SIP REGISTER request; confirm against the upstream API reference. */
  domain?: string;
  /** NOTE(review): undocumented in the generated spec — presumably the username for SIP registration; confirm against the upstream API reference. */
  username?: string;
  /** NOTE(review): undocumented in the generated spec — presumably the SIP authentication realm; confirm against the upstream API reference. */
  realm?: string;
}

export interface SipTrunkOutboundAuthenticationPlan {
  /** This is not returned in the API. */
  authPassword?: string;
  authUsername?: string;
  /** This can be used to configure if SIP register is required by the SIP trunk. If not provided, no SIP registration will be attempted. */
  sipRegisterPlan?: SipTrunkOutboundSipRegisterPlan;
}

/** NOTE(review): opaque in the generated spec — an untyped object holding onprem SBC configuration; no schema is published for it here. */
export type SbcConfiguration = object;

export interface CreateByoSipTrunkCredentialDTO {
  /** This can be used to bring your own SIP trunks or to connect to a Carrier. */
  provider?: "byo-sip-trunk";
  /** This is the list of SIP trunk's gateways.
*/ gateways: SipTrunkGateway[]; /** This can be used to configure the outbound authentication if required by the SIP trunk. */ outboundAuthenticationPlan?: SipTrunkOutboundAuthenticationPlan; /** * This ensures the outbound origination attempts have a leading plus. Defaults to false to match conventional telecom behavior. * * Usage: * - Vonage/Twilio requires leading plus for all outbound calls. Set this to true. * * @default false */ outboundLeadingPlusEnabled?: boolean; /** * This can be used to configure the tech prefix on outbound calls. This is an advanced property. * @maxLength 10000 */ techPrefix?: string; /** * This can be used to enable the SIP diversion header for authenticating the calling number if the SIP trunk supports it. This is an advanced property. * @maxLength 10000 */ sipDiversionHeader?: string; /** This is an advanced configuration for enterprise deployments. This uses the onprem SBC to trunk into the SIP trunk's `gateways`, rather than the managed SBC provided by Vapi. */ sbcConfiguration?: SbcConfiguration; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateCartesiaCredentialDTO { provider: "cartesia"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CloudflareR2BucketPlan { /** Cloudflare R2 Access key ID. */ accessKeyId?: string; /** Cloudflare R2 access key secret. This is not returned in the API. */ secretAccessKey?: string; /** Cloudflare R2 base url. */ url?: string; /** This is the name of the bucket. */ name: string; /** * This is the path where call artifacts will be stored. * * Usage: * - To store call artifacts in a specific folder, set this to the full path. Eg. "/folder-name1/folder-name2". * - To store call artifacts in the root of the bucket, leave this blank. 
* * @default "/" */ path?: string; } export interface CreateCloudflareCredentialDTO { /** Credential provider. Only allowed value is cloudflare */ provider: "cloudflare"; /** Cloudflare Account Id. */ accountId?: string; /** Cloudflare API Key / Token. */ apiKey?: string; /** Cloudflare Account Email. */ accountEmail?: string; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** This is the bucket plan that can be provided to store call artifacts in R2 */ bucketPlan?: CloudflareR2BucketPlan; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface OAuth2AuthenticationPlan { type: "oauth2"; /** This is the OAuth2 URL. */ url: string; /** This is the OAuth2 client ID. */ clientId: string; /** This is the OAuth2 client secret. */ clientSecret: string; /** * This is the scope of the OAuth2 token. * @maxLength 1000 */ scope?: string; } export interface CreateCustomLLMCredentialDTO { provider: "custom-llm"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** This is the authentication plan. Currently supports OAuth2 RFC 6749. To use Bearer authentication, use apiKey */ authenticationPlan?: OAuth2AuthenticationPlan; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateDeepgramCredentialDTO { provider: "deepgram"; /** This is not returned in the API. */ apiKey: string; /** This can be used to point to an onprem Deepgram instance. Defaults to api.deepgram.com. */ apiUrl?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateDeepInfraCredentialDTO { provider: "deepinfra"; /** This is not returned in the API. 
*/ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateDeepSeekCredentialDTO { provider: "deep-seek"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateElevenLabsCredentialDTO { provider: "11labs"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface GcpKey { /** This is the type of the key. Most likely, this is "service_account". */ type: string; /** This is the ID of the Google Cloud project associated with this key. */ projectId: string; /** This is the unique identifier for the private key. */ privateKeyId: string; /** * This is the private key in PEM format. * * Note: This is not returned in the API. */ privateKey: string; /** This is the email address associated with the service account. */ clientEmail: string; /** This is the unique identifier for the client. */ clientId: string; /** This is the URI for the auth provider's authorization endpoint. */ authUri: string; /** This is the URI for the auth provider's token endpoint. */ tokenUri: string; /** This is the URL of the public x509 certificate for the auth provider. */ authProviderX509CertUrl: string; /** This is the URL of the public x509 certificate for the client. */ clientX509CertUrl: string; /** This is the domain associated with the universe this service account belongs to. */ universeDomain: string; } export interface BucketPlan { /** This is the name of the bucket. */ name: string; /** * This is the region of the bucket. * * Usage: * - If `credential.type` is `aws`, then this is required. 
* - If `credential.type` is `gcp`, then this is optional since GCP allows buckets to be accessed without a region but region is required for data residency requirements. Read here: https://cloud.google.com/storage/docs/request-endpoints * * This overrides the `credential.region` field if it is provided. */ region?: string; /** * This is the path where call artifacts will be stored. * * Usage: * - To store call artifacts in a specific folder, set this to the full path. Eg. "/folder-name1/folder-name2". * - To store call artifacts in the root of the bucket, leave this blank. * * @default "/" */ path?: string; /** * This is the HMAC access key offered by GCP for interoperability with S3 clients. Here is the guide on how to create: https://cloud.google.com/storage/docs/authentication/managing-hmackeys#console * * Usage: * - If `credential.type` is `gcp`, then this is required. * - If `credential.type` is `aws`, then this is not required since credential.awsAccessKeyId is used instead. */ hmacAccessKey?: string; /** * This is the secret for the HMAC access key. Here is the guide on how to create: https://cloud.google.com/storage/docs/authentication/managing-hmackeys#console * * Usage: * - If `credential.type` is `gcp`, then this is required. * - If `credential.type` is `aws`, then this is not required since credential.awsSecretAccessKey is used instead. * * Note: This is not returned in the API. */ hmacSecret?: string; } export interface CreateGcpCredentialDTO { provider: "gcp"; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** * This is the GCP key. This is the JSON that can be generated in the Google Cloud Console at https://console.cloud.google.com/iam-admin/serviceaccounts/details//keys. * * The schema is identical to the JSON that GCP outputs. */ gcpKey: GcpKey; /** * This is the region of the GCP resource. 
* @maxLength 40 */ region?: string; bucketPlan?: BucketPlan; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGladiaCredentialDTO { provider: "gladia"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGoHighLevelCredentialDTO { provider: "gohighlevel"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGroqCredentialDTO { provider: "groq"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateLangfuseCredentialDTO { provider: "langfuse"; /** The public key for Langfuse project. Eg: pk-lf-... */ publicKey: string; /** The secret key for Langfuse project. Eg: sk-lf-... .This is not returned in the API. */ apiKey: string; /** The host URL for Langfuse project. Eg: https://cloud.langfuse.com */ apiUrl: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateLmntCredentialDTO { provider: "lmnt"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateMakeCredentialDTO { provider: "make"; /** Team ID */ teamId: string; /** Region of your application. For example: eu1, eu2, us1, us2 */ region: string; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateOpenAICredentialDTO { provider: "openai"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateOpenRouterCredentialDTO { provider: "openrouter"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreatePerplexityAICredentialDTO { provider: "perplexity-ai"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreatePlayHTCredentialDTO { provider: "playht"; /** This is not returned in the API. */ apiKey: string; userId: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateRimeAICredentialDTO { provider: "rime-ai"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateRunpodCredentialDTO { provider: "runpod"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateS3CredentialDTO { /** Credential provider. Only allowed value is s3 */ provider: "s3"; /** AWS access key ID. */ awsAccessKeyId: string; /** AWS access key secret. This is not returned in the API. */ awsSecretAccessKey: string; /** AWS region in which the S3 bucket is located. */ region: string; /** AWS S3 bucket name. */ s3BucketName: string; /** The path prefix for the uploaded recording. Ex. 
"recordings/" */ s3PathPrefix: string; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface SupabaseBucketPlan { /** * This is the S3 Region. It should look like us-east-1 * It should be one of the supabase regions defined in the SUPABASE_REGION enum * Check https://supabase.com/docs/guides/platform/regions for up to date regions */ region: | "us-west-1" | "us-east-1" | "us-east-2" | "ca-central-1" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "eu-central-1" | "eu-central-2" | "eu-north-1" | "ap-south-1" | "ap-southeast-1" | "ap-northeast-1" | "ap-northeast-2" | "ap-southeast-2" | "sa-east-1"; /** * This is the S3 compatible URL for Supabase S3 * This should look like https://.supabase.co/storage/v1/s3 */ url: string; /** * This is the Supabase S3 Access Key ID. * The user creates this in the Supabase project Storage settings */ accessKeyId: string; /** * This is the Supabase S3 Secret Access Key. * The user creates this in the Supabase project Storage settings along with the access key id */ secretAccessKey: string; /** * This is the Supabase S3 Bucket Name. * The user must create this in Supabase under Storage > Buckets * A bucket that does not exist will not be checked now, but file uploads will fail */ name: string; /** * This is the Supabase S3 Bucket Folder Path. * The user can create this in Supabase under Storage > Buckets * A path that does not exist will not be checked now, but file uploads will fail * A Path is like a folder in the bucket * Eg. If the bucket is called "my-bucket" and the path is "my-folder", the full path is "my-bucket/my-folder" */ path?: string; } export interface CreateSupabaseCredentialDTO { /** This is for supabase storage. 
*/ provider: "supabase"; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; bucketPlan?: SupabaseBucketPlan; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateSmallestAICredentialDTO { provider: "smallest-ai"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateTavusCredentialDTO { provider: "tavus"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateTogetherAICredentialDTO { provider: "together-ai"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateTwilioCredentialDTO { provider: "twilio"; /** This is not returned in the API. */ authToken?: string; /** This is not returned in the API. */ apiKey?: string; /** This is not returned in the API. */ apiSecret?: string; accountSid: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateVonageCredentialDTO { provider: "vonage"; /** This is not returned in the API. */ apiSecret: string; apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateWebhookCredentialDTO { provider: "webhook"; /** This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication. 
*/ authenticationPlan: | ({ type: "oauth2"; } & OAuth2AuthenticationPlan) | ({ type: "hmac"; } & HMACAuthenticationPlan) | ({ type: "bearer"; } & BearerAuthenticationPlan); /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateXAiCredentialDTO { /** This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai */ provider: "xai"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGoogleCalendarOAuth2ClientCredentialDTO { provider: "google.calendar.oauth2-client"; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGoogleCalendarOAuth2AuthorizationCredentialDTO { provider: "google.calendar.oauth2-authorization"; /** The authorization ID for the OAuth2 authorization */ authorizationId: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGoogleSheetsOAuth2AuthorizationCredentialDTO { provider: "google.sheets.oauth2-authorization"; /** The authorization ID for the OAuth2 authorization */ authorizationId: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateSlackOAuth2AuthorizationCredentialDTO { provider: "slack.oauth2-authorization"; /** The authorization ID for the OAuth2 authorization */ authorizationId: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateMinimaxCredentialDTO { provider: "minimax"; /** This is not returned in the API. */ apiKey: string; /** This is the Minimax Group ID. 
*/ groupId: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface SQLInjectionSecurityFilter { /** The type of security threat to filter. */ type: "sql-injection"; } export interface XSSSecurityFilter { /** The type of security threat to filter. */ type: "xss"; } export interface SSRFSecurityFilter { /** The type of security threat to filter. */ type: "ssrf"; } export interface RCESecurityFilter { /** The type of security threat to filter. */ type: "rce"; } export interface PromptInjectionSecurityFilter { /** The type of security threat to filter. */ type: "prompt-injection"; } export interface RegexSecurityFilter { /** The type of security threat to filter. */ type: "regex"; /** * The regex pattern to filter. * @example "badword1|badword2" */ regex: string; } export interface AssistantOverrides { /** These are the options for the assistant's transcriber. */ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** These are the options for the assistant's LLM. */ model?: | AnthropicModel | AnyscaleModel | CerebrasModel | CustomLLMModel | DeepInfraModel | DeepSeekModel | GoogleModel | GroqModel | InflectionAIModel | OpenAIModel | OpenRouterModel | PerplexityAIModel | TogetherAIModel | XaiModel; /** These are the options for the assistant's voice. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.). 
* * If unspecified, assistant will wait for user to speak and use the model to respond once they speak. * @example "Hello! How can I help you today?" */ firstMessage?: string; /** @default false */ firstMessageInterruptionsEnabled?: boolean; /** * This is the mode for the first message. Default is 'assistant-speaks-first'. * * Use: * - 'assistant-speaks-first' to have the assistant speak first. * - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. * - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points). * * @default 'assistant-speaks-first' * @example "assistant-speaks-first" */ firstMessageMode?: | "assistant-speaks-first" | "assistant-speaks-first-with-model-generated-message" | "assistant-waits-for-user"; /** * These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool]. * By default, voicemail detection is disabled. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema. 
* @example ["conversation-update","function-call","hang","model-output","speech-update","status-update","transfer-update","transcript","tool-calls","user-interrupted","voice-input","workflow.node.started","assistant.started"] */ clientMessages?: | "conversation-update" | "function-call" | "function-call-result" | "hang" | "language-changed" | "metadata" | "model-output" | "speech-update" | "status-update" | "transcript" | "tool-calls" | "tool-calls-result" | "tool.completed" | "transfer-update" | "user-interrupted" | "voice-input" | "workflow.node.started" | "assistant.started"; /** * These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema. * @example ["conversation-update","end-of-call-report","function-call","hang","speech-update","status-update","tool-calls","transfer-destination-request","handoff-destination-request","user-interrupted","assistant.started"] */ serverMessages?: | "assistant.started" | "conversation-update" | "end-of-call-report" | "function-call" | "hang" | "language-changed" | "language-change-detected" | "model-output" | "phone-call-control" | "speech-update" | "status-update" | "transcript" | "transcript[transcriptType='final']" | "tool-calls" | "transfer-destination-request" | "handoff-destination-request" | "transfer-update" | "user-interrupted" | "voice-input" | "chat.created" | "chat.deleted" | "session.created" | "session.updated" | "session.deleted" | "call.deleted" | "call.delete.failed"; /** * This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended. * * @default 600 (10 minutes) * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; /** * This is the background sound in the call. 
Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** * This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech. * * Default `false` while in beta. * * @default false * @example false */ modelOutputInMessagesEnabled?: boolean; /** These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used. */ transportConfigurations?: TransportConfigurationTwilio[]; /** * This is the plan for observability of assistant's calls. * * Currently, only Langfuse is supported. */ observabilityPlan?: LangfuseObservabilityPlan; /** These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. 
*/ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: 
"together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is a set of actions that will be performed on certain events. 
*/ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout )[]; "tools:append"?: ( | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO )[]; /** * These are values that will be used to replace the template variables in the assistant messages and other text-based fields. * This uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html * * So for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`. * `{{"now" | date: "%b %d, %Y, %I:%M %p", "America/New_York"}}` will be replaced with the current date and time in New York. * Some VAPI reserved defaults: * - *customer* - the customer object */ variableValues?: object; /** * This is the name of the assistant. * * This is required when you want to transfer between assistants in a call. * @maxLength 40 */ name?: string; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; /** * This is the message that the assistant will say if it ends the call. * * If unspecified, it will hang up without saying anything. * @maxLength 1000 */ endCallMessage?: string; /** This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive. 
*/ endCallPhrases?: string[]; compliancePlan?: CompliancePlan; /** This is for metadata you want to store on the assistant. */ metadata?: object; /** * This enables filtering of noise and background speech while the user is talking. * * Features: * - Smart denoising using Krisp * - Fourier denoising * * Smart denoising can be combined with or used independently of Fourier denoising. * * Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** This is the plan for analysis of assistant's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the assistant should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when assistant should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. * - The assistant is too fast to recognize customer's interruption. * - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the assistant's calls. * * Usage: * - To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`. 
* - To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`. */ monitorPlan?: MonitorPlan; /** These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */ credentialIds?: string[]; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. assistant.server.url * 2. phoneNumber.serverUrl * 3. org.serverUrl */ server?: Server; keypadInputPlan?: KeypadInputPlan; } export interface CreateAssistantDTO { /** These are the options for the assistant's transcriber. */ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** These are the options for the assistant's LLM. */ model?: | AnthropicModel | AnyscaleModel | CerebrasModel | CustomLLMModel | DeepInfraModel | DeepSeekModel | GoogleModel | GroqModel | InflectionAIModel | OpenAIModel | OpenRouterModel | PerplexityAIModel | TogetherAIModel | XaiModel; /** These are the options for the assistant's voice. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.). * * If unspecified, assistant will wait for user to speak and use the model to respond once they speak. * @example "Hello! How can I help you today?" 
*/ firstMessage?: string; /** @default false */ firstMessageInterruptionsEnabled?: boolean; /** * This is the mode for the first message. Default is 'assistant-speaks-first'. * * Use: * - 'assistant-speaks-first' to have the assistant speak first. * - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. * - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points). * * @default 'assistant-speaks-first' * @example "assistant-speaks-first" */ firstMessageMode?: | "assistant-speaks-first" | "assistant-speaks-first-with-model-generated-message" | "assistant-waits-for-user"; /** * These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool]. * By default, voicemail detection is disabled. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema. 
* @example ["conversation-update","function-call","hang","model-output","speech-update","status-update","transfer-update","transcript","tool-calls","user-interrupted","voice-input","workflow.node.started","assistant.started"] */ clientMessages?: | "conversation-update" | "function-call" | "function-call-result" | "hang" | "language-changed" | "metadata" | "model-output" | "speech-update" | "status-update" | "transcript" | "tool-calls" | "tool-calls-result" | "tool.completed" | "transfer-update" | "user-interrupted" | "voice-input" | "workflow.node.started" | "assistant.started"; /** * These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema. * @example ["conversation-update","end-of-call-report","function-call","hang","speech-update","status-update","tool-calls","transfer-destination-request","handoff-destination-request","user-interrupted","assistant.started"] */ serverMessages?: | "assistant.started" | "conversation-update" | "end-of-call-report" | "function-call" | "hang" | "language-changed" | "language-change-detected" | "model-output" | "phone-call-control" | "speech-update" | "status-update" | "transcript" | "transcript[transcriptType='final']" | "tool-calls" | "transfer-destination-request" | "handoff-destination-request" | "transfer-update" | "user-interrupted" | "voice-input" | "chat.created" | "chat.deleted" | "session.created" | "session.updated" | "session.deleted" | "call.deleted" | "call.delete.failed"; /** * This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended. * * @default 600 (10 minutes) * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; /** * This is the background sound in the call. 
Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** * This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech. * * Default `false` while in beta. * * @default false * @example false */ modelOutputInMessagesEnabled?: boolean; /** These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used. */ transportConfigurations?: TransportConfigurationTwilio[]; /** * This is the plan for observability of assistant's calls. * * Currently, only Langfuse is supported. */ observabilityPlan?: LangfuseObservabilityPlan; /** These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. 
*/ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: 
"together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is a set of actions that will be performed on certain events. */ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout )[]; /** * This is the name of the assistant. * * This is required when you want to transfer between assistants in a call. * @maxLength 40 */ name?: string; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; /** * This is the message that the assistant will say if it ends the call. * * If unspecified, it will hang up without saying anything. 
* @maxLength 1000 */ endCallMessage?: string; /** This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive. */ endCallPhrases?: string[]; compliancePlan?: CompliancePlan; /** This is for metadata you want to store on the assistant. */ metadata?: object; /** * This enables filtering of noise and background speech while the user is talking. * * Features: * - Smart denoising using Krisp * - Fourier denoising * * Smart denoising can be combined with or used independently of Fourier denoising. * * Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** This is the plan for analysis of assistant's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the assistant should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when assistant should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. * - The assistant is too fast to recognize customer's interruption. * - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the assistant's calls. 
* * Usage: * - To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`. * - To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`. */ monitorPlan?: MonitorPlan; /** These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */ credentialIds?: string[]; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. assistant.server.url * 2. phoneNumber.serverUrl * 3. org.serverUrl */ server?: Server; keypadInputPlan?: KeypadInputPlan; } export interface Assistant { /** These are the options for the assistant's transcriber. */ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** These are the options for the assistant's LLM. */ model?: | AnthropicModel | AnyscaleModel | CerebrasModel | CustomLLMModel | DeepInfraModel | DeepSeekModel | GoogleModel | GroqModel | InflectionAIModel | OpenAIModel | OpenRouterModel | PerplexityAIModel | TogetherAIModel | XaiModel; /** These are the options for the assistant's voice. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.). * * If unspecified, assistant will wait for user to speak and use the model to respond once they speak. * @example "Hello! How can I help you today?" 
*/ firstMessage?: string; /** @default false */ firstMessageInterruptionsEnabled?: boolean; /** * This is the mode for the first message. Default is 'assistant-speaks-first'. * * Use: * - 'assistant-speaks-first' to have the assistant speak first. * - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. * - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points). * * @default 'assistant-speaks-first' * @example "assistant-speaks-first" */ firstMessageMode?: | "assistant-speaks-first" | "assistant-speaks-first-with-model-generated-message" | "assistant-waits-for-user"; /** * These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool]. * By default, voicemail detection is disabled. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema. 
* @example ["conversation-update","function-call","hang","model-output","speech-update","status-update","transfer-update","transcript","tool-calls","user-interrupted","voice-input","workflow.node.started","assistant.started"] */ clientMessages?: | "conversation-update" | "function-call" | "function-call-result" | "hang" | "language-changed" | "metadata" | "model-output" | "speech-update" | "status-update" | "transcript" | "tool-calls" | "tool-calls-result" | "tool.completed" | "transfer-update" | "user-interrupted" | "voice-input" | "workflow.node.started" | "assistant.started"; /** * These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema. * @example ["conversation-update","end-of-call-report","function-call","hang","speech-update","status-update","tool-calls","transfer-destination-request","handoff-destination-request","user-interrupted","assistant.started"] */ serverMessages?: | "assistant.started" | "conversation-update" | "end-of-call-report" | "function-call" | "hang" | "language-changed" | "language-change-detected" | "model-output" | "phone-call-control" | "speech-update" | "status-update" | "transcript" | "transcript[transcriptType='final']" | "tool-calls" | "transfer-destination-request" | "handoff-destination-request" | "transfer-update" | "user-interrupted" | "voice-input" | "chat.created" | "chat.deleted" | "session.created" | "session.updated" | "session.deleted" | "call.deleted" | "call.delete.failed"; /** * This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended. * * @default 600 (10 minutes) * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; /** * This is the background sound in the call. 
Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** * This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech. * * Default `false` while in beta. * * @default false * @example false */ modelOutputInMessagesEnabled?: boolean; /** These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used. */ transportConfigurations?: TransportConfigurationTwilio[]; /** * This is the plan for observability of assistant's calls. * * Currently, only Langfuse is supported. */ observabilityPlan?: LangfuseObservabilityPlan; /** These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. 
*/ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: 
"together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is a set of actions that will be performed on certain events. */ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout )[]; /** * This is the name of the assistant. * * This is required when you want to transfer between assistants in a call. * @maxLength 40 */ name?: string; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; /** * This is the message that the assistant will say if it ends the call. * * If unspecified, it will hang up without saying anything. 
* @maxLength 1000 */ endCallMessage?: string; /** This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive. */ endCallPhrases?: string[]; compliancePlan?: CompliancePlan; /** This is for metadata you want to store on the assistant. */ metadata?: object; /** * This enables filtering of noise and background speech while the user is talking. * * Features: * - Smart denoising using Krisp * - Fourier denoising * * Smart denoising can be combined with or used independently of Fourier denoising. * * Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** This is the plan for analysis of assistant's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the assistant should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when assistant should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. * - The assistant is too fast to recognize customer's interruption. * - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the assistant's calls. 
 *
 * Usage:
 * - To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.
 * - To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.
 */
monitorPlan?: MonitorPlan;
/** These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */
credentialIds?: string[];
/**
 * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.
 *
 * The order of precedence is:
 *
 * 1. assistant.server.url
 * 2. phoneNumber.serverUrl
 * 3. org.serverUrl
 */
server?: Server;
keypadInputPlan?: KeypadInputPlan;
/** This is the unique identifier for the assistant. */
id: string;
/** This is the unique identifier for the org that this assistant belongs to. */
orgId: string;
/**
 * This is the ISO 8601 date-time string of when the assistant was created.
 * @format date-time
 */
createdAt: string;
/**
 * This is the ISO 8601 date-time string of when the assistant was last updated.
 * @format date-time
 */
updatedAt: string;
}
/** Pagination metadata returned alongside paginated list responses. */
export interface PaginationMeta {
  /** Number of items returned per page. */
  itemsPerPage: number;
  /** Total number of items across all pages. */
  totalItems: number;
  /** The page number of this response. */
  currentPage: number;
  /** NOTE(review): presumably set when some matching items fall outside the data-retention window and are omitted — confirm against the API docs. */
  itemsBeyondRetention?: boolean;
  /** Filter bound echoed back: created-at less-than-or-equal. @format date-time */
  createdAtLe?: string;
  /** Filter bound echoed back: created-at greater-than-or-equal. @format date-time */
  createdAtGe?: string;
}
/** One page of assistants plus pagination metadata. */
export interface AssistantPaginatedResponse {
  results: Assistant[];
  metadata: PaginationMeta;
}
/** One page of assistant versions plus pagination metadata. */
export interface AssistantVersionPaginatedResponse {
  results: any[];
  metadata: PaginationMeta;
  /** NOTE(review): looks like an opaque cursor for fetching the next page — confirm semantics with the API docs. */
  nextPageState?: string;
}
export interface UpdateAssistantDTO {
  /** These are the options for the assistant's transcriber.
*/ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** These are the options for the assistant's LLM. */ model?: | AnthropicModel | AnyscaleModel | CerebrasModel | CustomLLMModel | DeepInfraModel | DeepSeekModel | GoogleModel | GroqModel | InflectionAIModel | OpenAIModel | OpenRouterModel | PerplexityAIModel | TogetherAIModel | XaiModel; /** These are the options for the assistant's voice. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.). * * If unspecified, assistant will wait for user to speak and use the model to respond once they speak. * @example "Hello! How can I help you today?" */ firstMessage?: string; /** @default false */ firstMessageInterruptionsEnabled?: boolean; /** * This is the mode for the first message. Default is 'assistant-speaks-first'. * * Use: * - 'assistant-speaks-first' to have the assistant speak first. * - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. * - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points). 
* * @default 'assistant-speaks-first' * @example "assistant-speaks-first" */ firstMessageMode?: | "assistant-speaks-first" | "assistant-speaks-first-with-model-generated-message" | "assistant-waits-for-user"; /** * These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool]. * By default, voicemail detection is disabled. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema. * @example ["conversation-update","function-call","hang","model-output","speech-update","status-update","transfer-update","transcript","tool-calls","user-interrupted","voice-input","workflow.node.started","assistant.started"] */ clientMessages?: | "conversation-update" | "function-call" | "function-call-result" | "hang" | "language-changed" | "metadata" | "model-output" | "speech-update" | "status-update" | "transcript" | "tool-calls" | "tool-calls-result" | "tool.completed" | "transfer-update" | "user-interrupted" | "voice-input" | "workflow.node.started" | "assistant.started"; /** * These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema. 
* @example ["conversation-update","end-of-call-report","function-call","hang","speech-update","status-update","tool-calls","transfer-destination-request","handoff-destination-request","user-interrupted","assistant.started"] */ serverMessages?: | "assistant.started" | "conversation-update" | "end-of-call-report" | "function-call" | "hang" | "language-changed" | "language-change-detected" | "model-output" | "phone-call-control" | "speech-update" | "status-update" | "transcript" | "transcript[transcriptType='final']" | "tool-calls" | "transfer-destination-request" | "handoff-destination-request" | "transfer-update" | "user-interrupted" | "voice-input" | "chat.created" | "chat.deleted" | "session.created" | "session.updated" | "session.deleted" | "call.deleted" | "call.delete.failed"; /** * This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended. * * @default 600 (10 minutes) * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; /** * This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** * This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech. * * Default `false` while in beta. * * @default false * @example false */ modelOutputInMessagesEnabled?: boolean; /** These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used. */ transportConfigurations?: TransportConfigurationTwilio[]; /** * This is the plan for observability of assistant's calls. * * Currently, only Langfuse is supported. 
*/ observabilityPlan?: LangfuseObservabilityPlan; /** These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. */ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & 
CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: "together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is a set of actions that will be performed on certain events. */ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout )[]; /** * This is the name of the assistant. * * This is required when you want to transfer between assistants in a call. 
* @maxLength 40 */ name?: string; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; /** * This is the message that the assistant will say if it ends the call. * * If unspecified, it will hang up without saying anything. * @maxLength 1000 */ endCallMessage?: string; /** This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive. */ endCallPhrases?: string[]; compliancePlan?: CompliancePlan; /** This is for metadata you want to store on the assistant. */ metadata?: object; /** * This enables filtering of noise and background speech while the user is talking. * * Features: * - Smart denoising using Krisp * - Fourier denoising * * Smart denoising can be combined with or used independently of Fourier denoising. * * Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** This is the plan for analysis of assistant's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the assistant should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when assistant should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. * - The assistant is too fast to recognize customer's interruption. 
* - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the assistant's calls. * * Usage: * - To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`. * - To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`. */ monitorPlan?: MonitorPlan; /** These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */ credentialIds?: string[]; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. assistant.server.url * 2. phoneNumber.serverUrl * 3. org.serverUrl */ server?: Server; keypadInputPlan?: KeypadInputPlan; } export interface SquadMemberDTO { assistantDestinations?: ( | TransferDestinationAssistant | HandoffDestinationAssistant )[]; /** This is the assistant that will be used for the call. To use a transient assistant, use `assistant` instead. */ assistantId?: string | null; /** This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead. */ assistant?: CreateAssistantDTO; /** This can be used to override the assistant's settings and provide values for it's template variables. */ assistantOverrides?: AssistantOverrides; } export interface CreateSquadDTO { /** This is the name of the squad. */ name?: string; /** * This is the list of assistants that make up the squad. * * The call will start with the first assistant in the list. 
*/ members: SquadMemberDTO[]; /** * This can be used to override all the assistants' settings and provide values for their template variables. * * Both `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override. */ membersOverrides?: AssistantOverrides; } export interface Squad { /** This is the name of the squad. */ name?: string; /** * This is the list of assistants that make up the squad. * * The call will start with the first assistant in the list. */ members: SquadMemberDTO[]; /** * This can be used to override all the assistants' settings and provide values for their template variables. * * Both `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override. */ membersOverrides?: AssistantOverrides; /** This is the unique identifier for the squad. */ id: string; /** This is the unique identifier for the org that this squad belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the squad was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the squad was last updated. * @format date-time */ updatedAt: string; } export interface UpdateSquadDTO { /** This is the name of the squad. */ name?: string; /** * This is the list of assistants that make up the squad. * * The call will start with the first assistant in the list. */ members: SquadMemberDTO[]; /** * This can be used to override all the assistants' settings and provide values for their template variables. * * Both `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override. 
*/ membersOverrides?: AssistantOverrides; } export interface Workflow { nodes: (ConversationNode | ToolNode)[]; /** * This is the model for the workflow. * * This can be overridden at node level using `nodes[n].model`. */ model?: | WorkflowOpenAIModel | WorkflowAnthropicModel | WorkflowGoogleModel | WorkflowCustomModel; /** * This is the transcriber for the workflow. * * This can be overridden at node level using `nodes[n].transcriber`. */ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** * This is the voice for the workflow. * * This can be overridden at node level using `nodes[n].voice`. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the plan for observability of workflow's calls. * * Currently, only Langfuse is supported. */ observabilityPlan?: LangfuseObservabilityPlan; /** * This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** This is a set of actions that will be performed on certain events. */ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout | CallHookModelResponseTimeout )[]; /** These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. 
*/ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: 
"together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is the voicemail detection plan for the workflow. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * This is the maximum duration of the call in seconds. * * After this duration, the call will automatically end. * * Default is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds. * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; id: string; orgId: string; /** @format date-time */ createdAt: string; /** @format date-time */ updatedAt: string; /** @maxLength 80 */ name: string; edges: Edge[]; /** @maxLength 5000 */ globalPrompt?: string; /** * This is where Vapi will send webhooks. 
You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. tool.server * 2. workflow.server / assistant.server * 3. phoneNumber.server * 4. org.server */ server?: Server; /** This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings. */ compliancePlan?: CompliancePlan; /** This is the plan for analysis of workflow's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the workflow nodes should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when workflow nodes should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. * - The assistant is too fast to recognize customer's interruption. * - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the workflow's calls. * * Usage: * - To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`. * - To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`. 
*/ monitorPlan?: MonitorPlan; /** * This enables filtering of noise and background speech while the user is talking. * * Features: * - Smart denoising using Krisp * - Fourier denoising * * Both can be used together. Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */ credentialIds?: string[]; /** This is the plan for keypad input handling during workflow calls. */ keypadInputPlan?: KeypadInputPlan; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; } export interface CreateWorkflowDTO { nodes: (ConversationNode | ToolNode)[]; /** * This is the model for the workflow. * * This can be overridden at node level using `nodes[n].model`. */ model?: | WorkflowOpenAIModel | WorkflowAnthropicModel | WorkflowGoogleModel | WorkflowCustomModel; /** * This is the transcriber for the workflow. * * This can be overridden at node level using `nodes[n].transcriber`. */ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** * This is the voice for the workflow. * * This can be overridden at node level using `nodes[n].voice`. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the plan for observability of workflow's calls. * * Currently, only Langfuse is supported. 
*/ observabilityPlan?: LangfuseObservabilityPlan; /** * This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** This is a set of actions that will be performed on certain events. */ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout | CallHookModelResponseTimeout )[]; /** These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. */ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & 
CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: "together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: 
"minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is the voicemail detection plan for the workflow. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * This is the maximum duration of the call in seconds. * * After this duration, the call will automatically end. * * Default is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds. * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; /** @maxLength 80 */ name: string; edges: Edge[]; /** @maxLength 5000 */ globalPrompt?: string; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. tool.server * 2. workflow.server / assistant.server * 3. phoneNumber.server * 4. org.server */ server?: Server; /** This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings. */ compliancePlan?: CompliancePlan; /** This is the plan for analysis of workflow's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the workflow nodes should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when workflow nodes should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. 
* - The assistant is too fast to recognize customer's interruption. * - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the workflow's calls. * * Usage: * - To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`. * - To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`. */ monitorPlan?: MonitorPlan; /** * This enables filtering of noise and background speech while the user is talking. * * Features: * - Smart denoising using Krisp * - Fourier denoising * * Both can be used together. Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */ credentialIds?: string[]; /** This is the plan for keypad input handling during workflow calls. */ keypadInputPlan?: KeypadInputPlan; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; } export interface UpdateWorkflowDTO { nodes?: (ConversationNode | ToolNode)[]; /** * This is the model for the workflow. * * This can be overridden at node level using `nodes[n].model`. */ model?: | WorkflowOpenAIModel | WorkflowAnthropicModel | WorkflowGoogleModel | WorkflowCustomModel; /** * This is the transcriber for the workflow. * * This can be overridden at node level using `nodes[n].transcriber`. 
*/ transcriber?: | AssemblyAITranscriber | AzureSpeechTranscriber | CustomTranscriber | DeepgramTranscriber | ElevenLabsTranscriber | GladiaTranscriber | GoogleTranscriber | SpeechmaticsTranscriber | TalkscriberTranscriber | OpenAITranscriber | CartesiaTranscriber; /** * This is the voice for the workflow. * * This can be overridden at node level using `nodes[n].voice`. */ voice?: | AzureVoice | CartesiaVoice | CustomVoice | DeepgramVoice | ElevenLabsVoice | HumeVoice | LMNTVoice | NeuphonicVoice | OpenAIVoice | PlayHTVoice | RimeAIVoice | SmallestAIVoice | TavusVoice | VapiVoice | SesameVoice | InworldVoice | MinimaxVoice; /** * This is the plan for observability of workflow's calls. * * Currently, only Langfuse is supported. */ observabilityPlan?: LangfuseObservabilityPlan; /** * This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'. * You can also provide a custom sound by providing a URL to an audio file. */ backgroundSound?: "off" | "office" | string; /** This is a set of actions that will be performed on certain events. */ hooks?: ( | CallHookCallEnding | CallHookAssistantSpeechInterrupted | CallHookCustomerSpeechInterrupted | CallHookCustomerSpeechTimeout | CallHookModelResponseTimeout )[]; /** These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials. 
*/ credentials?: ( | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: 
"together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: "google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO) )[]; /** This is the voicemail detection plan for the workflow. */ voicemailDetection?: | "off" | GoogleVoicemailDetectionPlan | OpenAIVoicemailDetectionPlan | TwilioVoicemailDetectionPlan | VapiVoicemailDetectionPlan; /** * This is the maximum duration of the call in seconds. * * After this duration, the call will automatically end. * * Default is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds. * @min 10 * @max 43200 * @example 600 */ maxDurationSeconds?: number; /** @maxLength 80 */ name?: string; edges?: Edge[]; /** @maxLength 5000 */ globalPrompt?: string; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. 
tool.server * 2. workflow.server / assistant.server * 3. phoneNumber.server * 4. org.server */ server?: Server; /** This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings. */ compliancePlan?: CompliancePlan; /** This is the plan for analysis of workflow's calls. Stored in `call.analysis`. */ analysisPlan?: AnalysisPlan; /** This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`. */ artifactPlan?: ArtifactPlan; /** * This is the plan for when the workflow nodes should start talking. * * You should configure this if you're running into these issues: * - The assistant is too slow to start talking after the customer is done speaking. * - The assistant is too fast to start talking after the customer is done speaking. * - The assistant is so fast that it's actually interrupting the customer. */ startSpeakingPlan?: StartSpeakingPlan; /** * This is the plan for when workflow nodes should stop talking on customer interruption. * * You should configure this if you're running into these issues: * - The assistant is too slow to recognize customer's interruption. * - The assistant is too fast to recognize customer's interruption. * - The assistant is getting interrupted by phrases that are just acknowledgments. * - The assistant is getting interrupted by background noises. * - The assistant is not properly stopping -- it starts talking right after getting interrupted. */ stopSpeakingPlan?: StopSpeakingPlan; /** * This is the plan for real-time monitoring of the workflow's calls. * * Usage: * - To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`. * - To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`. */ monitorPlan?: MonitorPlan; /** * This enables filtering of noise and background speech while the user is talking. 
* * Features: * - Smart denoising using Krisp * - Fourier denoising * * Both can be used together. Order of precedence: * - Smart denoising * - Fourier denoising */ backgroundSpeechDenoisingPlan?: BackgroundSpeechDenoisingPlan; /** These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this. */ credentialIds?: string[]; /** This is the plan for keypad input handling during workflow calls. */ keypadInputPlan?: KeypadInputPlan; /** * This is the message that the assistant will say if the call is forwarded to voicemail. * * If unspecified, it will hang up. * @maxLength 1000 */ voicemailMessage?: string; } export interface SubscriptionLimits { /** * True if this call was blocked by the Call Concurrency limit * @default false */ concurrencyBlocked?: boolean; /** Account Call Concurrency limit */ concurrencyLimit?: number; /** Incremental number of concurrent calls that will be allowed, including this call */ remainingConcurrentCalls?: number; } export interface AnalysisCostBreakdown { /** This is the cost to summarize the call. */ summary?: number; /** This is the number of prompt tokens used to summarize the call. */ summaryPromptTokens?: number; /** This is the number of completion tokens used to summarize the call. */ summaryCompletionTokens?: number; /** This is the cost to extract structured data from the call. */ structuredData?: number; /** This is the number of prompt tokens used to extract structured data from the call. */ structuredDataPromptTokens?: number; /** This is the number of completion tokens used to extract structured data from the call. */ structuredDataCompletionTokens?: number; /** This is the cost to evaluate if the call was successful. */ successEvaluation?: number; /** This is the number of prompt tokens used to evaluate if the call was successful. 
*/ successEvaluationPromptTokens?: number; /** This is the number of completion tokens used to evaluate if the call was successful. */ successEvaluationCompletionTokens?: number; /** This is the cost to evaluate structuredOutputs from the call. */ structuredOutput?: number; /** This is the number of prompt tokens used to evaluate structuredOutputs from the call. */ structuredOutputPromptTokens?: number; /** This is the number of completion tokens used to evaluate structuredOutputs from the call. */ structuredOutputCompletionTokens?: number; } export interface CostBreakdown { /** This is the cost of the transport provider, like Twilio or Vonage. */ transport?: number; /** This is the cost of the speech-to-text service. */ stt?: number; /** This is the cost of the language model. */ llm?: number; /** This is the cost of the text-to-speech service. */ tts?: number; /** This is the cost of Vapi. */ vapi?: number; /** This is the cost of chat interactions. */ chat?: number; /** This is the total cost of the call. */ total?: number; /** This is the LLM prompt tokens used for the call. */ llmPromptTokens?: number; /** This is the LLM completion tokens used for the call. */ llmCompletionTokens?: number; /** This is the TTS characters used for the call. */ ttsCharacters?: number; /** This is the cost of the analysis. */ analysisCostBreakdown?: AnalysisCostBreakdown; } export interface Analysis { /** This is the summary of the call. Customize by setting `assistant.analysisPlan.summaryPrompt`. */ summary?: string; /** This is the structured data extracted from the call. Customize by setting `assistant.analysisPlan.structuredDataPrompt` and/or `assistant.analysisPlan.structuredDataSchema`. */ structuredData?: object; /** This is the structured data catalog of the call. Customize by setting `assistant.analysisPlan.structuredDataMultiPlan`. */ structuredDataMulti?: object[]; /** This is the evaluation of the call. 
Customize by setting `assistant.analysisPlan.successEvaluationPrompt` and/or `assistant.analysisPlan.successEvaluationRubric`. */ successEvaluation?: string; } export interface Monitor { /** This is the URL where the assistant's calls can be listened to in real-time. To enable, set `assistant.monitorPlan.listenEnabled` to `true`. */ listenUrl?: string; /** This is the URL where the assistant's calls can be controlled in real-time. To enable, set `assistant.monitorPlan.controlEnabled` to `true`. */ controlUrl?: string; } export interface Mono { /** This is the combined recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`. */ combinedUrl?: string; /** This is the mono recording url for the assistant. To enable, set `assistant.artifactPlan.recordingEnabled`. */ assistantUrl?: string; /** This is the mono recording url for the customer. To enable, set `assistant.artifactPlan.recordingEnabled`. */ customerUrl?: string; } export interface Recording { /** This is the stereo recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`. */ stereoUrl?: string; /** This is the video recording url for the call. To enable, set `assistant.artifactPlan.videoRecordingEnabled`. */ videoUrl?: string; /** This is video recording start delay in seconds (per the field name; the upstream description said "ms"). To enable, set `assistant.artifactPlan.videoRecordingEnabled`. This can be used to align the playback of the recording with artifact.messages timestamps. */ videoRecordingStartDelaySeconds?: number; /** This is the mono recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`. */ mono?: Mono; } export interface NodeArtifact { /** These are the messages that were spoken during the node. */ messages?: ( | UserMessage | SystemMessage | BotMessage | ToolCallMessage | ToolCallResultMessage )[]; /** This is the node name. */ nodeName?: string; /** These are the variable values that were extracted from the node. 
*/ variableValues?: object; } export interface TurnLatency { /** This is the model latency for the first token. */ modelLatency?: number; /** This is the voice latency from the model output. */ voiceLatency?: number; /** This is the transcriber latency from the user speech. */ transcriberLatency?: number; /** This is the endpointing latency. */ endpointingLatency?: number; /** This is the latency for the whole turn. */ turnLatency?: number; } export interface PerformanceMetrics { /** These are the individual latencies for each turn. */ turnLatencies?: TurnLatency[]; /** This is the average latency for the model to output the first token. */ modelLatencyAverage?: number; /** This is the average latency for the text to speech. */ voiceLatencyAverage?: number; /** This is the average latency for the transcriber. */ transcriberLatencyAverage?: number; /** This is the average latency for the endpointing. */ endpointingLatencyAverage?: number; /** This is the average latency for complete turns. */ turnLatencyAverage?: number; /** This is the average latency for packets received from the transport provider in milliseconds. */ fromTransportLatencyAverage?: number; /** This is the average latency for packets sent to the transport provider in milliseconds. */ toTransportLatencyAverage?: number; /** This is the number of times the user was interrupted by the assistant during the call. */ numUserInterrupted?: number; /** This is the number of times the assistant was interrupted by the user during the call. */ numAssistantInterrupted?: number; } export interface Artifact { /** These are the messages that were spoken during the call. */ messages?: ( | UserMessage | SystemMessage | BotMessage | ToolCallMessage | ToolCallResultMessage )[]; /** These are the messages that were spoken during the call, formatted for OpenAI. */ messagesOpenAIFormatted?: OpenAIMessage[]; /** * This is the recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`. 
* @deprecated */ recordingUrl?: string; /** * This is the stereo recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`. * @deprecated */ stereoRecordingUrl?: string; /** * This is video recording url for the call. To enable, set `assistant.artifactPlan.videoRecordingEnabled`. * @deprecated */ videoRecordingUrl?: string; /** * This is video recording start delay in seconds (per the field name; the upstream description said "ms"). To enable, set `assistant.artifactPlan.videoRecordingEnabled`. This can be used to align the playback of the recording with artifact.messages timestamps. * @deprecated */ videoRecordingStartDelaySeconds?: number; /** This is the recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`. */ recording?: Recording; /** This is the transcript of the call. This is derived from `artifact.messages` but provided for convenience. */ transcript?: string; /** This is the packet capture url for the call. This is only available for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`. */ pcapUrl?: string; /** This is the url for the call logs. This includes all logging output during the call for debugging purposes. */ logUrl?: string; /** This is the history of workflow nodes that were executed during the call. */ nodes?: NodeArtifact[]; /** These are the variable values at the end of the workflow execution. */ variableValues?: object; /** This is the performance metrics for the call. It contains the turn latency, broken down by component. */ performanceMetrics?: PerformanceMetrics; /** * These are the structured outputs that will be extracted from the call. * To enable, set `assistant.artifactPlan.structuredOutputIds` with the IDs of the structured outputs you want to extract. */ structuredOutputs?: object; /** * These are the scorecards that have been evaluated based on the structured outputs extracted during the call. 
* To enable, set `assistant.artifactPlan.scorecardIds` or `assistant.artifactPlan.scorecards` with the IDs or objects of the scorecards you want to evaluate. */ scorecards?: object; /** These are the transfer records from warm transfers, including destinations, transcripts, and status. */ transfers?: string[]; /** * This is when the structured outputs were last updated. * @format date-time */ structuredOutputsLastUpdatedAt?: string; } export interface RecordingConsent { /** This is the type of recording consent. */ type: object; /** * This is the date and time the recording consent was granted. * If not specified, it means the recording consent was not granted. * @format date-time */ grantedAt?: string; } export interface Compliance { /** This is the recording consent of the call. Configure in `assistant.compliancePlan.recordingConsentPlan`. */ recordingConsent?: RecordingConsent; } export interface WorkflowOverrides { /** * These are values that will be used to replace the template variables in the workflow messages and other text-based fields. * This uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html * * So for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`. * `{{"now" | date: "%b %d, %Y, %I:%M %p", "America/New_York"}}` will be replaced with the current date and time in New York. 
* Some VAPI reserved defaults: * - *customer* - the customer object */ variableValues?: object; } export interface TransferPhoneNumberHookAction { /** This is the type of action - must be "transfer" */ type: "transfer"; /** This is the destination details for the transfer - can be a phone number or SIP URI */ destination?: TransferDestinationNumber | TransferDestinationSip; } export interface SayPhoneNumberHookAction { /** This is the type of action - must be "say" */ type: "say"; /** * This is the message to say * @maxLength 4000 */ exact: string; } export interface PhoneNumberHookCallRinging { /** * This is the event to trigger the hook on * @maxLength 1000 */ on: "call.ringing"; /** Only the first action will be executed. Additional actions will be ignored. */ do: (TransferPhoneNumberHookAction | SayPhoneNumberHookAction)[]; } export interface PhoneNumberCallEndingHookFilter { /** * This is the type of filter - currently only "oneOf" is supported * @maxLength 1000 */ type: "oneOf"; /** * This is the key to filter on - only "call.endedReason" is allowed for phone number call ending hooks * @maxLength 1000 */ key: "call.endedReason"; /** This is the array of assistant-request related ended reasons to match against (NOTE(review): the generated type is a single string-literal union, not an array — verify against the upstream OpenAPI spec) */ oneOf: | "assistant-request-failed" | "assistant-request-returned-error" | "assistant-request-returned-unspeakable-error" | "assistant-request-returned-invalid-assistant" | "assistant-request-returned-no-assistant" | "assistant-request-returned-forwarding-phone-number"; } export interface PhoneNumberHookCallEnding { /** * This is the event to trigger the hook on * @maxLength 1000 */ on: "call.ending"; /** Optional filters to decide when to trigger - restricted to assistant-request related ended reasons */ filters?: PhoneNumberCallEndingHookFilter[]; /** This is the action to perform when the hook triggers */ do?: TransferPhoneNumberHookAction | SayPhoneNumberHookAction; } export interface ImportTwilioPhoneNumberDTO { /** * This is the fallback 
destination an inbound call will be transferred to if: * 1. `assistantId` is not set * 2. `squadId` is not set * 3. and, `assistant-request` message to the `serverUrl` fails * * If this is not set and above conditions are met, the inbound call is hung up with an error message. */ fallbackDestination?: TransferDestinationNumber | TransferDestinationSip; /** This is the hooks that will be used for incoming calls to this phone number. */ hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[]; /** * Controls whether Vapi sets the messaging webhook URL on the Twilio number during import. * * If set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is. * If `true` or omitted (default), Vapi will configure both the voice and messaging URLs. * * @default true */ smsEnabled?: boolean; /** * These are the digits of the phone number you own on your Twilio. * @deprecated */ twilioPhoneNumber: string; /** This is your Twilio Account SID that will be used to handle this phone number. */ twilioAccountSid: string; /** This is the Twilio Auth Token that will be used to handle this phone number. */ twilioAuthToken?: string; /** This is the Twilio API Key that will be used to handle this phone number. If AuthToken is provided, this will be ignored. */ twilioApiKey?: string; /** This is the Twilio API Secret that will be used to handle this phone number. If AuthToken is provided, this will be ignored. */ twilioApiSecret?: string; /** * This is the name of the phone number. This is just for your own reference. * @maxLength 40 */ name?: string; /** * This is the assistant that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. 
*/ assistantId?: string; /** * This is the workflow that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ workflowId?: string; /** * This is the squad that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ squadId?: string; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. assistant.server * 2. phoneNumber.server * 3. org.server */ server?: Server; } export interface CreateCustomerDTO { /** * This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it. * * Use cases: * - `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks. * - `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls. * * If `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\+?[a-zA-Z0-9]+$/`). * * @default true (E164 check is enabled) */ numberE164CheckEnabled?: boolean; /** * This is the extension that will be dialed after the call is answered. * @maxLength 10 * @example null */ extension?: string; /** * These are the overrides for the assistant's settings and template variables specific to this customer. * This allows customization of the assistant's behavior for individual customers in batch calls. 
*/ assistantOverrides?: AssistantOverrides; /** * This is the number of the customer. * @minLength 3 * @maxLength 40 */ number?: string; /** This is the SIP URI of the customer. */ sipUri?: string; /** * This is the name of the customer. This is just for your own reference. * * For SIP inbound calls, this is extracted from the `From` SIP header with format `"Display Name" ` (NOTE(review): the `<sip:uri>` portion of this format string appears truncated by the doc generator — verify against the upstream spec). * @maxLength 40 */ name?: string; /** * This is the email of the customer. * @maxLength 40 */ email?: string; /** * This is the external ID of the customer. * @maxLength 40 */ externalId?: string; } export interface SchedulePlan { /** * This is the ISO 8601 date-time string of the earliest time the call can be scheduled. * @format date-time */ earliestAt: string; /** * This is the ISO 8601 date-time string of the latest time the call can be scheduled. * @format date-time */ latestAt?: string; } export interface Call { /** This is the type of call. */ type?: | "inboundPhoneCall" | "outboundPhoneCall" | "webCall" | "vapi.websocketCall"; /** These are the costs of individual components of the call in USD. */ costs?: ( | TransportCost | TranscriberCost | ModelCost | VoiceCost | VapiCost | VoicemailDetectionCost | AnalysisCost | KnowledgeBaseCost )[]; messages?: ( | UserMessage | SystemMessage | BotMessage | ToolCallMessage | ToolCallResultMessage )[]; /** * This is the provider of the call. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. * @deprecated */ phoneCallProvider?: "twilio" | "vonage" | "vapi" | "telnyx"; /** * This is the transport of the phone call. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ phoneCallTransport?: "sip" | "pstn"; /** This is the status of the call. */ status?: | "scheduled" | "queued" | "ringing" | "in-progress" | "forwarding" | "ended" | "not-found" | "deletion-failed"; /** This is the explanation for how the call ended. 
*/ endedReason?: | "call-start-error-neither-assistant-nor-server-set" | "assistant-request-failed" | "assistant-request-returned-error" | "assistant-request-returned-unspeakable-error" | "assistant-request-returned-invalid-assistant" | "assistant-request-returned-no-assistant" | "assistant-request-returned-forwarding-phone-number" | "scheduled-call-deleted" | "call.start.error-vapifault-get-org" | "call.start.error-vapifault-get-subscription" | "call.start.error-get-assistant" | "call.start.error-get-phone-number" | "call.start.error-get-customer" | "call.start.error-get-resources-validation" | "call.start.error-vapi-number-international" | "call.start.error-vapi-number-outbound-daily-limit" | "call.start.error-get-transport" | "call.start.error-subscription-wallet-does-not-exist" | "call.start.error-fraud-check-failed" | "call.start.error-subscription-frozen" | "call.start.error-subscription-insufficient-credits" | "call.start.error-subscription-upgrade-failed" | "call.start.error-subscription-concurrency-limit-reached" | "call.start.error-enterprise-feature-not-available-recording-consent" | "assistant-not-valid" | "call.start.error-vapifault-database-error" | "assistant-not-found" | "pipeline-error-openai-voice-failed" | "pipeline-error-cartesia-voice-failed" | "pipeline-error-deepgram-voice-failed" | "pipeline-error-eleven-labs-voice-failed" | "pipeline-error-playht-voice-failed" | "pipeline-error-lmnt-voice-failed" | "pipeline-error-azure-voice-failed" | "pipeline-error-rime-ai-voice-failed" | "pipeline-error-smallest-ai-voice-failed" | "pipeline-error-vapi-voice-failed" | "pipeline-error-neuphonic-voice-failed" | "pipeline-error-hume-voice-failed" | "pipeline-error-sesame-voice-failed" | "pipeline-error-inworld-voice-failed" | "pipeline-error-minimax-voice-failed" | "pipeline-error-tavus-video-failed" | "call.in-progress.error-vapifault-openai-voice-failed" | "call.in-progress.error-vapifault-cartesia-voice-failed" | 
"call.in-progress.error-vapifault-deepgram-voice-failed" | "call.in-progress.error-vapifault-eleven-labs-voice-failed" | "call.in-progress.error-vapifault-playht-voice-failed" | "call.in-progress.error-vapifault-lmnt-voice-failed" | "call.in-progress.error-vapifault-azure-voice-failed" | "call.in-progress.error-vapifault-rime-ai-voice-failed" | "call.in-progress.error-vapifault-smallest-ai-voice-failed" | "call.in-progress.error-vapifault-vapi-voice-failed" | "call.in-progress.error-vapifault-neuphonic-voice-failed" | "call.in-progress.error-vapifault-hume-voice-failed" | "call.in-progress.error-vapifault-sesame-voice-failed" | "call.in-progress.error-vapifault-inworld-voice-failed" | "call.in-progress.error-vapifault-minimax-voice-failed" | "call.in-progress.error-vapifault-tavus-video-failed" | "pipeline-error-vapi-llm-failed" | "pipeline-error-vapi-400-bad-request-validation-failed" | "pipeline-error-vapi-401-unauthorized" | "pipeline-error-vapi-403-model-access-denied" | "pipeline-error-vapi-429-exceeded-quota" | "pipeline-error-vapi-500-server-error" | "pipeline-error-vapi-503-server-overloaded-error" | "call.in-progress.error-providerfault-vapi-llm-failed" | "call.in-progress.error-vapifault-vapi-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-vapi-401-unauthorized" | "call.in-progress.error-vapifault-vapi-403-model-access-denied" | "call.in-progress.error-vapifault-vapi-429-exceeded-quota" | "call.in-progress.error-providerfault-vapi-500-server-error" | "call.in-progress.error-providerfault-vapi-503-server-overloaded-error" | "pipeline-error-deepgram-transcriber-failed" | "pipeline-error-deepgram-transcriber-api-key-missing" | "call.in-progress.error-vapifault-deepgram-transcriber-failed" | "pipeline-error-gladia-transcriber-failed" | "call.in-progress.error-vapifault-gladia-transcriber-failed" | "pipeline-error-speechmatics-transcriber-failed" | "call.in-progress.error-vapifault-speechmatics-transcriber-failed" | 
"pipeline-error-assembly-ai-transcriber-failed" | "pipeline-error-assembly-ai-returning-400-insufficent-funds" | "pipeline-error-assembly-ai-returning-400-paid-only-feature" | "pipeline-error-assembly-ai-returning-401-invalid-credentials" | "pipeline-error-assembly-ai-returning-500-invalid-schema" | "pipeline-error-assembly-ai-returning-500-word-boost-parsing-failed" | "call.in-progress.error-vapifault-assembly-ai-transcriber-failed" | "call.in-progress.error-vapifault-assembly-ai-returning-400-insufficent-funds" | "call.in-progress.error-vapifault-assembly-ai-returning-400-paid-only-feature" | "call.in-progress.error-vapifault-assembly-ai-returning-401-invalid-credentials" | "call.in-progress.error-vapifault-assembly-ai-returning-500-invalid-schema" | "call.in-progress.error-vapifault-assembly-ai-returning-500-word-boost-parsing-failed" | "pipeline-error-talkscriber-transcriber-failed" | "call.in-progress.error-vapifault-talkscriber-transcriber-failed" | "pipeline-error-azure-speech-transcriber-failed" | "call.in-progress.error-vapifault-azure-speech-transcriber-failed" | "call.in-progress.error-pipeline-no-available-llm-model" | "worker-shutdown" | "vonage-disconnected" | "vonage-failed-to-connect-call" | "vonage-completed" | "phone-call-provider-bypass-enabled-but-no-call-received" | "call.in-progress.error-providerfault-transport-never-connected" | "call.in-progress.error-vapifault-worker-not-available" | "call.in-progress.error-vapifault-transport-never-connected" | "call.in-progress.error-vapifault-transport-connected-but-call-not-active" | "call.in-progress.error-vapifault-call-started-but-connection-to-transport-missing" | "call.in-progress.error-vapifault-worker-died" | "call.in-progress.twilio-completed-call" | "call.in-progress.sip-completed-call" | "call.in-progress.error-providerfault-openai-llm-failed" | "call.in-progress.error-providerfault-azure-openai-llm-failed" | "call.in-progress.error-providerfault-groq-llm-failed" | 
"call.in-progress.error-providerfault-google-llm-failed" | "call.in-progress.error-providerfault-xai-llm-failed" | "call.in-progress.error-providerfault-mistral-llm-failed" | "call.in-progress.error-providerfault-inflection-ai-llm-failed" | "call.in-progress.error-providerfault-cerebras-llm-failed" | "call.in-progress.error-providerfault-deep-seek-llm-failed" | "call.in-progress.error-vapifault-chat-pipeline-failed-to-start" | "pipeline-error-openai-400-bad-request-validation-failed" | "pipeline-error-openai-401-unauthorized" | "pipeline-error-openai-401-incorrect-api-key" | "pipeline-error-openai-401-account-not-in-organization" | "pipeline-error-openai-403-model-access-denied" | "pipeline-error-openai-429-exceeded-quota" | "pipeline-error-openai-429-rate-limit-reached" | "pipeline-error-openai-500-server-error" | "pipeline-error-openai-503-server-overloaded-error" | "pipeline-error-openai-llm-failed" | "call.in-progress.error-vapifault-openai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-openai-401-unauthorized" | "call.in-progress.error-vapifault-openai-401-incorrect-api-key" | "call.in-progress.error-vapifault-openai-401-account-not-in-organization" | "call.in-progress.error-vapifault-openai-403-model-access-denied" | "call.in-progress.error-vapifault-openai-429-exceeded-quota" | "call.in-progress.error-vapifault-openai-429-rate-limit-reached" | "call.in-progress.error-providerfault-openai-500-server-error" | "call.in-progress.error-providerfault-openai-503-server-overloaded-error" | "pipeline-error-azure-openai-400-bad-request-validation-failed" | "pipeline-error-azure-openai-401-unauthorized" | "pipeline-error-azure-openai-403-model-access-denied" | "pipeline-error-azure-openai-429-exceeded-quota" | "pipeline-error-azure-openai-500-server-error" | "pipeline-error-azure-openai-503-server-overloaded-error" | "pipeline-error-azure-openai-llm-failed" | "call.in-progress.error-vapifault-azure-openai-400-bad-request-validation-failed" | 
"call.in-progress.error-vapifault-azure-openai-401-unauthorized" | "call.in-progress.error-vapifault-azure-openai-403-model-access-denied" | "call.in-progress.error-vapifault-azure-openai-429-exceeded-quota" | "call.in-progress.error-providerfault-azure-openai-500-server-error" | "call.in-progress.error-providerfault-azure-openai-503-server-overloaded-error" | "pipeline-error-google-400-bad-request-validation-failed" | "pipeline-error-google-401-unauthorized" | "pipeline-error-google-403-model-access-denied" | "pipeline-error-google-429-exceeded-quota" | "pipeline-error-google-500-server-error" | "pipeline-error-google-503-server-overloaded-error" | "pipeline-error-google-llm-failed" | "call.in-progress.error-vapifault-google-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-google-401-unauthorized" | "call.in-progress.error-vapifault-google-403-model-access-denied" | "call.in-progress.error-vapifault-google-429-exceeded-quota" | "call.in-progress.error-providerfault-google-500-server-error" | "call.in-progress.error-providerfault-google-503-server-overloaded-error" | "pipeline-error-xai-400-bad-request-validation-failed" | "pipeline-error-xai-401-unauthorized" | "pipeline-error-xai-403-model-access-denied" | "pipeline-error-xai-429-exceeded-quota" | "pipeline-error-xai-500-server-error" | "pipeline-error-xai-503-server-overloaded-error" | "pipeline-error-xai-llm-failed" | "call.in-progress.error-vapifault-xai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-xai-401-unauthorized" | "call.in-progress.error-vapifault-xai-403-model-access-denied" | "call.in-progress.error-vapifault-xai-429-exceeded-quota" | "call.in-progress.error-providerfault-xai-500-server-error" | "call.in-progress.error-providerfault-xai-503-server-overloaded-error" | "pipeline-error-mistral-400-bad-request-validation-failed" | "pipeline-error-mistral-401-unauthorized" | "pipeline-error-mistral-403-model-access-denied" | 
"pipeline-error-mistral-429-exceeded-quota" | "pipeline-error-mistral-500-server-error" | "pipeline-error-mistral-503-server-overloaded-error" | "pipeline-error-mistral-llm-failed" | "call.in-progress.error-vapifault-mistral-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-mistral-401-unauthorized" | "call.in-progress.error-vapifault-mistral-403-model-access-denied" | "call.in-progress.error-vapifault-mistral-429-exceeded-quota" | "call.in-progress.error-providerfault-mistral-500-server-error" | "call.in-progress.error-providerfault-mistral-503-server-overloaded-error" | "pipeline-error-inflection-ai-400-bad-request-validation-failed" | "pipeline-error-inflection-ai-401-unauthorized" | "pipeline-error-inflection-ai-403-model-access-denied" | "pipeline-error-inflection-ai-429-exceeded-quota" | "pipeline-error-inflection-ai-500-server-error" | "pipeline-error-inflection-ai-503-server-overloaded-error" | "pipeline-error-inflection-ai-llm-failed" | "call.in-progress.error-vapifault-inflection-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-inflection-ai-401-unauthorized" | "call.in-progress.error-vapifault-inflection-ai-403-model-access-denied" | "call.in-progress.error-vapifault-inflection-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-inflection-ai-500-server-error" | "call.in-progress.error-providerfault-inflection-ai-503-server-overloaded-error" | "pipeline-error-deep-seek-400-bad-request-validation-failed" | "pipeline-error-deep-seek-401-unauthorized" | "pipeline-error-deep-seek-403-model-access-denied" | "pipeline-error-deep-seek-429-exceeded-quota" | "pipeline-error-deep-seek-500-server-error" | "pipeline-error-deep-seek-503-server-overloaded-error" | "pipeline-error-deep-seek-llm-failed" | "call.in-progress.error-vapifault-deep-seek-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-deep-seek-401-unauthorized" | 
"call.in-progress.error-vapifault-deep-seek-403-model-access-denied" | "call.in-progress.error-vapifault-deep-seek-429-exceeded-quota" | "call.in-progress.error-providerfault-deep-seek-500-server-error" | "call.in-progress.error-providerfault-deep-seek-503-server-overloaded-error" | "pipeline-error-groq-400-bad-request-validation-failed" | "pipeline-error-groq-401-unauthorized" | "pipeline-error-groq-403-model-access-denied" | "pipeline-error-groq-429-exceeded-quota" | "pipeline-error-groq-500-server-error" | "pipeline-error-groq-503-server-overloaded-error" | "pipeline-error-groq-llm-failed" | "call.in-progress.error-vapifault-groq-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-groq-401-unauthorized" | "call.in-progress.error-vapifault-groq-403-model-access-denied" | "call.in-progress.error-vapifault-groq-429-exceeded-quota" | "call.in-progress.error-providerfault-groq-500-server-error" | "call.in-progress.error-providerfault-groq-503-server-overloaded-error" | "pipeline-error-cerebras-400-bad-request-validation-failed" | "pipeline-error-cerebras-401-unauthorized" | "pipeline-error-cerebras-403-model-access-denied" | "pipeline-error-cerebras-429-exceeded-quota" | "pipeline-error-cerebras-500-server-error" | "pipeline-error-cerebras-503-server-overloaded-error" | "pipeline-error-cerebras-llm-failed" | "call.in-progress.error-vapifault-cerebras-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-cerebras-401-unauthorized" | "call.in-progress.error-vapifault-cerebras-403-model-access-denied" | "call.in-progress.error-vapifault-cerebras-429-exceeded-quota" | "call.in-progress.error-providerfault-cerebras-500-server-error" | "call.in-progress.error-providerfault-cerebras-503-server-overloaded-error" | "pipeline-error-anthropic-400-bad-request-validation-failed" | "pipeline-error-anthropic-401-unauthorized" | "pipeline-error-anthropic-403-model-access-denied" | "pipeline-error-anthropic-429-exceeded-quota" | 
"pipeline-error-anthropic-500-server-error" | "pipeline-error-anthropic-503-server-overloaded-error" | "pipeline-error-anthropic-llm-failed" | "call.in-progress.error-providerfault-anthropic-llm-failed" | "call.in-progress.error-vapifault-anthropic-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-500-server-error" | "call.in-progress.error-providerfault-anthropic-503-server-overloaded-error" | "pipeline-error-anthropic-bedrock-400-bad-request-validation-failed" | "pipeline-error-anthropic-bedrock-401-unauthorized" | "pipeline-error-anthropic-bedrock-403-model-access-denied" | "pipeline-error-anthropic-bedrock-429-exceeded-quota" | "pipeline-error-anthropic-bedrock-500-server-error" | "pipeline-error-anthropic-bedrock-503-server-overloaded-error" | "pipeline-error-anthropic-bedrock-llm-failed" | "call.in-progress.error-providerfault-anthropic-bedrock-llm-failed" | "call.in-progress.error-vapifault-anthropic-bedrock-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-bedrock-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-bedrock-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-bedrock-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-bedrock-500-server-error" | "call.in-progress.error-providerfault-anthropic-bedrock-503-server-overloaded-error" | "pipeline-error-anthropic-vertex-400-bad-request-validation-failed" | "pipeline-error-anthropic-vertex-401-unauthorized" | "pipeline-error-anthropic-vertex-403-model-access-denied" | "pipeline-error-anthropic-vertex-429-exceeded-quota" | "pipeline-error-anthropic-vertex-500-server-error" | "pipeline-error-anthropic-vertex-503-server-overloaded-error" | "pipeline-error-anthropic-vertex-llm-failed" | 
"call.in-progress.error-providerfault-anthropic-vertex-llm-failed" | "call.in-progress.error-vapifault-anthropic-vertex-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-vertex-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-vertex-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-vertex-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-vertex-500-server-error" | "call.in-progress.error-providerfault-anthropic-vertex-503-server-overloaded-error" | "pipeline-error-together-ai-400-bad-request-validation-failed" | "pipeline-error-together-ai-401-unauthorized" | "pipeline-error-together-ai-403-model-access-denied" | "pipeline-error-together-ai-429-exceeded-quota" | "pipeline-error-together-ai-500-server-error" | "pipeline-error-together-ai-503-server-overloaded-error" | "pipeline-error-together-ai-llm-failed" | "call.in-progress.error-providerfault-together-ai-llm-failed" | "call.in-progress.error-vapifault-together-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-together-ai-401-unauthorized" | "call.in-progress.error-vapifault-together-ai-403-model-access-denied" | "call.in-progress.error-vapifault-together-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-together-ai-500-server-error" | "call.in-progress.error-providerfault-together-ai-503-server-overloaded-error" | "pipeline-error-anyscale-400-bad-request-validation-failed" | "pipeline-error-anyscale-401-unauthorized" | "pipeline-error-anyscale-403-model-access-denied" | "pipeline-error-anyscale-429-exceeded-quota" | "pipeline-error-anyscale-500-server-error" | "pipeline-error-anyscale-503-server-overloaded-error" | "pipeline-error-anyscale-llm-failed" | "call.in-progress.error-providerfault-anyscale-llm-failed" | "call.in-progress.error-vapifault-anyscale-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anyscale-401-unauthorized" | 
"call.in-progress.error-vapifault-anyscale-403-model-access-denied" | "call.in-progress.error-vapifault-anyscale-429-exceeded-quota" | "call.in-progress.error-providerfault-anyscale-500-server-error" | "call.in-progress.error-providerfault-anyscale-503-server-overloaded-error" | "pipeline-error-openrouter-400-bad-request-validation-failed" | "pipeline-error-openrouter-401-unauthorized" | "pipeline-error-openrouter-403-model-access-denied" | "pipeline-error-openrouter-429-exceeded-quota" | "pipeline-error-openrouter-500-server-error" | "pipeline-error-openrouter-503-server-overloaded-error" | "pipeline-error-openrouter-llm-failed" | "call.in-progress.error-providerfault-openrouter-llm-failed" | "call.in-progress.error-vapifault-openrouter-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-openrouter-401-unauthorized" | "call.in-progress.error-vapifault-openrouter-403-model-access-denied" | "call.in-progress.error-vapifault-openrouter-429-exceeded-quota" | "call.in-progress.error-providerfault-openrouter-500-server-error" | "call.in-progress.error-providerfault-openrouter-503-server-overloaded-error" | "pipeline-error-perplexity-ai-400-bad-request-validation-failed" | "pipeline-error-perplexity-ai-401-unauthorized" | "pipeline-error-perplexity-ai-403-model-access-denied" | "pipeline-error-perplexity-ai-429-exceeded-quota" | "pipeline-error-perplexity-ai-500-server-error" | "pipeline-error-perplexity-ai-503-server-overloaded-error" | "pipeline-error-perplexity-ai-llm-failed" | "call.in-progress.error-providerfault-perplexity-ai-llm-failed" | "call.in-progress.error-vapifault-perplexity-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-perplexity-ai-401-unauthorized" | "call.in-progress.error-vapifault-perplexity-ai-403-model-access-denied" | "call.in-progress.error-vapifault-perplexity-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-perplexity-ai-500-server-error" | 
"call.in-progress.error-providerfault-perplexity-ai-503-server-overloaded-error" | "pipeline-error-deepinfra-400-bad-request-validation-failed" | "pipeline-error-deepinfra-401-unauthorized" | "pipeline-error-deepinfra-403-model-access-denied" | "pipeline-error-deepinfra-429-exceeded-quota" | "pipeline-error-deepinfra-500-server-error" | "pipeline-error-deepinfra-503-server-overloaded-error" | "pipeline-error-deepinfra-llm-failed" | "call.in-progress.error-providerfault-deepinfra-llm-failed" | "call.in-progress.error-vapifault-deepinfra-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-deepinfra-401-unauthorized" | "call.in-progress.error-vapifault-deepinfra-403-model-access-denied" | "call.in-progress.error-vapifault-deepinfra-429-exceeded-quota" | "call.in-progress.error-providerfault-deepinfra-500-server-error" | "call.in-progress.error-providerfault-deepinfra-503-server-overloaded-error" | "pipeline-error-runpod-400-bad-request-validation-failed" | "pipeline-error-runpod-401-unauthorized" | "pipeline-error-runpod-403-model-access-denied" | "pipeline-error-runpod-429-exceeded-quota" | "pipeline-error-runpod-500-server-error" | "pipeline-error-runpod-503-server-overloaded-error" | "pipeline-error-runpod-llm-failed" | "call.in-progress.error-providerfault-runpod-llm-failed" | "call.in-progress.error-vapifault-runpod-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-runpod-401-unauthorized" | "call.in-progress.error-vapifault-runpod-403-model-access-denied" | "call.in-progress.error-vapifault-runpod-429-exceeded-quota" | "call.in-progress.error-providerfault-runpod-500-server-error" | "call.in-progress.error-providerfault-runpod-503-server-overloaded-error" | "pipeline-error-custom-llm-400-bad-request-validation-failed" | "pipeline-error-custom-llm-401-unauthorized" | "pipeline-error-custom-llm-403-model-access-denied" | "pipeline-error-custom-llm-429-exceeded-quota" | "pipeline-error-custom-llm-500-server-error" | 
"pipeline-error-custom-llm-503-server-overloaded-error" | "pipeline-error-custom-llm-llm-failed" | "call.in-progress.error-providerfault-custom-llm-llm-failed" | "call.in-progress.error-vapifault-custom-llm-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-custom-llm-401-unauthorized" | "call.in-progress.error-vapifault-custom-llm-403-model-access-denied" | "call.in-progress.error-vapifault-custom-llm-429-exceeded-quota" | "call.in-progress.error-providerfault-custom-llm-500-server-error" | "call.in-progress.error-providerfault-custom-llm-503-server-overloaded-error" | "call.in-progress.error-pipeline-ws-model-connection-failed" | "pipeline-error-custom-voice-failed" | "pipeline-error-cartesia-socket-hang-up" | "pipeline-error-cartesia-requested-payment" | "pipeline-error-cartesia-500-server-error" | "pipeline-error-cartesia-502-server-error" | "pipeline-error-cartesia-503-server-error" | "pipeline-error-cartesia-522-server-error" | "call.in-progress.error-vapifault-cartesia-socket-hang-up" | "call.in-progress.error-vapifault-cartesia-requested-payment" | "call.in-progress.error-providerfault-cartesia-500-server-error" | "call.in-progress.error-providerfault-cartesia-503-server-error" | "call.in-progress.error-providerfault-cartesia-522-server-error" | "pipeline-error-eleven-labs-voice-not-found" | "pipeline-error-eleven-labs-quota-exceeded" | "pipeline-error-eleven-labs-unauthorized-access" | "pipeline-error-eleven-labs-unauthorized-to-access-model" | "pipeline-error-eleven-labs-professional-voices-only-for-creator-plus" | "pipeline-error-eleven-labs-blocked-free-plan-and-requested-upgrade" | "pipeline-error-eleven-labs-blocked-concurrent-requests-and-requested-upgrade" | "pipeline-error-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade" | "pipeline-error-eleven-labs-system-busy-and-requested-upgrade" | "pipeline-error-eleven-labs-voice-not-fine-tuned" | "pipeline-error-eleven-labs-invalid-api-key" | 
"pipeline-error-eleven-labs-invalid-voice-samples" | "pipeline-error-eleven-labs-voice-disabled-by-owner" | "pipeline-error-eleven-labs-vapi-voice-disabled-by-owner" | "pipeline-error-eleven-labs-blocked-account-in-probation" | "pipeline-error-eleven-labs-blocked-content-against-their-policy" | "pipeline-error-eleven-labs-missing-samples-for-voice-clone" | "pipeline-error-eleven-labs-voice-not-fine-tuned-and-cannot-be-used" | "pipeline-error-eleven-labs-voice-not-allowed-for-free-users" | "pipeline-error-eleven-labs-max-character-limit-exceeded" | "pipeline-error-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification" | "pipeline-error-eleven-labs-500-server-error" | "pipeline-error-eleven-labs-503-server-error" | "call.in-progress.error-vapifault-eleven-labs-voice-not-found" | "call.in-progress.error-vapifault-eleven-labs-quota-exceeded" | "call.in-progress.error-vapifault-eleven-labs-unauthorized-access" | "call.in-progress.error-vapifault-eleven-labs-unauthorized-to-access-model" | "call.in-progress.error-vapifault-eleven-labs-professional-voices-only-for-creator-plus" | "call.in-progress.error-vapifault-eleven-labs-blocked-free-plan-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-blocked-concurrent-requests-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-system-busy-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned" | "call.in-progress.error-vapifault-eleven-labs-invalid-api-key" | "call.in-progress.error-vapifault-eleven-labs-invalid-voice-samples" | "call.in-progress.error-vapifault-eleven-labs-voice-disabled-by-owner" | "call.in-progress.error-vapifault-eleven-labs-blocked-account-in-probation" | "call.in-progress.error-vapifault-eleven-labs-blocked-content-against-their-policy" | 
"call.in-progress.error-vapifault-eleven-labs-missing-samples-for-voice-clone" | "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned-and-cannot-be-used" | "call.in-progress.error-vapifault-eleven-labs-voice-not-allowed-for-free-users" | "call.in-progress.error-vapifault-eleven-labs-max-character-limit-exceeded" | "call.in-progress.error-vapifault-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification" | "call.in-progress.error-providerfault-eleven-labs-system-busy-and-requested-upgrade" | "call.in-progress.error-providerfault-eleven-labs-500-server-error" | "call.in-progress.error-providerfault-eleven-labs-503-server-error" | "pipeline-error-playht-request-timed-out" | "pipeline-error-playht-invalid-voice" | "pipeline-error-playht-unexpected-error" | "pipeline-error-playht-out-of-credits" | "pipeline-error-playht-invalid-emotion" | "pipeline-error-playht-voice-must-be-a-valid-voice-manifest-uri" | "pipeline-error-playht-401-unauthorized" | "pipeline-error-playht-403-forbidden-out-of-characters" | "pipeline-error-playht-403-forbidden-api-access-not-available" | "pipeline-error-playht-429-exceeded-quota" | "pipeline-error-playht-502-gateway-error" | "pipeline-error-playht-504-gateway-error" | "call.in-progress.error-vapifault-playht-request-timed-out" | "call.in-progress.error-vapifault-playht-invalid-voice" | "call.in-progress.error-vapifault-playht-unexpected-error" | "call.in-progress.error-vapifault-playht-out-of-credits" | "call.in-progress.error-vapifault-playht-invalid-emotion" | "call.in-progress.error-vapifault-playht-voice-must-be-a-valid-voice-manifest-uri" | "call.in-progress.error-vapifault-playht-401-unauthorized" | "call.in-progress.error-vapifault-playht-403-forbidden-out-of-characters" | "call.in-progress.error-vapifault-playht-403-forbidden-api-access-not-available" | "call.in-progress.error-vapifault-playht-429-exceeded-quota" | "call.in-progress.error-providerfault-playht-502-gateway-error" | 
"call.in-progress.error-providerfault-playht-504-gateway-error" | "pipeline-error-custom-transcriber-failed" | "call.in-progress.error-vapifault-custom-transcriber-failed" | "pipeline-error-eleven-labs-transcriber-failed" | "call.in-progress.error-vapifault-eleven-labs-transcriber-failed" | "pipeline-error-deepgram-returning-400-no-such-model-language-tier-combination" | "pipeline-error-deepgram-returning-401-invalid-credentials" | "pipeline-error-deepgram-returning-403-model-access-denied" | "pipeline-error-deepgram-returning-404-not-found" | "pipeline-error-deepgram-returning-500-invalid-json" | "pipeline-error-deepgram-returning-502-network-error" | "pipeline-error-deepgram-returning-502-bad-gateway-ehostunreach" | "pipeline-error-deepgram-returning-econnreset" | "call.in-progress.error-vapifault-deepgram-returning-400-no-such-model-language-tier-combination" | "call.in-progress.error-vapifault-deepgram-returning-401-invalid-credentials" | "call.in-progress.error-vapifault-deepgram-returning-404-not-found" | "call.in-progress.error-vapifault-deepgram-returning-403-model-access-denied" | "call.in-progress.error-providerfault-deepgram-returning-500-invalid-json" | "call.in-progress.error-providerfault-deepgram-returning-502-network-error" | "call.in-progress.error-providerfault-deepgram-returning-502-bad-gateway-ehostunreach" | "pipeline-error-google-transcriber-failed" | "call.in-progress.error-vapifault-google-transcriber-failed" | "pipeline-error-openai-transcriber-failed" | "call.in-progress.error-vapifault-openai-transcriber-failed" | "call.in-progress.error-warm-transfer-max-duration" | "call.in-progress.error-warm-transfer-assistant-cancelled" | "call.in-progress.error-warm-transfer-silence-timeout" | "call.in-progress.error-warm-transfer-microphone-timeout" | "assistant-ended-call" | "assistant-said-end-call-phrase" | "assistant-ended-call-with-hangup-task" | "assistant-ended-call-after-message-spoken" | "assistant-forwarded-call" | 
"assistant-join-timed-out" | "call.in-progress.error-assistant-did-not-receive-customer-audio" | "call.in-progress.error-transfer-failed" | "customer-busy" | "customer-ended-call" | "customer-ended-call-before-warm-transfer" | "customer-ended-call-after-warm-transfer-attempt" | "customer-did-not-answer" | "customer-did-not-give-microphone-permission" | "exceeded-max-duration" | "manually-canceled" | "phone-call-provider-closed-websocket" | "call.forwarding.operator-busy" | "silence-timed-out" | "call.in-progress.error-sip-inbound-call-failed-to-connect" | "call.in-progress.error-providerfault-outbound-sip-403-forbidden" | "call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required" | "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable" | "call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable" | "call.in-progress.error-sip-outbound-call-failed-to-connect" | "call.ringing.hook-executed-say" | "call.ringing.hook-executed-transfer" | "call.ending.hook-executed-say" | "call.ending.hook-executed-transfer" | "call.ringing.sip-inbound-caller-hungup-before-call-connect" | "call.ringing.error-sip-inbound-call-failed-to-connect" | "twilio-failed-to-connect-call" | "twilio-reported-customer-misdialed" | "vonage-rejected" | "voicemail" | "call-deleted"; /** This is the destination where the call ended up being transferred to. If the call was not transferred, this will be empty. */ destination?: TransferDestinationNumber | TransferDestinationSip; /** This is the unique identifier for the call. */ id: string; /** This is the unique identifier for the org that this call belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the call was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the call was last updated. * @format date-time */ updatedAt: string; /** * This is the ISO 8601 date-time string of when the call was started. 
* @format date-time */ startedAt?: string; /** * This is the ISO 8601 date-time string of when the call was ended. * @format date-time */ endedAt?: string; /** This is the cost of the call in USD. */ cost?: number; /** This is the cost of the call in USD. */ costBreakdown?: CostBreakdown; /** This is a copy of assistant artifact plan. This isn't actually stored on the call but rather just returned in POST /call/web to enable artifact creation client side. */ artifactPlan?: ArtifactPlan; /** This is the analysis of the call. Configure in `assistant.analysisPlan`. */ analysis?: Analysis; /** This is to real-time monitor the call. Configure in `assistant.monitorPlan`. */ monitor?: Monitor; /** These are the artifacts created from the call. Configure in `assistant.artifactPlan`. */ artifact?: Artifact; /** This is the compliance of the call. Configure in `assistant.compliancePlan`. */ compliance?: Compliance; /** * The ID of the call as provided by the phone number service. callSid in Twilio. conversationUuid in Vonage. callControlId in Telnyx. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. * @deprecated */ phoneCallProviderId?: string; /** This is the campaign ID that the call belongs to. */ campaignId?: string; /** * This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead. * * To start a call with: * - Assistant, use `assistantId` or `assistant` * - Squad, use `squadId` or `squad` * - Workflow, use `workflowId` or `workflow` */ assistantId?: string; /** * This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead. * * To start a call with: * - Assistant, use `assistant` * - Squad, use `squad` * - Workflow, use `workflow` */ assistant?: CreateAssistantDTO; /** These are the overrides for the `assistant` or `assistantId`'s settings and template variables. 
*/ assistantOverrides?: AssistantOverrides; /** * This is the squad that will be used for the call. To use a transient squad, use `squad` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squadId?: string; /** * This is a squad that will be used for the call. To use an existing squad, use `squadId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squad?: CreateSquadDTO; /** * These are the overrides for the `squad` or `squadId`'s member settings and template variables. * This will apply to all members of the squad. */ squadOverrides?: AssistantOverrides; /** * This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflowId?: string; /** * This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflow?: CreateWorkflowDTO; /** These are the overrides for the `workflow` or `workflowId`'s settings and template variables. */ workflowOverrides?: WorkflowOverrides; /** * This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ phoneNumberId?: string; /** * This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. 
*/ phoneNumber?: ImportTwilioPhoneNumberDTO; /** * This is the customer that will be called. To call a transient customer , use `customer` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ customerId?: string; /** * This is the customer that will be called. To call an existing customer, use `customerId` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ customer?: CreateCustomerDTO; /** * This is the name of the call. This is just for your own reference. * @maxLength 40 */ name?: string; /** This is the schedule plan of the call. */ schedulePlan?: SchedulePlan; /** This is the transport of the call. */ transport?: object; } export interface CallBatchError { customer: CreateCustomerDTO; error: string; } export interface CallBatchResponse { /** Subscription limits at the end of this batch */ subscriptionLimits?: SubscriptionLimits; /** This is the list of calls that were created. */ results: Call[]; /** This is the list of calls that failed to be created. */ errors: CallBatchError[]; } export interface CreateCallDTO { /** * This is used to issue batch calls to multiple customers. * * Only relevant for `outboundPhoneCall`. To call a single customer, use `customer` instead. */ customers?: CreateCustomerDTO[]; /** * This is the name of the call. This is just for your own reference. * @maxLength 40 */ name?: string; /** This is the schedule plan of the call. */ schedulePlan?: SchedulePlan; /** This is the transport of the call. */ transport?: object; /** * This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead. * * To start a call with: * - Assistant, use `assistantId` or `assistant` * - Squad, use `squadId` or `squad` * - Workflow, use `workflowId` or `workflow` */ assistantId?: string; /** * This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead. 
* * To start a call with: * - Assistant, use `assistant` * - Squad, use `squad` * - Workflow, use `workflow` */ assistant?: CreateAssistantDTO; /** These are the overrides for the `assistant` or `assistantId`'s settings and template variables. */ assistantOverrides?: AssistantOverrides; /** * This is the squad that will be used for the call. To use a transient squad, use `squad` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squadId?: string; /** * This is a squad that will be used for the call. To use an existing squad, use `squadId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squad?: CreateSquadDTO; /** * These are the overrides for the `squad` or `squadId`'s member settings and template variables. * This will apply to all members of the squad. */ squadOverrides?: AssistantOverrides; /** * This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflowId?: string; /** * This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflow?: CreateWorkflowDTO; /** These are the overrides for the `workflow` or `workflowId`'s settings and template variables. */ workflowOverrides?: WorkflowOverrides; /** * This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. 
*/ phoneNumberId?: string; /** * This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ phoneNumber?: ImportTwilioPhoneNumberDTO; /** * This is the customer that will be called. To call a transient customer , use `customer` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ customerId?: string; /** * This is the customer that will be called. To call an existing customer, use `customerId` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ customer?: CreateCustomerDTO; } export interface StructuredOutputFilterDTO { /** Equal to */ eq?: string; /** Not equal to */ neq?: string; /** Greater than */ gt?: string; /** Greater than or equal to */ gte?: string; /** Less than */ lt?: string; /** Less than or equal to */ lte?: string; /** Contains */ contains?: string; /** Not contains */ notContains?: string; } export interface CallPaginatedResponse { results: Call[]; metadata: PaginationMeta; } export interface CreateOutboundCallDTO { /** * This is used to issue batch calls to multiple customers. * * Only relevant for `outboundPhoneCall`. To call a single customer, use `customer` instead. */ customers?: CreateCustomerDTO[]; /** * This is the name of the call. This is just for your own reference. * @maxLength 40 */ name?: string; /** This is the schedule plan of the call. */ schedulePlan?: SchedulePlan; /** This is the transport of the call. */ transport?: object; /** * This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead. * * To start a call with: * - Assistant, use `assistantId` or `assistant` * - Squad, use `squadId` or `squad` * - Workflow, use `workflowId` or `workflow` */ assistantId?: string; /** * This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead. 
* * To start a call with: * - Assistant, use `assistant` * - Squad, use `squad` * - Workflow, use `workflow` */ assistant?: CreateAssistantDTO; /** These are the overrides for the `assistant` or `assistantId`'s settings and template variables. */ assistantOverrides?: AssistantOverrides; /** * This is the squad that will be used for the call. To use a transient squad, use `squad` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squadId?: string; /** * This is a squad that will be used for the call. To use an existing squad, use `squadId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squad?: CreateSquadDTO; /** * These are the overrides for the `squad` or `squadId`'s member settings and template variables. * This will apply to all members of the squad. */ squadOverrides?: AssistantOverrides; /** * This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflowId?: string; /** * This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflow?: CreateWorkflowDTO; /** These are the overrides for the `workflow` or `workflowId`'s settings and template variables. */ workflowOverrides?: WorkflowOverrides; /** * This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. 
*/ phoneNumberId?: string; /** * This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ phoneNumber?: ImportTwilioPhoneNumberDTO; /** * This is the customer that will be called. To call a transient customer , use `customer` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ customerId?: string; /** * This is the customer that will be called. To call an existing customer, use `customerId` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ customer?: CreateCustomerDTO; } export interface CreateWebCallDTO { /** @default true */ roomDeleteOnUserLeaveEnabled?: boolean; /** * This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead. * * To start a call with: * - Assistant, use `assistantId` or `assistant` * - Squad, use `squadId` or `squad` * - Workflow, use `workflowId` or `workflow` */ assistantId?: string; /** * This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead. * * To start a call with: * - Assistant, use `assistant` * - Squad, use `squad` * - Workflow, use `workflow` */ assistant?: CreateAssistantDTO; /** These are the overrides for the `assistant` or `assistantId`'s settings and template variables. */ assistantOverrides?: AssistantOverrides; /** * This is the squad that will be used for the call. To use a transient squad, use `squad` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squadId?: string; /** * This is a squad that will be used for the call. To use an existing squad, use `squadId` instead. 
* * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squad?: CreateSquadDTO; /** * These are the overrides for the `squad` or `squadId`'s member settings and template variables. * This will apply to all members of the squad. */ squadOverrides?: AssistantOverrides; /** * This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflowId?: string; /** * This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflow?: CreateWorkflowDTO; /** These are the overrides for the `workflow` or `workflowId`'s settings and template variables. */ workflowOverrides?: WorkflowOverrides; } export interface UpdateCallDTO { /** * This is the name of the call. This is just for your own reference. * @maxLength 40 */ name?: string; } export interface DeleteCallDTO { /** * These are the Call IDs to be bulk deleted. * If provided, the call ID if any in the request query will be ignored * When requesting a bulk delete, updates when a call is deleted will be sent as a webhook to the server URL configured in the Org settings. * It may take up to a few hours to complete the bulk delete, and will be asynchronous. 
*/ ids?: string[]; } export interface DeveloperMessage { /** * This is the role of the message author * @default "developer" */ role: "developer"; /** * This is the content of the developer message * @maxLength 10000 */ content: string; /** * This is an optional name for the participant * @maxLength 40 */ name?: string; /** This is an optional metadata for the message */ metadata?: object; } export interface SystemMessage { /** The role of the system in the conversation. */ role: string; /** The message content from the system. */ message: string; /** The timestamp when the message was sent. */ time: number; /** The number of seconds from the start of the conversation. */ secondsFromStart: number; } export interface UserMessage { /** The role of the user in the conversation. */ role: string; /** The message content from the user. */ message: string; /** The timestamp when the message was sent. */ time: number; /** The timestamp when the message ended. */ endTime: number; /** The number of seconds from the start of the conversation. */ secondsFromStart: number; /** The duration of the message in seconds. */ duration?: number; /** Indicates if the message was filtered for security reasons. */ isFiltered?: boolean; /** List of detected security threats if the message was filtered. */ detectedThreats?: string[]; /** The original message before filtering (only included if content was filtered). */ originalMessage?: string; /** The metadata associated with the message. Currently used to store the transcriber's word level confidence. 
*/ metadata?: object; } export interface ToolCallFunction { /** This is the arguments to call the function with */ arguments: string; /** * This is the name of the function to call * @maxLength 80 */ name: string; } export interface ToolCall { /** This is the ID of the tool call */ id: string; /** This is the type of tool */ type: string; /** This is the function that was called */ function: ToolCallFunction; } export interface AssistantMessage { /** * This is the role of the message author * @default "assistant" */ role: "assistant"; /** * This is the content of the assistant message * @maxLength 10000 */ content?: string; /** * This is the refusal message generated by the model * @maxLength 10000 */ refusal?: string; /** This is the tool calls generated by the model */ tool_calls?: ToolCall[]; /** * This is an optional name for the participant * @maxLength 40 */ name?: string; /** This is an optional metadata for the message */ metadata?: object; } export interface ToolMessage { /** * This is the role of the message author * @default "tool" */ role: "tool"; /** * This is the content of the tool message * @maxLength 10000 */ content: string; /** This is the ID of the tool call this message is responding to */ tool_call_id: string; /** * This is an optional name for the participant * @maxLength 40 */ name?: string; /** This is an optional metadata for the message */ metadata?: object; } export interface FunctionCall { /** This is the arguments to call the function with */ arguments: string; /** * This is the name of the function to call * @maxLength 40 */ name: string; } export interface Chat { /** This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead. */ assistantId?: string; /** This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead. 
*/ assistant?: CreateAssistantDTO; /** * These are the variable values that will be used to replace template variables in the assistant messages. * Only variable substitution is supported in chat contexts - other assistant properties cannot be overridden. */ assistantOverrides?: AssistantOverrides; /** This is the squad that will be used for the chat. To use a transient squad, use `squad` instead. */ squadId?: string; /** This is the squad that will be used for the chat. To use an existing squad, use `squadId` instead. */ squad?: CreateSquadDTO; /** * This is the name of the chat. This is just for your own reference. * @maxLength 40 */ name?: string; /** * This is the ID of the session that will be used for the chat. * Mutually exclusive with previousChatId. */ sessionId?: string; /** * This is the input text for the chat. * Can be a string or an array of chat messages. */ input?: | string | ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** * This is a flag that determines whether the response should be streamed. * When true, the response will be sent as chunks of text. * @default false */ stream?: boolean; /** * This is the ID of the chat that will be used as context for the new chat. * The messages from the previous chat will be used as context. * Mutually exclusive with sessionId. */ previousChatId?: string; /** This is the unique identifier for the chat. */ id: string; /** This is the unique identifier for the org that this chat belongs to. */ orgId: string; /** * This is an array of messages used as context for the chat. * Used to provide message history for multi-turn conversations. */ messages?: ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** This is the output messages generated by the system in response to the input. 
*/ output?: ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** * This is the ISO 8601 date-time string of when the chat was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the chat was last updated. * @format date-time */ updatedAt: string; /** These are the costs of individual components of the chat in USD. */ costs?: (ModelCost | ChatCost)[]; /** This is the cost of the chat in USD. */ cost?: number; } export interface TwilioSMSChatTransport { /** * This is the conversation type of the call (ie, voice or chat). * @default "chat" */ conversationType?: "chat"; /** * This is the phone number that will be used to send the SMS. * If provided, will create a new session. If not provided, uses existing session's phoneNumberId. * The phone number must have SMS enabled and belong to your organization. */ phoneNumberId?: string; /** * This is the customer who will receive the SMS. * If provided, will create a new session. If not provided, uses existing session's customer. */ customer?: CreateCustomerDTO; /** * Whether to use LLM-generated messages for outbound SMS. * When true (default), input is processed by the assistant for a response. * When false, the input text is forwarded directly as the SMS message without LLM processing. * Useful for sending pre-defined messages or notifications. * @default true */ useLLMGeneratedMessageForOutbound?: boolean; /** * The type of transport to use for sending the chat response. * Currently supports 'twilio.sms' for SMS delivery via Twilio. */ type: "twilio.sms"; } export interface CreateChatDTO { /** This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead. */ assistantId?: string; /** This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead. 
*/ assistant?: CreateAssistantDTO; /** * These are the variable values that will be used to replace template variables in the assistant messages. * Only variable substitution is supported in chat contexts - other assistant properties cannot be overridden. */ assistantOverrides?: AssistantOverrides; /** This is the squad that will be used for the chat. To use a transient squad, use `squad` instead. */ squadId?: string; /** This is the squad that will be used for the chat. To use an existing squad, use `squadId` instead. */ squad?: CreateSquadDTO; /** * This is the name of the chat. This is just for your own reference. * @maxLength 40 */ name?: string; /** * This is the ID of the session that will be used for the chat. * Mutually exclusive with previousChatId. */ sessionId?: string; /** * This is the input text for the chat. * Can be a string or an array of chat messages. * This field is REQUIRED for chat creation. */ input: | string | ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** * This is a flag that determines whether the response should be streamed. * When true, the response will be sent as chunks of text. * @default false */ stream?: boolean; /** * This is the ID of the chat that will be used as context for the new chat. * The messages from the previous chat will be used as context. * Mutually exclusive with sessionId. */ previousChatId?: string; /** * This is used to send the chat through a transport like SMS. * If transport.phoneNumberId and transport.customer are provided, creates a new session. * If sessionId is provided without transport fields, uses existing session data. * Cannot specify both sessionId and transport fields (phoneNumberId/customer) together. */ transport?: TwilioSMSChatTransport; } export interface GetChatPaginatedDTO { /** This is the unique identifier for the assistant that will be used for the chat. 
*/ assistantId?: string; /** This is the unique identifier for the squad that will be used for the chat. */ squadId?: string; /** This is the unique identifier for the session that will be used for the chat. */ sessionId?: string; /** This is the unique identifier for the previous chat to filter by. */ previousChatId?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; } export interface ChatPaginatedResponse { results: Chat[]; metadata: PaginationMeta; } export interface CreateChatStreamResponse { /** This is the unique identifier for the streaming response. */ id: string; /** * This is the ID of the session that will be used for the chat. 
* Helps track conversation context across multiple messages. */ sessionId?: string; /** * This is the path to the content being updated. * Format: `chat.output[{contentIndex}].content` where contentIndex identifies the specific content item. * @example "chat.output[0].content" */ path: string; /** This is the incremental content chunk being streamed. */ delta: string; } export interface OpenAIResponsesRequest { /** This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead. */ assistantId?: string; /** This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead. */ assistant?: CreateAssistantDTO; /** * These are the variable values that will be used to replace template variables in the assistant messages. * Only variable substitution is supported in chat contexts - other assistant properties cannot be overridden. */ assistantOverrides?: AssistantOverrides; /** This is the squad that will be used for the chat. To use a transient squad, use `squad` instead. */ squadId?: string; /** This is the squad that will be used for the chat. To use an existing squad, use `squadId` instead. */ squad?: CreateSquadDTO; /** * This is the name of the chat. This is just for your own reference. * @maxLength 40 */ name?: string; /** * This is the ID of the session that will be used for the chat. * Mutually exclusive with previousChatId. */ sessionId?: string; /** * This is the input text for the chat. * Can be a string or an array of chat messages. * This field is REQUIRED for chat creation. */ input: | string | ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** * Whether to stream the response or not. * @default true */ stream?: boolean; /** * This is the ID of the chat that will be used as context for the new chat. * The messages from the previous chat will be used as context. * Mutually exclusive with sessionId. 
*/ previousChatId?: string; /** * This is used to send the chat through a transport like SMS. * If transport.phoneNumberId and transport.customer are provided, creates a new session. * If sessionId is provided without transport fields, uses existing session data. * Cannot specify both sessionId and transport fields (phoneNumberId/customer) together. */ transport?: TwilioSMSChatTransport; } export interface ChatAssistantOverrides { /** * Variable values for template substitution * @example {"name":"John","company":"ACME Corp"} */ variableValues?: object; } export interface CreateWebCustomerDTO { /** * This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it. * * Use cases: * - `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks. * - `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls. * * If `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\+?[a-zA-Z0-9]+$/`). * * @default true (E164 check is enabled) * @default true */ numberE164CheckEnabled?: boolean; /** * This is the extension that will be dialed after the call is answered. * @maxLength 10 * @example null */ extension?: string; /** * These are the variable values that will be used to replace template variables in the assistant messages. * Only variable substitution is supported in web chat - other assistant properties cannot be overridden. */ assistantOverrides?: ChatAssistantOverrides; /** * This is the number of the customer. * @minLength 3 * @maxLength 40 */ number?: string; /** This is the SIP URI of the customer. */ sipUri?: string; /** * This is the name of the customer. This is just for your own reference. * * For SIP inbound calls, this is extracted from the `From` SIP header with format `"Display Name" `. 
* @maxLength 40 */ name?: string; /** * This is the email of the customer. * @maxLength 40 */ email?: string; /** * This is the external ID of the customer. * @maxLength 40 */ externalId?: string; } export interface CreateWebChatDTO { /** The assistant ID to use for this chat */ assistantId: string; /** * This is the ID of the session that will be used for the chat. * If provided, the conversation will continue from the previous state. * If not provided or expired, a new session will be created. */ sessionId?: string; /** * This is the expiration time for the session. This can ONLY be set if starting a new chat and therefore a new session is created. * If session already exists, this will be ignored and NOT be updated for the existing session. Use PATCH /session/:id to update the session expiration time. * @min 60 * @max 2592000 */ sessionExpirationSeconds?: number; /** * These are the variable values that will be used to replace template variables in the assistant messages. * Only variable substitution is supported in web chat - other assistant properties cannot be overridden. */ assistantOverrides?: ChatAssistantOverrides; /** * This is the customer information for the chat. * Used to automatically manage sessions for repeat customers. */ customer?: CreateWebCustomerDTO; /** * This is the input text for the chat. * Can be a string or an array of chat messages. */ input: | string | ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** * This is a flag that determines whether the response should be streamed. * When true, the response will be sent as chunks of text. * @default false */ stream?: boolean; /** * This is a flag to indicate end of session. When true, the session will be marked as completed and the chat will be ended. * Used to end session to send End-of-session report to the customer. * When flag is set to true, any messages sent will not be processed and session will directly be marked as completed. 
* @default false */ sessionEnd?: boolean; } export interface WebChat { /** This is the unique identifier for the chat. */ id: string; /** This is the ID of the session for the chat. Send it in the next chat request to continue the conversation. */ sessionId?: string; /** This is the output messages generated by the system in response to the input. */ output: ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; } export interface OpenAIWebChatRequest { /** The assistant ID to use for this chat */ assistantId: string; /** * This is the ID of the session that will be used for the chat. * If provided, the conversation will continue from the previous state. * If not provided or expired, a new session will be created. */ sessionId?: string; /** * This is the expiration time for the session. This can ONLY be set if starting a new chat and therefore a new session is created. * If session already exists, this will be ignored and NOT be updated for the existing session. Use PATCH /session/:id to update the session expiration time. * @min 60 * @max 2592000 */ sessionExpirationSeconds?: number; /** * These are the variable values that will be used to replace template variables in the assistant messages. * Only variable substitution is supported in web chat - other assistant properties cannot be overridden. */ assistantOverrides?: ChatAssistantOverrides; /** * This is the customer information for the chat. * Used to automatically manage sessions for repeat customers. */ customer?: CreateWebCustomerDTO; /** * This is the input text for the chat. * Can be a string or an array of chat messages. */ input: | string | ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** * Whether to stream the response or not. * @default true */ stream?: boolean; /** * This is a flag to indicate end of session. When true, the session will be marked as completed and the chat will be ended. 
* Used to end session to send End-of-session report to the customer. * When flag is set to true, any messages sent will not be processed and session will directly be marked as completed. * @default false */ sessionEnd?: boolean; } export interface ResponseOutputText { /** * Annotations in the text output * @default [] */ annotations: object[]; /** The text output from the model */ text: string; /** * The type of the output text * @default "output_text" */ type: "output_text"; } export interface ResponseOutputMessage { /** The unique ID of the output message */ id: string; /** Content of the output message */ content: ResponseOutputText[]; /** * The role of the output message * @default "assistant" */ role: "assistant"; /** The status of the message */ status: "in_progress" | "completed" | "incomplete"; /** * The type of the output message * @default "message" */ type: "message"; } export interface ResponseObject { /** Unique identifier for this Response */ id: string; /** * The object type * @default "response" */ object: "response"; /** Unix timestamp (in seconds) of when this Response was created */ created_at: number; /** Status of the response */ status: "completed" | "failed" | "in_progress" | "incomplete"; /** * Error message if the response failed * @default null */ error?: string | null; /** Output messages from the model */ output: ResponseOutputMessage[]; } export interface ResponseTextDeltaEvent { /** Index of the content part */ content_index: number; /** Text delta being added */ delta: string; /** ID of the output item */ item_id: string; /** Index of the output item */ output_index: number; /** * Event type * @default "response.output_text.delta" */ type: "response.output_text.delta"; } export interface ResponseTextDoneEvent { /** Index of the content part */ content_index: number; /** ID of the output item */ item_id: string; /** Index of the output item */ output_index: number; /** Complete text content */ text: string; /** * Event type * @default 
"response.output_text.done" */ type: "response.output_text.done"; } export interface ResponseCompletedEvent { /** The completed response */ response: ResponseObject; /** * Event type * @default "response.completed" */ type: "response.completed"; } export interface ResponseErrorEvent { /** * Event type * @default "error" */ type: "error"; /** * Error code * @example "ERR_SOMETHING" */ code: string; /** * Error message * @example "Something went wrong" */ message: string; /** Parameter that caused the error */ param?: string | null; /** * Sequence number of the event * @example 1 */ sequence_number: number; } export interface CreateCampaignDTO { /** * This is the name of the campaign. This is just for your own reference. * @example "Q2 Sales Campaign" */ name: string; /** This is the assistant ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both. */ assistantId?: string; /** This is the workflow ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both. */ workflowId?: string; /** This is the phone number ID that will be used for the campaign calls. */ phoneNumberId: string; /** This is the schedule plan for the campaign. Calls will start at startedAt and continue until your organization’s concurrency limit is reached. Any remaining calls will be retried for up to one hour as capacity becomes available. After that hour or after latestAt, whichever comes first, any calls that couldn’t be placed won’t be retried. */ schedulePlan?: SchedulePlan; /** These are the customers that will be called in the campaign. */ customers: CreateCustomerDTO[]; } export interface Campaign { /** This is the status of the campaign. */ status: "scheduled" | "in-progress" | "ended"; /** This is the explanation for how the campaign ended. 
*/ endedReason?: | "campaign.scheduled.ended-by-user" | "campaign.in-progress.ended-by-user" | "campaign.ended.success"; /** * This is the name of the campaign. This is just for your own reference. * @example "Q2 Sales Campaign" */ name: string; /** This is the assistant ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both. */ assistantId?: string; /** This is the workflow ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both. */ workflowId?: string; /** This is the phone number ID that will be used for the campaign calls. */ phoneNumberId: string; /** This is the schedule plan for the campaign. Calls will start at startedAt and continue until your organization’s concurrency limit is reached. Any remaining calls will be retried for up to one hour as capacity becomes available. After that hour or after latestAt, whichever comes first, any calls that couldn’t be placed won’t be retried. */ schedulePlan?: SchedulePlan; /** These are the customers that will be called in the campaign. */ customers: CreateCustomerDTO[]; /** This is the unique identifier for the campaign. */ id: string; /** This is the unique identifier for the org that this campaign belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the campaign was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the campaign was last updated. * @format date-time */ updatedAt: string; /** This is a map of call IDs to campaign call details. */ calls: object; /** This is the number of calls that have been scheduled. */ callsCounterScheduled: number; /** This is the number of calls that have been queued. */ callsCounterQueued: number; /** This is the number of calls that have been in progress. */ callsCounterInProgress: number; /** This is the number of calls whose ended reason is 'voicemail'. 
*/ callsCounterEndedVoicemail: number; /** This is the number of calls that have ended. */ callsCounterEnded: number; } export interface CampaignPaginatedResponse { results: Campaign[]; metadata: PaginationMeta; } export interface UpdateCampaignDTO { /** This is the name of the campaign. This is just for your own reference. */ name?: string; /** * This is the assistant ID that will be used for the campaign calls. * Can only be updated if campaign is not in progress or has ended. */ assistantId?: string; /** * This is the workflow ID that will be used for the campaign calls. * Can only be updated if campaign is not in progress or has ended. */ workflowId?: string; /** * This is the phone number ID that will be used for the campaign calls. * Can only be updated if campaign is not in progress or has ended. */ phoneNumberId?: string; /** * This is the schedule plan for the campaign. * Can only be updated if campaign is not in progress or has ended. */ schedulePlan?: SchedulePlan; /** * This is the status of the campaign. * Can only be updated to 'ended' if you want to end the campaign. * When set to 'ended', it will delete all scheduled calls. Calls in progress will be allowed to complete. */ status?: "ended"; } export interface Session { /** This is the unique identifier for the session. */ id: string; /** This is the unique identifier for the organization that owns this session. */ orgId: string; /** * This is the ISO 8601 timestamp indicating when the session was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 timestamp indicating when the session was last updated. * @format date-time */ updatedAt: string; /** This is the cost of the session in USD. */ cost?: number; /** These are the costs of individual components of the session in USD. */ costs?: (ModelCost | AnalysisCost | SessionCost)[]; /** * This is a user-defined name for the session. Maximum length is 40 characters. 
* @maxLength 40 */ name?: string; /** This is the current status of the session. Can be either 'active' or 'completed'. */ status?: "active" | "completed"; /** * Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set. * @min 60 * @max 2592000 * @example 86400 */ expirationSeconds?: number; /** This is the ID of the assistant associated with this session. Use this when referencing an existing assistant. */ assistantId?: string; /** * This is the assistant configuration for this session. Use this when creating a new assistant configuration. * If assistantId is provided, this will be ignored. */ assistant?: CreateAssistantDTO; /** This is the squad ID associated with this session. Use this when referencing an existing squad. */ squadId?: string; /** * This is the squad configuration for this session. Use this when creating a new squad configuration. * If squadId is provided, this will be ignored. */ squad?: CreateSquadDTO; /** This is an array of chat messages in the session. */ messages?: ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** This is the customer information associated with this session. */ customer?: CreateCustomerDTO; /** This is the ID of the phone number associated with this session. */ phoneNumberId?: string; /** This is the phone number configuration for this session. */ phoneNumber?: ImportTwilioPhoneNumberDTO; /** * These are the artifacts that were extracted from the session messages. * They are only available after the session has completed. * The artifact plan from the assistant or active assistant of squad is used to generate the artifact. * Currently the only supported fields of assistant artifact plan are: * - structuredOutputIds */ artifact?: Artifact; } export interface CreateSessionDTO { /** * This is a user-defined name for the session. Maximum length is 40 characters. * @maxLength 40 */ name?: string; /** This is the current status of the session. 
Can be either 'active' or 'completed'. */ status?: "active" | "completed"; /** * Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set. * @min 60 * @max 2592000 * @example 86400 */ expirationSeconds?: number; /** This is the ID of the assistant associated with this session. Use this when referencing an existing assistant. */ assistantId?: string; /** * This is the assistant configuration for this session. Use this when creating a new assistant configuration. * If assistantId is provided, this will be ignored. */ assistant?: CreateAssistantDTO; /** This is the squad ID associated with this session. Use this when referencing an existing squad. */ squadId?: string; /** * This is the squad configuration for this session. Use this when creating a new squad configuration. * If squadId is provided, this will be ignored. */ squad?: CreateSquadDTO; /** This is an array of chat messages in the session. */ messages?: ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; /** This is the customer information associated with this session. */ customer?: CreateCustomerDTO; /** This is the ID of the phone number associated with this session. */ phoneNumberId?: string; /** This is the phone number configuration for this session. */ phoneNumber?: ImportTwilioPhoneNumberDTO; } export interface UpdateSessionDTO { /** * This is the new name for the session. Maximum length is 40 characters. * @maxLength 40 */ name?: string; /** This is the new status for the session. */ status?: "active" | "completed"; /** * Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set. * @min 60 * @max 2592000 * @example 86400 */ expirationSeconds?: number; /** This is the updated array of chat messages. */ messages?: ( | SystemMessage | UserMessage | AssistantMessage | ToolMessage | DeveloperMessage )[]; } export interface GetSessionPaginatedDTO { /** This is the name of the session to filter by. 
*/ name?: string; /** This is the ID of the assistant to filter sessions by. */ assistantId?: string; /** This is the ID of the squad to filter sessions by. */ squadId?: string; /** This is the ID of the workflow to filter sessions by. */ workflowId?: string; /** This is the customer information to filter by. */ customer?: CreateCustomerDTO; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; } export interface SessionPaginatedResponse { results: Session[]; metadata: PaginationMeta; } export interface ByoPhoneNumber { /** * This is the fallback destination an inbound call will be transferred to if: * 1. `assistantId` is not set * 2. `squadId` is not set * 3. 
and, `assistant-request` message to the `serverUrl` fails * * If this is not set and above conditions are met, the inbound call is hung up with an error message. */ fallbackDestination?: TransferDestinationNumber | TransferDestinationSip; /** This is the hooks that will be used for incoming calls to this phone number. */ hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[]; /** This is to bring your own phone numbers from your own SIP trunks or Carriers. */ provider: "byo-phone-number"; /** * This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it. * * Use cases: * - `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks. * - `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls. * * If `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\+?[a-zA-Z0-9]+$/`). * * @default true (E164 check is enabled) * @default true */ numberE164CheckEnabled?: boolean; /** This is the unique identifier for the phone number. */ id: string; /** This is the unique identifier for the org that this phone number belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the phone number was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the phone number was last updated. * @format date-time */ updatedAt: string; /** This is the status of the phone number. */ status?: "active" | "activating" | "blocked"; /** * This is the name of the phone number. This is just for your own reference. * @maxLength 40 */ name?: string; /** * This is the assistant that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. 
Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ assistantId?: string; /** * This is the workflow that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ workflowId?: string; /** * This is the squad that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ squadId?: string; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. assistant.server * 2. phoneNumber.server * 3. org.server */ server?: Server; /** * This is the number of the customer. * @minLength 3 * @maxLength 40 */ number?: string; /** * This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number. * * You can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId. */ credentialId: string; } export interface TwilioPhoneNumber { /** * This is the fallback destination an inbound call will be transferred to if: * 1. `assistantId` is not set * 2. `squadId` is not set * 3. and, `assistant-request` message to the `serverUrl` fails * * If this is not set and above conditions are met, the inbound call is hung up with an error message. */ fallbackDestination?: TransferDestinationNumber | TransferDestinationSip; /** This is the hooks that will be used for incoming calls to this phone number. 
*/ hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[]; /** This is to use numbers bought on Twilio. */ provider: "twilio"; /** * Controls whether Vapi sets the messaging webhook URL on the Twilio number during import. * * If set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is. * If `true` or omitted (default), Vapi will configure both the voice and messaging URLs. * * @default true * @default true */ smsEnabled?: boolean; /** This is the unique identifier for the phone number. */ id: string; /** This is the unique identifier for the org that this phone number belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the phone number was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the phone number was last updated. * @format date-time */ updatedAt: string; /** This is the status of the phone number. */ status?: "active" | "activating" | "blocked"; /** This is the Twilio Auth Token for the phone number. */ twilioAuthToken?: string; /** This is the Twilio API Key for the phone number. */ twilioApiKey?: string; /** This is the Twilio API Secret for the phone number. */ twilioApiSecret?: string; /** * This is the name of the phone number. This is just for your own reference. * @maxLength 40 */ name?: string; /** * This is the assistant that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ assistantId?: string; /** * This is the workflow that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. 
Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ workflowId?: string; /** * This is the squad that will be used for incoming calls to this phone number. * * If neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected. */ squadId?: string; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. assistant.server * 2. phoneNumber.server * 3. org.server */ server?: Server; /** These are the digits of the phone number you own on your Twilio. */ number: string; /** This is the Twilio Account SID for the phone number. */ twilioAccountSid: string; } export interface VonagePhoneNumber { /** * This is the fallback destination an inbound call will be transferred to if: * 1. `assistantId` is not set * 2. `squadId` is not set * 3. and, `assistant-request` message to the `serverUrl` fails * * If this is not set and above conditions are met, the inbound call is hung up with an error message. */ fallbackDestination?: TransferDestinationNumber | TransferDestinationSip; /** This is the hooks that will be used for incoming calls to this phone number. */ hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[]; /** This is to use numbers bought on Vonage. */ provider: "vonage"; /** This is the unique identifier for the phone number. */ id: string; /** This is the unique identifier for the org that this phone number belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the phone number was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the phone number was last updated. * @format date-time */ updatedAt: string; /** This is the status of the phone number. 
   */
  status?: "active" | "activating" | "blocked";
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /** These are the digits of the phone number you own on your Vonage. */
  number: string;
  /** This is the credential you added in dashboard.vapi.ai/keys. It is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups. */
  credentialId: string;
}

/** Digest-authentication settings checked on incoming SIP INVITE requests. */
export interface SipAuthentication {
  /**
   * This will be expected in the `realm` field of the `authorization` header of the SIP INVITE.
   * Defaults to sip.vapi.ai.
   */
  realm?: string;
  /**
   * This will be expected in the `username` field of the `authorization` header of the SIP INVITE.
   * @minLength 20
   * @maxLength 40
   */
  username: string;
  /**
   * This will be expected to generate the `response` field of the `authorization` header of the SIP INVITE, through digest authentication.
   * @minLength 20
   * @maxLength 40
   */
  password: string;
}

export interface VapiPhoneNumber {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /** This is to create free SIP phone numbers on Vapi. */
  provider: "vapi";
  /** This is the unique identifier for the phone number. */
  id: string;
  /** This is the unique identifier for the org that this phone number belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the phone number was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the phone number was last updated.
   * @format date-time
   */
  updatedAt: string;
  /** This is the status of the phone number. */
  status?: "active" | "activating" | "blocked";
  /** These are the digits of the phone number you purchased from Vapi. */
  number?: string;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /**
   * This is the area code of the phone number to purchase.
   * @minLength 3
   * @maxLength 3
   */
  numberDesiredAreaCode?: string;
  /**
   * This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.
   * This is case-insensitive.
   */
  sipUri?: string;
  /**
   * This enables authentication for incoming SIP INVITE requests to the `sipUri`.
   * If not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.
   */
  authentication?: SipAuthentication;
}

export interface TelnyxPhoneNumber {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /** This is to use numbers bought on Telnyx. */
  provider: "telnyx";
  /** This is the unique identifier for the phone number. */
  id: string;
  /** This is the unique identifier for the org that this phone number belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the phone number was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the phone number was last updated.
   * @format date-time
   */
  updatedAt: string;
  /** This is the status of the phone number. */
  status?: "active" | "activating" | "blocked";
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /** These are the digits of the phone number you own on your Telnyx. */
  number: string;
  /** This is the credential you added in dashboard.vapi.ai/keys. It is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups. */
  credentialId: string;
}

export interface CreateByoPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /** This is to bring your own phone numbers from your own SIP trunks or Carriers. */
  provider: "byo-phone-number";
  /**
   * This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.
   *
   * Use cases:
   * - `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.
   * - `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.
   *
   * If `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\+?[a-zA-Z0-9]+$/`).
   *
   * @default true
   */
  numberE164CheckEnabled?: boolean;
  /**
   * This is the number of the customer.
   * @minLength 3
   * @maxLength 40
   */
  number?: string;
  /**
   * This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.
   * You can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId.
   */
  credentialId: string;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
}

export interface CreateTwilioPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3.
   and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /** This is to use numbers bought on Twilio. */
  provider: "twilio";
  /**
   * Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.
   *
   * If set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.
   * If `true` or omitted (default), Vapi will configure both the voice and messaging URLs.
   *
   * @default true
   */
  smsEnabled?: boolean;
  /** These are the digits of the phone number you own on your Twilio. */
  number: string;
  /** This is the Twilio Account SID for the phone number. */
  twilioAccountSid: string;
  /** This is the Twilio Auth Token for the phone number. */
  twilioAuthToken?: string;
  /** This is the Twilio API Key for the phone number. */
  twilioApiKey?: string;
  /** This is the Twilio API Secret for the phone number. */
  twilioApiSecret?: string;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
}

export interface CreateVonagePhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /** This is to use numbers bought on Vonage. */
  provider: "vonage";
  /** These are the digits of the phone number you own on your Vonage. */
  number: string;
  /** This is the credential you added in dashboard.vapi.ai/keys. It is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups. */
  credentialId: string;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
}

export interface CreateVapiPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /** This is to create free SIP phone numbers on Vapi. */
  provider: "vapi";
  /**
   * This is the area code of the phone number to purchase.
   * @minLength 3
   * @maxLength 3
   */
  numberDesiredAreaCode?: string;
  /**
   * This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.
   * This is case-insensitive.
   */
  sipUri?: string;
  /**
   * This enables authentication for incoming SIP INVITE requests to the `sipUri`.
   *
   * If not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.
   */
  authentication?: SipAuthentication;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
}

export interface CreateTelnyxPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /** This is to use numbers bought on Telnyx. */
  provider: "telnyx";
  /** These are the digits of the phone number you own on your Telnyx. */
  number: string;
  /** This is the credential you added in dashboard.vapi.ai/keys. It is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups. */
  credentialId: string;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
}

export interface UpdateByoPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /**
   * This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.
   *
   * Use cases:
   * - `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.
   * - `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.
   *
   * If `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\+?[a-zA-Z0-9]+$/`).
   *
   * @default true
   */
  numberE164CheckEnabled?: boolean;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your Server URL.
   Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected.
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /**
   * This is the number of the customer.
   * @minLength 3
   * @maxLength 40
   */
  number?: string;
  /**
   * This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.
   * You can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId.
   */
  credentialId?: string;
}

export interface UpdateTwilioPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /**
   * Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.
   *
   * If set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.
   * If `true` or omitted (default), Vapi will configure both the voice and messaging URLs.
   *
   * @default true
   */
  smsEnabled?: boolean;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /** These are the digits of the phone number you own on your Twilio. */
  number?: string;
  /** This is the Twilio Account SID for the phone number. */
  twilioAccountSid?: string;
  /** This is the Twilio Auth Token for the phone number. */
  twilioAuthToken?: string;
  /** This is the Twilio API Key for the phone number. */
  twilioApiKey?: string;
  /** This is the Twilio API Secret for the phone number. */
  twilioApiSecret?: string;
}

export interface UpdateVonagePhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /** These are the digits of the phone number you own on your Vonage. */
  number?: string;
  /** This is the credential you added in dashboard.vapi.ai/keys.
   This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups. */
  credentialId?: string;
}

export interface UpdateVapiPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /**
   * This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.
   * This is case-insensitive.
   */
  sipUri?: string;
  /**
   * This enables authentication for incoming SIP INVITE requests to the `sipUri`.
   * If not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.
   */
  authentication?: SipAuthentication;
}

export interface UpdateTelnyxPhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
  /** These are the digits of the phone number you own on your Telnyx. */
  number?: string;
  /** This is the credential you added in dashboard.vapi.ai/keys. It is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups. */
  credentialId?: string;
}

export interface ImportVonagePhoneNumberDTO {
  /**
   * This is the fallback destination an inbound call will be transferred to if:
   * 1. `assistantId` is not set
   * 2. `squadId` is not set
   * 3. and, `assistant-request` message to the `serverUrl` fails
   *
   * If this is not set and above conditions are met, the inbound call is hung up with an error message.
   */
  fallbackDestination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the hooks that will be used for incoming calls to this phone number. */
  hooks?: (PhoneNumberHookCallRinging | PhoneNumberHookCallEnding)[];
  /**
   * These are the digits of the phone number you own on your Vonage.
   * @deprecated
   */
  vonagePhoneNumber: string;
  /** This is the credential you added in dashboard.vapi.ai/keys. It is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups. */
  credentialId: string;
  /**
   * This is the name of the phone number. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the assistant that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  assistantId?: string;
  /**
   * This is the workflow that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  workflowId?: string;
  /**
   * This is the squad that will be used for incoming calls to this phone number.
   * If `assistantId`, `squadId` and `workflowId` are all unset, `assistant-request` is sent to your
   * Server URL (see `ServerMessage` / `ServerMessageResponse` for the expected shapes).
   */
  squadId?: string;
  /**
   * This is where Vapi will send webhooks (see the ServerMessage schema for the available webhooks).
   * Order of precedence: 1. assistant.server, 2. phoneNumber.server, 3. org.server.
   */
  server?: Server;
}

export interface PhoneNumberPaginatedResponse {
  /** A list of phone numbers, which can be of any provider type. */
  results: (
    | ByoPhoneNumber
    | TwilioPhoneNumber
    | VonagePhoneNumber
    | VapiPhoneNumber
    | TelnyxPhoneNumber
  )[];
  /** Metadata about the pagination. */
  metadata: PaginationMeta;
}

export interface ApiRequestTool {
  /**
   * These are the messages that will be spoken to the user as the tool is running.
   * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.
   */
  messages?: (
    | ToolMessageStart
    | ToolMessageComplete
    | ToolMessageFailed
    | ToolMessageDelayed
  )[];
  /** The type of tool.
"apiRequest" for API request tool. */ type: "apiRequest"; method: "POST" | "GET" | "PUT" | "PATCH" | "DELETE"; /** * This is the timeout in seconds for the request. Defaults to 20 seconds. * * @default 20 * @min 1 * @max 300 * @example 20 */ timeoutSeconds?: number; /** * The credential ID for API request authentication * @example "550e8400-e29b-41d4-a716-446655440000" */ credentialId?: string; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and 
trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * This is the name of the tool. This will be passed to the model. * * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40. * @maxLength 40 * @pattern /^[a-zA-Z0-9_-]{1,40}$/ */ name?: string; /** This is the description of the tool. This will be passed to the model. */ description?: string; /** This is where the request will be sent. */ url: string; /** This is the body of the request. */ body?: JsonSchema; /** These are the headers to send with the request. */ headers?: JsonSchema; /** * This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried). * * @default undefined (the request will not be retried) */ backoffPlan?: BackoffPlan; /** * This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call. * * Usage: * 1. 
Use `aliases` to extract variables from the tool's response body. (Most common case) * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{customer.name}}" * }, * { * "key": "customerAge", * "value": "{{customer.age}}" * } * ] * } * ``` * * The tool response body is made available to the liquid template. * * 2. Use `aliases` to extract variables from the tool's response body if the response is an array. * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{$[0].name}}" * }, * { * "key": "customerAge", * "value": "{{$[0].age}}" * } * ] * } * ``` * * $ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array). * * 3. Use `aliases` to extract variables from the tool's response headers. * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{tool.response.headers.customer-name}}" * }, * { * "key": "customerAge", * "value": "{{tool.response.headers.customer-age}}" * } * ] * } * ``` * * `tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array). * * 4. Use `schema` to extract a large portion of the tool's response body. * * 4.1. If you hit example.com and it returns `{"name": "John", "age": 30}`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * } * } * } * } * ``` * These will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables. * * 4.2. 
If you hit example.com and it returns `{"name": {"first": "John", "last": "Doe"}}`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "object", * "properties": { * "name": { * "type": "object", * "properties": { * "first": { * "type": "string" * }, * "last": { * "type": "string" * } * } * } * } * } * } * ``` * * These will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible. * * 4.3. If you hit example.com and it returns `["94123", "94124"]`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "array", * "title": "zipCodes", * "items": { * "type": "string" * } * } * } * ``` * * This will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`. * * 4.4. If you hit example.com and it returns `[{"name": "John", "age": 30, "zipCodes": ["94123", "94124"]}, {"name": "Jane", "age": 25, "zipCodes": ["94125", "94126"]}]`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "array", * "title": "people", * "items": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * }, * "zipCodes": { * "type": "array", * "items": { * "type": "string" * } * } * } * } * } * } * ``` * * This will be extracted as `{{ people }}`. To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`. * * Note: Both `aliases` and `schema` can be used together. */ variableExtractionPlan?: VariableExtractionPlan; } export interface DtmfTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. 
*/ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "dtmf" for DTMF tool. */ type: "dtmf"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * 
false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface EndCallTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "endCall" for End Call tool. */ type: "endCall"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface FunctionTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "function" for Function tool. */ type: "function"; /** * This determines if the tool is async. * * If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server. * * If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server. * * Defaults to synchronous (`false`). * @example false */ async?: boolean; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. 
* @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay 
thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** This is the function definition of the tool. */ function?: OpenAIFunction; } export interface GhlTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "ghl" for GHL tool. */ type: "ghl"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; metadata: GhlToolMetadata; } export interface MakeTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "make" for Make tool. */ type: "make"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; metadata: MakeToolMetadata; } export interface TransferCallTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; type: "transferCall"; /** These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called. */ destinations?: ( | TransferDestinationAssistant | TransferDestinationNumber | TransferDestinationSip )[]; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface HandoffTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the type of the tool. * When you're using handoff tool, we recommend adding this to your system prompt * --- * # System context * * You are part of a multi-agent system designed to make agent coordination and execution easy. Agents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate. Handoffs are achieved by calling a handoff function, generally named `handoff_to_`. Handoffs between agents are handled seamlessly in the background; do not mention or draw attention to these handoffs in your conversation with the user. * * # Agent context * * {put your agent system prompt here} * --- */ type: "handoff"; /** * These are the destinations that the call can be handed off to. * * Usage: * 1. Single destination * * Use `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad. * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", // or "assistantName": "Assistant123" * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 2. Multiple destinations * * 2.1. 
Multiple Tools, Each With One Destination (OpenAI recommended) * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * ], * }, * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 2.2. One Tool, Multiple Destinations (Anthropic recommended) * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 3. Dynamic destination * * 3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object. * VAPI will send a handoff-destination-request webhook to the `server.url`. * The response from the server will be used as the destination (if valid). * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "dynamic", * "server": { * "url": "https://example.com" * } * } * ], * } * ] * } * ``` * * 3.2. To pass custom parameters to the server, you can use the `function` object. 
* * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "dynamic", * "server": { * "url": "https://example.com" * }, * } * ], * "function": { * "name": "handoff", * "description": "Call this function when the customer is ready to be handed off to the next assistant", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "description": "Use dynamic when customer is ready to be handed off to the next assistant", * "enum": ["dynamic"] * }, * "customerAreaCode": { * "type": "number", * "description": "Area code of the customer" * }, * "customerIntent": { * "type": "string", * "enum": ["new-customer", "existing-customer"], * "description": "Use new-customer when customer is a new customer, existing-customer when customer is an existing customer" * }, * "customerSentiment": { * "type": "string", * "enum": ["positive", "negative", "neutral"], * "description": "Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral" * } * } * } * } * } * ] * } * ``` * * The properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body. */ destinations?: (HandoffDestinationAssistant | HandoffDestinationDynamic)[]; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * This is the optional function definition that will be passed to the LLM. * If this is not defined, we will construct this based on the other properties. * * For example, given the following tools definition: * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * We will construct the following function definition: * ```json * { * "function": { * "name": "handoff_to_assistant-123", * "description": " * Use this function to handoff the call to the next assistant. * Only use it when instructions explicitly ask you to use the handoff_to_assistant function. * DO NOT call this function unless you are instructed to do so. * Here are the destinations you can handoff the call to: * 1. assistant-123. When: customer wants to be handed off to assistant-123 * 2. assistant-456. When: customer wants to be handed off to assistant-456 * ", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "description": "Options: assistant-123 (customer wants to be handed off to assistant-123), assistant-456 (customer wants to be handed off to assistant-456)", * "enum": ["assistant-123", "assistant-456"] * }, * }, * "required": ["destination"] * } * } * } * ``` * * To override this function, please provide an OpenAI function definition and refer to it in the system prompt. * You may override parts of the function definition (i.e. 
you may only want to change the function name for your prompt). * If you choose to override the function parameters, it must include `destination` as a required parameter, and it must evaluate to either an assistantId, assistantName, or a the string literal `dynamic`. * * To pass custom parameters to the server in a dynamic handoff, you can use the function parameters, with `dynamic` as the destination. * ```json * { * "function": { * "name": "dynamic_handoff", * "description": " * Call this function when the customer is ready to be handed off to the next assistant * ", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "enum": ["dynamic"] * }, * "customerAreaCode": { * "type": "number", * "description": "Area code of the customer" * }, * "customerIntent": { * "type": "string", * "enum": ["new-customer", "existing-customer"], * "description": "Use new-customer when customer is a new customer, existing-customer when customer is an existing customer" * }, * "customerSentiment": { * "type": "string", * "enum": ["positive", "negative", "neutral"], * "description": "Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral" * } * }, * "required": ["destination", "customerAreaCode", "customerIntent", "customerSentiment"] * } * } * } * ``` */ function?: OpenAIFunction; } export interface OutputTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "output" for Output tool. */ type: "output"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. 
*/ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} 
* true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface BashTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "bash" for Bash tool. */ type: "bash"; /** The sub type of tool. */ subType: "bash_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * The name of the tool, fixed to 'bash' * @default "bash" */ name: "bash"; } export interface ComputerTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "computer" for Computer tool. */ type: "computer"; /** The sub type of tool. */ subType: "computer_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * The name of the tool, fixed to 'computer' * @default "computer" */ name: "computer"; /** The display width in pixels */ displayWidthPx: number; /** The display height in pixels */ displayHeightPx: number; /** Optional display number */ displayNumber?: number; } export interface TextEditorTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "textEditor" for Text Editor tool. */ type: "textEditor"; /** The sub type of tool. */ subType: "text_editor_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * The name of the tool, fixed to 'str_replace_editor' * @default "str_replace_editor" */ name: "str_replace_editor"; } export interface QueryTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "query" for Query tool. */ type: "query"; /** The knowledge bases to query */ knowledgeBases?: KnowledgeBase[]; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoogleCalendarCreateEventTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.calendar.event.create" for Google Calendar Create Event tool. */ type: "google.calendar.event.create"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoogleSheetsRowAppendTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.sheets.row.append" for Google Sheets Row Append tool. */ type: "google.sheets.row.append"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoogleCalendarCheckAvailabilityTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.calendar.availability.check" for Google Calendar Check Availability tool. */ type: "google.calendar.availability.check"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface SlackSendMessageTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "slack.message.send" for Slack Send Message tool. */ type: "slack.message.send"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface SmsTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "sms" for Twilio SMS sending tool. */ type: "sms"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface McpTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "mcp" for MCP tool. */ type: "mcp"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; metadata?: McpToolMetadata; } export interface GoHighLevelCalendarAvailabilityTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.calendar.availability.check" for GoHighLevel Calendar Availability Check tool. */ type: "gohighlevel.calendar.availability.check"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoHighLevelCalendarEventCreateTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.calendar.event.create" for GoHighLevel Calendar Event Create tool. */ type: "gohighlevel.calendar.event.create"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoHighLevelContactCreateTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.contact.create" for GoHighLevel Contact Create tool. */ type: "gohighlevel.contact.create"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoHighLevelContactGetTool { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.contact.get" for GoHighLevel Contact Get tool. */ type: "gohighlevel.contact.get"; /** This is the unique identifier for the tool. */ id: string; /** This is the unique identifier for the organization that this tool belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the tool was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the tool was last updated. * @format date-time */ updatedAt: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateApiRequestToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "apiRequest" for API request tool. */ type: "apiRequest"; method: "POST" | "GET" | "PUT" | "PATCH" | "DELETE"; /** * This is the timeout in seconds for the request. Defaults to 20 seconds. * * @default 20 * @min 1 * @max 300 * @example 20 */ timeoutSeconds?: number; /** * The credential ID for API request authentication * @example "550e8400-e29b-41d4-a716-446655440000" */ credentialId?: string; /** * This is the name of the tool. This will be passed to the model. * * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40. * @maxLength 40 * @pattern /^[a-zA-Z0-9_-]{1,40}$/ */ name?: string; /** This is the description of the tool. This will be passed to the model. */ description?: string; /** This is where the request will be sent. */ url: string; /** This is the body of the request. */ body?: JsonSchema; /** These are the headers to send with the request. */ headers?: JsonSchema; /** * This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried). * * @default undefined (the request will not be retried) */ backoffPlan?: BackoffPlan; /** * This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call. * * Usage: * 1. Use `aliases` to extract variables from the tool's response body. 
(Most common case) * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{customer.name}}" * }, * { * "key": "customerAge", * "value": "{{customer.age}}" * } * ] * } * ``` * * The tool response body is made available to the liquid template. * * 2. Use `aliases` to extract variables from the tool's response body if the response is an array. * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{$[0].name}}" * }, * { * "key": "customerAge", * "value": "{{$[0].age}}" * } * ] * } * ``` * * $ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array). * * 3. Use `aliases` to extract variables from the tool's response headers. * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{tool.response.headers.customer-name}}" * }, * { * "key": "customerAge", * "value": "{{tool.response.headers.customer-age}}" * } * ] * } * ``` * * `tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array). * * 4. Use `schema` to extract a large portion of the tool's response body. * * 4.1. If you hit example.com and it returns `{"name": "John", "age": 30}`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * } * } * } * } * ``` * These will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables. * * 4.2. 
If you hit example.com and it returns `{"name": {"first": "John", "last": "Doe"}}`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "object", * "properties": { * "name": { * "type": "object", * "properties": { * "first": { * "type": "string" * }, * "last": { * "type": "string" * } * } * } * } * } * } * ``` * * These will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible. * * 4.3. If you hit example.com and it returns `["94123", "94124"]`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "array", * "title": "zipCodes", * "items": { * "type": "string" * } * } * } * ``` * * This will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`. * * 4.4. If you hit example.com and it returns `[{"name": "John", "age": 30, "zipCodes": ["94123", "94124"]}, {"name": "Jane", "age": 25, "zipCodes": ["94125", "94126"]}]`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "array", * "title": "people", * "items": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * }, * "zipCodes": { * "type": "array", * "items": { * "type": "string" * } * } * } * } * } * } * ``` * * This will be extracted as `{{ people }}`. To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`. * * Note: Both `aliases` and `schema` can be used together. */ variableExtractionPlan?: VariableExtractionPlan; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateOutputToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "output" for Output tool. */ type: "output"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 
'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateBashToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "bash" for Bash tool. */ type: "bash"; /** The sub type of tool. */ subType: "bash_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. 
*/ server?: Server; /** * The name of the tool, fixed to 'bash' * @default "bash" */ name: "bash"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 
contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateComputerToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "computer" for Computer tool. */ type: "computer"; /** The sub type of tool. */ subType: "computer_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * The name of the tool, fixed to 'computer' * @default "computer" */ name: "computer"; /** The display width in pixels */ displayWidthPx: number; /** The display height in pixels */ displayHeightPx: number; /** Optional display number */ displayNumber?: number; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateTextEditorToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "textEditor" for Text Editor tool. */ type: "textEditor"; /** The sub type of tool. */ subType: "text_editor_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * The name of the tool, fixed to 'str_replace_editor' * @default "str_replace_editor" */ name: "str_replace_editor"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateSmsToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "sms" for Twilio SMS sending tool. */ type: "sms"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 
'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateApiRequestToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; method?: "POST" | "GET" | "PUT" | "PATCH" | "DELETE"; /** * This is the timeout in seconds for the request. Defaults to 20 seconds. * * @default 20 * @min 1 * @max 300 * @example 20 */ timeoutSeconds?: number; /** * The credential ID for API request authentication * @example "550e8400-e29b-41d4-a716-446655440000" */ credentialId?: string; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * This is the name of the tool. This will be passed to the model. * * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40. * @maxLength 40 * @pattern /^[a-zA-Z0-9_-]{1,40}$/ */ name?: string; /** This is the description of the tool. This will be passed to the model. */ description?: string; /** This is where the request will be sent. */ url?: string; /** This is the body of the request. */ body?: JsonSchema; /** These are the headers to send with the request. */ headers?: JsonSchema; /** * This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried). * * @default undefined (the request will not be retried) */ backoffPlan?: BackoffPlan; /** * This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call. * * Usage: * 1. Use `aliases` to extract variables from the tool's response body. (Most common case) * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{customer.name}}" * }, * { * "key": "customerAge", * "value": "{{customer.age}}" * } * ] * } * ``` * * The tool response body is made available to the liquid template. * * 2. Use `aliases` to extract variables from the tool's response body if the response is an array. * * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{$[0].name}}" * }, * { * "key": "customerAge", * "value": "{{$[0].age}}" * } * ] * } * ``` * * $ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array). * * 3. Use `aliases` to extract variables from the tool's response headers. 
* * ```json * { * "aliases": [ * { * "key": "customerName", * "value": "{{tool.response.headers.customer-name}}" * }, * { * "key": "customerAge", * "value": "{{tool.response.headers.customer-age}}" * } * ] * } * ``` * * `tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array). * * 4. Use `schema` to extract a large portion of the tool's response body. * * 4.1. If you hit example.com and it returns `{"name": "John", "age": 30}`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * } * } * } * } * ``` * These will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables. * * 4.2. If you hit example.com and it returns `{"name": {"first": "John", "last": "Doe"}}`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "object", * "properties": { * "name": { * "type": "object", * "properties": { * "first": { * "type": "string" * }, * "last": { * "type": "string" * } * } * } * } * } * } * ``` * * These will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible. * * 4.3. If you hit example.com and it returns `["94123", "94124"]`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "array", * "title": "zipCodes", * "items": { * "type": "string" * } * } * } * ``` * * This will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`. * * 4.4. 
If you hit example.com and it returns `[{"name": "John", "age": 30, "zipCodes": ["94123", "94124"]}, {"name": "Jane", "age": 25, "zipCodes": ["94125", "94126"]}]`, then you can specify the schema as: * * ```json * { * "schema": { * "type": "array", * "title": "people", * "items": { * "type": "object", * "properties": { * "name": { * "type": "string" * }, * "age": { * "type": "number" * }, * "zipCodes": { * "type": "array", * "items": { * "type": "string" * } * } * } * } * } * } * ``` * * This will be extracted as `{{ people }}`. To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`. * * Note: Both `aliases` and `schema` can be used together. */ variableExtractionPlan?: VariableExtractionPlan; } export interface UpdateDtmfToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateEndCallToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% 
else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateFunctionToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This determines if the tool is async. * * If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server. * * If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server. * * Defaults to synchronous (`false`). * @example false */ async?: boolean; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. 
* - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} 
Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** This is the function definition of the tool. */ function?: OpenAIFunction; } export interface UpdateGhlToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; metadata?: GhlToolMetadata; } export interface UpdateMakeToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if 
userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; metadata?: MakeToolMetadata; } export interface UpdateHandoffToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * These are the destinations that the call can be handed off to. * * Usage: * 1. Single destination * * Use `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad. * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", // or "assistantName": "Assistant123" * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 2. Multiple destinations * * 2.1. 
Multiple Tools, Each With One Destination (OpenAI recommended) * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * ], * }, * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 2.2. One Tool, Multiple Destinations (Anthropic recommended) * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * 3. Dynamic destination * * 3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object. * VAPI will send a handoff-destination-request webhook to the `server.url`. * The response from the server will be used as the destination (if valid). * * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "dynamic", * "server": { * "url": "https://example.com" * } * } * ], * } * ] * } * ``` * * 3.2. To pass custom parameters to the server, you can use the `function` object. 
* * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "dynamic", * "server": { * "url": "https://example.com" * }, * } * ], * "function": { * "name": "handoff", * "description": "Call this function when the customer is ready to be handed off to the next assistant", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "description": "Use dynamic when customer is ready to be handed off to the next assistant", * "enum": ["dynamic"] * }, * "customerAreaCode": { * "type": "number", * "description": "Area code of the customer" * }, * "customerIntent": { * "type": "string", * "enum": ["new-customer", "existing-customer"], * "description": "Use new-customer when customer is a new customer, existing-customer when customer is an existing customer" * }, * "customerSentiment": { * "type": "string", * "enum": ["positive", "negative", "neutral"], * "description": "Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral" * } * } * } * } * } * ] * } * ``` * * The properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body. */ destinations?: (HandoffDestinationAssistant | HandoffDestinationDynamic)[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * This is the optional function definition that will be passed to the LLM. * If this is not defined, we will construct this based on the other properties. * * For example, given the following tools definition: * ```json * { * "tools": [ * { * "type": "handoff", * "destinations": [ * { * "type": "assistant", * "assistantId": "assistant-123", * "description": "customer wants to be handed off to assistant-123", * "contextEngineeringPlan": { * "type": "all" * } * }, * { * "type": "assistant", * "assistantId": "assistant-456", * "description": "customer wants to be handed off to assistant-456", * "contextEngineeringPlan": { * "type": "all" * } * } * ], * } * ] * } * ``` * * We will construct the following function definition: * ```json * { * "function": { * "name": "handoff_to_assistant-123", * "description": " * Use this function to handoff the call to the next assistant. * Only use it when instructions explicitly ask you to use the handoff_to_assistant function. * DO NOT call this function unless you are instructed to do so. * Here are the destinations you can handoff the call to: * 1. assistant-123. When: customer wants to be handed off to assistant-123 * 2. assistant-456. When: customer wants to be handed off to assistant-456 * ", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "description": "Options: assistant-123 (customer wants to be handed off to assistant-123), assistant-456 (customer wants to be handed off to assistant-456)", * "enum": ["assistant-123", "assistant-456"] * }, * }, * "required": ["destination"] * } * } * } * ``` * * To override this function, please provide an OpenAI function definition and refer to it in the system prompt. * You may override parts of the function definition (i.e. 
you may only want to change the function name for your prompt). * If you choose to override the function parameters, it must include `destination` as a required parameter, and it must evaluate to either an assistantId, assistantName, or a the string literal `dynamic`. * * To pass custom parameters to the server in a dynamic handoff, you can use the function parameters, with `dynamic` as the destination. * ```json * { * "function": { * "name": "dynamic_handoff", * "description": " * Call this function when the customer is ready to be handed off to the next assistant * ", * "parameters": { * "type": "object", * "properties": { * "destination": { * "type": "string", * "enum": ["dynamic"] * }, * "customerAreaCode": { * "type": "number", * "description": "Area code of the customer" * }, * "customerIntent": { * "type": "string", * "enum": ["new-customer", "existing-customer"], * "description": "Use new-customer when customer is a new customer, existing-customer when customer is an existing customer" * }, * "customerSentiment": { * "type": "string", * "enum": ["positive", "negative", "neutral"], * "description": "Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral" * } * }, * "required": ["destination", "customerAreaCode", "customerIntent", "customerSentiment"] * } * } * } * ``` */ function?: OpenAIFunction; } export interface UpdateTransferCallToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called. 
*/ destinations?: ( | TransferDestinationAssistant | TransferDestinationNumber | TransferDestinationSip )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or 
msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateOutputToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * 
liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateBashToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The sub type of tool. */ subType?: "bash_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * The name of the tool, fixed to 'bash' * @default "bash" */ name?: "bash"; } export interface UpdateComputerToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The sub type of tool. */ subType?: "computer_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * The name of the tool, fixed to 'computer' * @default "computer" */ name?: "computer"; /** The display width in pixels */ displayWidthPx?: number; /** The display height in pixels */ displayHeightPx?: number; /** Optional display number */ displayNumber?: number; } export interface UpdateTextEditorToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The sub type of tool. */ subType?: "text_editor_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; /** * The name of the tool, fixed to 'str_replace_editor' * @default "str_replace_editor" */ name?: "str_replace_editor"; } export interface UpdateQueryToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The knowledge bases to query */ knowledgeBases?: KnowledgeBase[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 
'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateGoogleCalendarCreateEventToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateGoogleSheetsRowAppendToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * 
false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateGoogleCalendarCheckAvailabilityToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateSlackSendMessageToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false 
* {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateSmsToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateMcpToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; metadata?: McpToolMetadata; } export interface UpdateGoHighLevelCalendarAvailabilityToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse 
%} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateGoHighLevelCalendarEventCreateToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateGoHighLevelContactCreateToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} 
* false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface UpdateGoHighLevelContactGetToolDTO { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface CreateFileDTO { /** * This is the File you want to upload for use with the Knowledge Base. * @format binary */ file: File; } export interface File { object?: "file"; status?: "processing" | "done" | "failed"; /** * This is the name of the file. This is just for your own reference. * @maxLength 40 */ name?: string; originalName?: string; bytes?: number; purpose?: string; mimetype?: string; key?: string; path?: string; bucket?: string; url?: string; parsedTextUrl?: string; parsedTextBytes?: number; metadata?: object; /** This is the unique identifier for the file. */ id: string; /** This is the unique identifier for the org that this file belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the file was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the file was last updated. * @format date-time */ updatedAt: string; } export interface UpdateFileDTO { /** * This is the name of the file. This is just for your own reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface TrieveKnowledgeBaseSearchPlan { /** Specifies the number of top chunks to return. This corresponds to the `page_size` parameter in Trieve. */ topK?: number; /** If true, stop words (specified in server/src/stop-words.txt in the git repo) will be removed. This will preserve queries that are entirely stop words. */ removeStopWords?: boolean; /** This is the score threshold to filter out chunks with a score below the threshold for cosine distance metric. For Manhattan Distance, Euclidean Distance, and Dot Product, it will filter out scores above the threshold distance. This threshold applies before weight and bias modifications. If not specified, this defaults to no threshold. A threshold of 0 will default to no threshold. 
*/ scoreThreshold?: number; /** This is the search method used when searching for relevant chunks from the vector store. */ searchType: "fulltext" | "semantic" | "hybrid" | "bm25"; } export interface TrieveKnowledgeBase { /** * This knowledge base is provided by Trieve. * * To learn more about Trieve, visit https://trieve.ai. */ provider: "trieve"; /** This is the name of the knowledge base. */ name?: string; /** * This is the searching plan used when searching for relevant chunks from the vector store. * * You should configure this if you're running into these issues: * - Too much unnecessary context is being fed as knowledge base context. * - Not enough relevant context is being fed as knowledge base context. */ searchPlan?: TrieveKnowledgeBaseSearchPlan; /** This is the plan if you want us to create/import a new vector store using Trieve. */ createPlan?: TrieveKnowledgeBaseImport; /** This is the id of the knowledge base. */ id: string; /** This is the org id of the knowledge base. */ orgId: string; } export interface CustomKnowledgeBase { /** This knowledge base is bring your own knowledge base implementation. */ provider: "custom-knowledge-base"; /** * This is where the knowledge base request will be sent. * * Request Example: * * POST https://{server.url} * Content-Type: application/json * * { * "messsage": { * "type": "knowledge-base-request", * "messages": [ * { * "role": "user", * "content": "Why is ocean blue?" * } * ], * ...other metadata about the call... 
* } * } * * Response Expected: * ``` * { * "message": { * "role": "assistant", * "content": "The ocean is blue because water absorbs everything but blue.", * }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK * "documents": [ * { * "content": "The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.", * "similarity": 1 * }, * { * "content": "Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.", * "similarity": .5 * } * ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL * } * ``` */ server: Server; /** This is the id of the knowledge base. */ id: string; /** This is the org id of the knowledge base. */ orgId: string; } export interface CreateTrieveKnowledgeBaseDTO { /** * This knowledge base is provided by Trieve. * * To learn more about Trieve, visit https://trieve.ai. */ provider: "trieve"; /** This is the name of the knowledge base. */ name?: string; /** * This is the searching plan used when searching for relevant chunks from the vector store. * * You should configure this if you're running into these issues: * - Too much unnecessary context is being fed as knowledge base context. * - Not enough relevant context is being fed as knowledge base context. */ searchPlan?: TrieveKnowledgeBaseSearchPlan; /** This is the plan if you want us to create/import a new vector store using Trieve. */ createPlan?: TrieveKnowledgeBaseImport; } export interface UpdateTrieveKnowledgeBaseDTO { /** This is the name of the knowledge base. */ name?: string; /** * This is the searching plan used when searching for relevant chunks from the vector store. * * You should configure this if you're running into these issues: * - Too much unnecessary context is being fed as knowledge base context. * - Not enough relevant context is being fed as knowledge base context. 
*/ searchPlan?: TrieveKnowledgeBaseSearchPlan; /** This is the plan if you want us to create/import a new vector store using Trieve. */ createPlan?: TrieveKnowledgeBaseImport; } export interface UpdateCustomKnowledgeBaseDTO { /** * This is where the knowledge base request will be sent. * * Request Example: * * POST https://{server.url} * Content-Type: application/json * * { * "messsage": { * "type": "knowledge-base-request", * "messages": [ * { * "role": "user", * "content": "Why is ocean blue?" * } * ], * ...other metadata about the call... * } * } * * Response Expected: * ``` * { * "message": { * "role": "assistant", * "content": "The ocean is blue because water absorbs everything but blue.", * }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK * "documents": [ * { * "content": "The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.", * "similarity": 1 * }, * { * "content": "Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.", * "similarity": .5 * } * ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL * } * ``` */ server?: Server; } export interface TrieveKnowledgeBaseChunkPlan { /** These are the file ids that will be used to create the vector store. To upload files, use the `POST /files` endpoint. */ fileIds?: string[]; /** These are the websites that will be used to create the vector store. */ websites?: string[]; /** This is an optional field which allows you to specify the number of splits you want per chunk. If not specified, the default 20 is used. However, you may want to use a different number. */ targetSplitsPerChunk?: number; /** This is an optional field which allows you to specify the delimiters to use when splitting the file before chunking the text. If not specified, the default [.!?\n] are used to split into sentences. 
However, you may want to use spaces or other delimiters. */ splitDelimiters?: string[]; /** This is an optional field which allows you to specify whether or not to rebalance the chunks created from the file. If not specified, the default true is used. If true, Trieve will evenly distribute remainder splits across chunks such that 66 splits with a target_splits_per_chunk of 20 will result in 3 chunks with 22 splits each. */ rebalanceChunks?: boolean; } export interface TrieveKnowledgeBaseCreate { /** This is to create a new dataset on Trieve. */ type: "create"; /** These are the chunk plans used to create the dataset. */ chunkPlans: TrieveKnowledgeBaseChunkPlan[]; } export interface TrieveKnowledgeBaseImport { /** This is to import an existing dataset from Trieve. */ type: "import"; /** This is the `datasetId` of the dataset on your Trieve account. */ providerId: string; } export interface ComplianceOverride { /** * Force storage for this output under HIPAA. Only enable if output contains no sensitive data. * @example false */ forceStoreOnHipaaEnabled?: boolean; } export interface StructuredOutput { /** * This is the model that will be used to extract the structured output. * * To provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages. * Between the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history. * Between the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition. * i.e.: * {{structuredOutput}} * {{structuredOutput.name}} * {{structuredOutput.description}} * {{structuredOutput.schema}} * * If model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts. 
* If messages or required fields are not specified, the default system and user prompts will be used. */ model?: | WorkflowOpenAIModel | WorkflowAnthropicModel | WorkflowGoogleModel | WorkflowCustomModel; /** * Compliance configuration for this output. Only enable overrides if no sensitive data will be stored. * @example {"forceStoreOnHipaaEnabled":false} */ compliancePlan?: ComplianceOverride; /** This is the unique identifier for the structured output. */ id: string; /** This is the unique identifier for the org that this structured output belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the structured output was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the structured output was last updated. * @format date-time */ updatedAt: string; /** * This is the name of the structured output. * @minLength 1 * @maxLength 40 */ name: string; /** * This is the description of what the structured output extracts. * * Use this to provide context about what data will be extracted and how it will be used. */ description?: string; /** * These are the assistant IDs that this structured output is linked to. * * When linked to assistants, this structured output will be available for extraction during those assistant's calls. */ assistantIds?: string[]; /** * These are the workflow IDs that this structured output is linked to. * * When linked to workflows, this structured output will be available for extraction during those workflow's execution. */ workflowIds?: string[]; /** * This is the JSON Schema definition for the structured output. * * Defines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including: * - Objects and nested properties * - Arrays and array validation * - String, number, boolean, and null types * - Enums and const values * - Validation constraints (min/max, patterns, etc.) 
* - Composition with allOf, anyOf, oneOf */ schema: JsonSchema; } export interface StructuredOutputPaginatedResponse { results: StructuredOutput[]; metadata: PaginationMeta; } export interface CreateStructuredOutputDTO { /** * This is the model that will be used to extract the structured output. * * To provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages. * Between the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history. * Between the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition. * i.e.: * {{structuredOutput}} * {{structuredOutput.name}} * {{structuredOutput.description}} * {{structuredOutput.schema}} * * If model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts. * If messages or required fields are not specified, the default system and user prompts will be used. */ model?: | WorkflowOpenAIModel | WorkflowAnthropicModel | WorkflowGoogleModel | WorkflowCustomModel; /** * Compliance configuration for this output. Only enable overrides if no sensitive data will be stored. * @example {"forceStoreOnHipaaEnabled":false} */ compliancePlan?: ComplianceOverride; /** * This is the name of the structured output. * @minLength 1 * @maxLength 40 */ name: string; /** * This is the JSON Schema definition for the structured output. * * This is required when creating a structured output. Defines the structure and validation rules for the data that will be extracted. 
Supports all JSON Schema features including: * - Objects and nested properties * - Arrays and array validation * - String, number, boolean, and null types * - Enums and const values * - Validation constraints (min/max, patterns, etc.) * - Composition with allOf, anyOf, oneOf */ schema: JsonSchema; /** * This is the description of what the structured output extracts. * * Use this to provide context about what data will be extracted and how it will be used. */ description?: string; /** * These are the assistant IDs that this structured output is linked to. * * When linked to assistants, this structured output will be available for extraction during those assistant's calls. */ assistantIds?: string[]; /** * These are the workflow IDs that this structured output is linked to. * * When linked to workflows, this structured output will be available for extraction during those workflow's execution. */ workflowIds?: string[]; } export interface UpdateStructuredOutputDTO { /** * This is the model that will be used to extract the structured output. * * To provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages. * Between the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history. * Between the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition. * i.e.: * {{structuredOutput}} * {{structuredOutput.name}} * {{structuredOutput.description}} * {{structuredOutput.schema}} * * If model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts. * If messages or required fields are not specified, the default system and user prompts will be used. 
*/ model?: | WorkflowOpenAIModel | WorkflowAnthropicModel | WorkflowGoogleModel | WorkflowCustomModel; /** * Compliance configuration for this output. Only enable overrides if no sensitive data will be stored. * @example {"forceStoreOnHipaaEnabled":false} */ compliancePlan?: ComplianceOverride; /** * This is the name of the structured output. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the description of what the structured output extracts. * * Use this to provide context about what data will be extracted and how it will be used. */ description?: string; /** * These are the assistant IDs that this structured output is linked to. * * When linked to assistants, this structured output will be available for extraction during those assistant's calls. */ assistantIds?: string[]; /** * These are the workflow IDs that this structured output is linked to. * * When linked to workflows, this structured output will be available for extraction during those workflow's execution. */ workflowIds?: string[]; /** * This is the JSON Schema definition for the structured output. * * Defines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including: * - Objects and nested properties * - Arrays and array validation * - String, number, boolean, and null types * - Enums and const values * - Validation constraints (min/max, patterns, etc.) * - Composition with allOf, anyOf, oneOf */ schema?: JsonSchema; } export interface StructuredOutputRunDTO { /** * This is the preview flag for the re-run. If true, the re-run will be executed and the response will be returned immediately and the call artifact will NOT be updated. * If false (default), the re-run will be executed and the response will be updated in the call artifact. * @default false */ previewEnabled?: boolean; /** * This is the ID of the structured output that will be run. This must be provided unless a transient structured output is provided. 
* When the re-run is executed, only the value of this structured output will be replaced with the new value, or added if not present.
 */
structuredOutputId?: string;
/**
 * This is the transient structured output that will be run. This must be provided if a structured output ID is not provided.
 * When the re-run is executed, the structured output value will be added to the existing artifact.
 */
structuredOutput?: CreateStructuredOutputDTO;
/**
 * This is the array of callIds that will be updated with the new structured output value. If preview is true, this array must be provided and contain exactly 1 callId.
 * If preview is false, up to 100 callIds may be provided.
 */
callIds: string[];
}

export interface TesterPlan {
  /**
   * Pass a transient assistant to use for the test assistant.
   *
   * Make sure to write a detailed system prompt for a test assistant, and use the {{test.script}} variable to access the test script.
   */
  assistant?: CreateAssistantDTO;
  /**
   * Pass an assistant id that can be accessed.
   *
   * Make sure to write a detailed system prompt for the test assistant, and use the {{test.script}} variable to access the test script.
   */
  assistantId?: string;
  /**
   * Add any assistant overrides to the test assistant.
   *
   * One use case is if you want to pass custom variables into the test using variableValues, that you can then access in the script
   * and rubric using {{varName}}.
   */
  assistantOverrides?: AssistantOverrides;
}

export interface TestSuitePhoneNumber {
  /** This is the provider of the phone number. */
  provider: "test-suite";
  /**
   * This is the phone number that is being tested.
   * @maxLength 50
   */
  number: string;
}

export interface TargetPlan {
  /**
   * This is the phone number that is being tested.
   * During the actual test, it'll be called and the assistant attached to it will pick up and be tested.
   * To test an assistant directly, send assistantId instead.
   */
  phoneNumberId?: string;
  /**
   * This can be any phone number (even not on Vapi).
   * During the actual test, it'll be called.
   * To test a Vapi number, send phoneNumberId. To test an assistant directly, send assistantId instead.
   */
  phoneNumber?: TestSuitePhoneNumber;
  /**
   * This is the assistant being tested.
   * During the actual test, it'll be invoked directly.
   * To test the assistant over phone number, send phoneNumberId instead.
   */
  assistantId?: string;
  /** These are the assistant overrides applied to assistantId before it is tested. */
  assistantOverrides?: AssistantOverrides;
}

export interface TestSuite {
  /** This is the unique identifier for the test suite. */
  id: string;
  /** This is the unique identifier for the org that this test suite belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the test suite was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the test suite was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of the test suite.
   * @maxLength 80
   */
  name?: string;
  /**
   * This is the phone number ID associated with this test suite.
   * @deprecated
   */
  phoneNumberId?: string;
  /**
   * Override the default tester plan by providing custom assistant configuration for the test agent.
   *
   * We recommend only using this if you are confident, as we have already set sensible defaults on the tester plan.
   */
  testerPlan?: TesterPlan;
  /** This is the configuration for the assistant / phone number that is being tested. */
  targetPlan?: TargetPlan;
}

export interface TestSuitesPaginatedResponse {
  results: TestSuite[];
  metadata: PaginationMeta;
}

export interface CreateTestSuiteDto {
  /**
   * This is the name of the test suite.
   * @maxLength 80
   */
  name?: string;
  /**
   * This is the phone number ID associated with this test suite.
   * @deprecated
   */
  phoneNumberId?: string;
  /**
   * Override the default tester plan by providing custom assistant configuration for the test agent.
* * We recommend only using this if you are confident, as we have already set sensible defaults on the tester plan. */ testerPlan?: TesterPlan; /** These are the configuration for the assistant / phone number that is being tested. */ targetPlan?: TargetPlan; } export interface UpdateTestSuiteDto { /** * This is the name of the test suite. * @maxLength 80 */ name?: string; /** * This is the phone number ID associated with this test suite. * @deprecated */ phoneNumberId?: string; /** * Override the default tester plan by providing custom assistant configuration for the test agent. * * We recommend only using this if you are confident, as we have already set sensible defaults on the tester plan. */ testerPlan?: TesterPlan; /** These are the configuration for the assistant / phone number that is being tested. */ targetPlan?: TargetPlan; } export interface TestSuiteTestVoice { /** These are the scorers used to evaluate the test. */ scorers: TestSuiteTestScorerAI[]; /** * This is the type of the test, which must be voice. * @maxLength 100 */ type: "voice"; /** This is the unique identifier for the test. */ id: string; /** This is the unique identifier for the test suite this test belongs to. */ testSuiteId: string; /** This is the unique identifier for the organization this test belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the test was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the test was last updated. * @format date-time */ updatedAt: string; /** * This is the name of the test. * @maxLength 80 */ name?: string; /** * This is the script to be used for the voice test. * @maxLength 10000 */ script: string; /** * This is the number of attempts allowed for the test. * @min 1 * @max 10 */ numAttempts?: number; } export interface TestSuiteTestChat { /** These are the scorers used to evaluate the test. 
*/ scorers: TestSuiteTestScorerAI[]; /** * This is the type of the test, which must be chat. * @maxLength 100 */ type: "chat"; /** This is the unique identifier for the test. */ id: string; /** This is the unique identifier for the test suite this test belongs to. */ testSuiteId: string; /** This is the unique identifier for the organization this test belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the test was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the test was last updated. * @format date-time */ updatedAt: string; /** * This is the name of the test. * @maxLength 80 */ name?: string; /** * This is the script to be used for the chat test. * @maxLength 10000 */ script: string; /** * This is the number of attempts allowed for the test. * @min 1 * @max 10 */ numAttempts?: number; } export interface CreateTestSuiteTestVoiceDto { /** These are the scorers used to evaluate the test. */ scorers: TestSuiteTestScorerAI[]; /** * This is the type of the test, which must be voice. * @maxLength 100 */ type: "voice"; /** * This is the script to be used for the voice test. * @maxLength 10000 */ script: string; /** * This is the number of attempts allowed for the test. * @min 1 * @max 10 */ numAttempts?: number; /** * This is the name of the test. * @maxLength 80 */ name?: string; } export interface CreateTestSuiteTestChatDto { /** These are the scorers used to evaluate the test. */ scorers: TestSuiteTestScorerAI[]; /** * This is the type of the test, which must be chat. * @maxLength 100 */ type: "chat"; /** * This is the script to be used for the chat test. * @maxLength 10000 */ script: string; /** * This is the number of attempts allowed for the test. * @min 1 * @max 10 */ numAttempts?: number; /** * This is the name of the test. * @maxLength 80 */ name?: string; } export interface UpdateTestSuiteTestVoiceDto { /** These are the scorers used to evaluate the test. 
*/ scorers?: TestSuiteTestScorerAI[]; /** * This is the type of the test, which must be voice. * @maxLength 100 */ type?: "voice"; /** * This is the name of the test. * @maxLength 80 */ name?: string; /** * This is the script to be used for the voice test. * @maxLength 10000 */ script?: string; /** * This is the number of attempts allowed for the test. * @min 1 * @max 10 */ numAttempts?: number; } export interface UpdateTestSuiteTestChatDto { /** These are the scorers used to evaluate the test. */ scorers?: TestSuiteTestScorerAI[]; /** * This is the type of the test, which must be chat. * @maxLength 100 */ type?: "chat"; /** * This is the name of the test. * @maxLength 80 */ name?: string; /** * This is the script to be used for the chat test. * @maxLength 10000 */ script?: string; /** * This is the number of attempts allowed for the test. * @min 1 * @max 10 */ numAttempts?: number; } export interface TestSuiteTestScorerAI { /** * This is the type of the scorer, which must be AI. * @maxLength 100 */ type: "ai"; /** * This is the rubric used by the AI scorer. * @maxLength 10000 */ rubric: string; } export interface TestSuiteTestsPaginatedResponse { /** A list of test suite tests. */ results: (TestSuiteTestVoice | TestSuiteTestChat)[]; /** Metadata about the pagination. */ metadata: PaginationMeta; } export interface TestSuiteRunScorerAI { /** * This is the type of the scorer, which must be AI. * @maxLength 100 */ type: "ai"; /** * This is the result of the test suite. * @maxLength 100 */ result: "pass" | "fail"; /** * This is the reasoning provided by the AI scorer. * @maxLength 10000 */ reasoning: string; /** * This is the rubric used by the AI scorer. * @maxLength 10000 */ rubric: string; } export interface TestSuiteRunTestAttemptCall { /** This is the artifact of the call. */ artifact: Artifact; } export interface TestSuiteRunTestAttemptMetadata { /** This is the session ID for the test attempt. 
*/ sessionId: string; } export interface TestSuiteRunTestAttempt { /** These are the results of the scorers used to evaluate the test attempt. */ scorerResults: TestSuiteRunScorerAI[]; /** This is the call made during the test attempt. */ call?: TestSuiteRunTestAttemptCall; /** This is the call ID for the test attempt. */ callId?: string; /** This is the metadata for the test attempt. */ metadata?: TestSuiteRunTestAttemptMetadata; } export interface TestSuiteRunTestResult { /** This is the test that was run. */ test: TestSuiteTestVoice; /** These are the attempts made for this test. */ attempts: TestSuiteRunTestAttempt[]; } export interface TestSuiteRun { /** This is the current status of the test suite run. */ status: "queued" | "in-progress" | "completed" | "failed"; /** This is the unique identifier for the test suite run. */ id: string; /** This is the unique identifier for the organization this run belongs to. */ orgId: string; /** This is the unique identifier for the test suite this run belongs to. */ testSuiteId: string; /** * This is the ISO 8601 date-time string of when the test suite run was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the test suite run was last updated. * @format date-time */ updatedAt: string; /** These are the results of the tests in this test suite run. */ testResults: TestSuiteRunTestResult[]; /** * This is the name of the test suite run. * @maxLength 80 */ name?: string; } export interface TestSuiteRunsPaginatedResponse { results: TestSuiteRun[]; metadata: PaginationMeta; } export interface CreateTestSuiteRunDto { /** * This is the name of the test suite run. * @maxLength 80 */ name?: string; } export interface UpdateTestSuiteRunDto { /** * This is the name of the test suite run. 
* @maxLength 80 */ name?: string; } export interface BarInsightMetadata { /** * @minLength 1 * @maxLength 40 */ xAxisLabel?: string; /** * @minLength 1 * @maxLength 40 */ yAxisLabel?: string; yAxisMin?: number; yAxisMax?: number; /** * @minLength 1 * @maxLength 40 */ name?: string; } export interface InsightTimeRangeWithStep { /** * This is the group by step for aggregation. * * If not provided, defaults to group by day. */ step?: "minute" | "hour" | "day" | "week" | "month" | "quarter" | "year"; /** * This is the start date for the time range. * * Should be a valid ISO 8601 date-time string or relative time string. * If not provided, defaults to the 7 days ago. * * Relative time strings of the format "-{number}{unit}" are allowed. * * Valid units are: * - d: days * - h: hours * - w: weeks * - m: months * - y: years * @example ""2025-01-01" or "-7d" or "now"" */ start?: object; /** * This is the end date for the time range. * * Should be a valid ISO 8601 date-time string or relative time string. * If not provided, defaults to now. * * Relative time strings of the format "-{number}{unit}" are allowed. * * Valid units are: * - d: days * - h: hours * - w: weeks * - m: months * - y: years * @example ""2025-01-01" or "now"" */ end?: object; /** * This is the timezone you want to set for the query. * * If not provided, defaults to UTC. */ timezone?: string; } export interface BarInsight { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `bar` to create a bar insight. */ type: "bar"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. 
* The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. */ metadata?: BarInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. * @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; /** This is the unique identifier for the Insight. */ id: string; /** This is the unique identifier for the org that this Insight belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the Insight was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the Insight was last updated. * @format date-time */ updatedAt: string; } export interface InsightTimeRange { /** * This is the start date for the time range. * * Should be a valid ISO 8601 date-time string or relative time string. * If not provided, defaults to the 7 days ago. * * Relative time strings of the format "-{number}{unit}" are allowed. 
* * Valid units are: * - d: days * - h: hours * - w: weeks * - m: months * - y: years * @example ""2025-01-01" or "-7d" or "now"" */ start?: object; /** * This is the end date for the time range. * * Should be a valid ISO 8601 date-time string or relative time string. * If not provided, defaults to now. * * Relative time strings of the format "-{number}{unit}" are allowed. * * Valid units are: * - d: days * - h: hours * - w: weeks * - m: months * - y: years * @example ""2025-01-01" or "now"" */ end?: object; /** * This is the timezone you want to set for the query. * * If not provided, defaults to UTC. */ timezone?: string; } export interface PieInsight { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `pie` to create a pie insight. */ type: "pie"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; timeRange?: InsightTimeRange; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. 
* @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; /** This is the unique identifier for the Insight. */ id: string; /** This is the unique identifier for the org that this Insight belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the Insight was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the Insight was last updated. * @format date-time */ updatedAt: string; } export interface LineInsightMetadata { /** * @minLength 1 * @maxLength 40 */ xAxisLabel?: string; /** * @minLength 1 * @maxLength 40 */ yAxisLabel?: string; yAxisMin?: number; yAxisMax?: number; /** * @minLength 1 * @maxLength 40 */ name?: string; } export interface LineInsight { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `line` to create a line insight. */ type: "line"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. 
* For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. */ metadata?: LineInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. * @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; /** This is the unique identifier for the Insight. */ id: string; /** This is the unique identifier for the org that this Insight belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the Insight was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the Insight was last updated. * @format date-time */ updatedAt: string; } export interface TextInsight { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `text` to create a text insight. */ type: "text"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. 
* The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formula?: object; timeRange?: InsightTimeRange; /** * These are the queries to run to generate the insight. * For Text Insights, we only allow a single query, or require a formula if multiple queries are provided */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; /** This is the unique identifier for the Insight. */ id: string; /** This is the unique identifier for the org that this Insight belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the Insight was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the Insight was last updated. * @format date-time */ updatedAt: string; } export interface UpdateBarInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `bar` to create a bar insight. */ type?: "bar"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. 
* The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. */ metadata?: BarInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. * @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries?: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface UpdatePieInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `pie` to create a pie insight. */ type?: "pie"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. 
* The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; timeRange?: InsightTimeRange; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. * @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries?: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface UpdateLineInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `line` to create a line insight. */ type?: "line"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. 
* For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. */ metadata?: LineInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. * @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries?: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface UpdateTextInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `text` to create a text insight. */ type?: "text"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. 
* For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formula?: object; timeRange?: InsightTimeRange; /** * These are the queries to run to generate the insight. * For Text Insights, we only allow a single query, or require a formula if multiple queries are provided */ queries?: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface CreateBarInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `bar` to create a bar insight. */ type: "bar"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. */ metadata?: BarInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. 
* These are the columns to group the results by. * All results are grouped by the time range step by default. * @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface CreatePieInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `pie` to create a pie insight. */ type: "pie"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; timeRange?: InsightTimeRange; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. 
* @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface CreateLineInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `line` to create a line insight. */ type: "line"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. */ metadata?: LineInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. 
* @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface CreateTextInsightFromCallTableDTO { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `text` to create a text insight. */ type: "text"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formula?: object; timeRange?: InsightTimeRange; /** * These are the queries to run to generate the insight. * For Text Insights, we only allow a single query, or require a formula if multiple queries are provided */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface JSONQueryOnCallTableWithStringTypeColumn { /** * This is the type of query. Only allowed type is "vapiql-json". 
* @example "vapiql-json" */ type: "vapiql-json"; /** This is the table that will be queried. */ table: "call"; /** * This is the filters to apply to the insight. * The discriminator automatically selects the correct filter type based on column and operator. */ filters?: ( | FilterStringTypeColumnOnCallTable | FilterStringArrayTypeColumnOnCallTable | FilterNumberTypeColumnOnCallTable | FilterNumberArrayTypeColumnOnCallTable | FilterDateTypeColumnOnCallTable | FilterStructuredOutputColumnOnCallTable )[]; /** * This is the column that will be queried in the selected table. * Available columns depend on the selected table. * String Type columns are columns where the rows store String data * @example "id" */ column: "id" | "artifact.structuredOutputs[OutputID]"; /** * This is the aggregation operation to perform on the column. * When the column is a string type, the operation must be "count". * @example "count" */ operation: "count"; /** * This is the name of the query. * It will be used to label the query in the insight board on the UI. * @example "Total Calls" */ name?: string; } export interface JSONQueryOnCallTableWithNumberTypeColumn { /** * This is the type of query. Only allowed type is "vapiql-json". * @example "vapiql-json" */ type: "vapiql-json"; /** This is the table that will be queried. */ table: "call"; /** * This is the filters to apply to the insight. * The discriminator automatically selects the correct filter type based on column and operator. */ filters?: ( | FilterStringTypeColumnOnCallTable | FilterStringArrayTypeColumnOnCallTable | FilterNumberTypeColumnOnCallTable | FilterNumberArrayTypeColumnOnCallTable | FilterDateTypeColumnOnCallTable | FilterStructuredOutputColumnOnCallTable )[]; /** * This is the column that will be queried in the selected table. * Available columns depend on the selected table. 
* Number Type columns are columns where the rows store Number data * @example "duration" */ column: | "cost" | "duration" | "averageModelLatency" | "averageVoiceLatency" | "averageTranscriberLatency" | "averageTurnLatency" | "averageEndpointingLatency" | "artifact.structuredOutputs[OutputID]"; /** * This is the aggregation operation to perform on the column. * When the column is a number type, the operation must be one of the following: * - average * - sum * - min * - max * @example "sum" */ operation: "average" | "sum" | "min" | "max"; /** * This is the name of the query. * It will be used to label the query in the insight board on the UI. * @example "Total Calls" */ name?: string; } export interface JSONQueryOnCallTableWithStructuredOutputColumn { /** * This is the type of query. Only allowed type is "vapiql-json". * @example "vapiql-json" */ type: "vapiql-json"; /** This is the table that will be queried. */ table: "call"; /** * This is the filters to apply to the insight. * The discriminator automatically selects the correct filter type based on column and operator. */ filters?: ( | FilterStringTypeColumnOnCallTable | FilterStringArrayTypeColumnOnCallTable | FilterNumberTypeColumnOnCallTable | FilterNumberArrayTypeColumnOnCallTable | FilterDateTypeColumnOnCallTable | FilterStructuredOutputColumnOnCallTable )[]; /** * This is the column that will be queried in the call table. * Structured Output Type columns are only to query on artifact.structuredOutputs[OutputID] column. * @example "artifact.structuredOutputs[OutputID]" */ column: "artifact.structuredOutputs[OutputID]"; /** * This is the aggregation operation to perform on the column. * When the column is a structured output type, the operation depends on the value of the structured output. * If the structured output is a string or boolean, the operation must be "count". * If the structured output is a number, the operation can be "average", "sum", "min", or "max". 
 * @example "count"
 */
operation: "average" | "count" | "sum" | "min" | "max";
/**
 * This is the name of the query.
 * It will be used to label the query in the insight board on the UI.
 * @example "Total Calls"
 */
name?: string;
}
export interface FilterStringTypeColumnOnCallTable {
  /**
   * This is the column in the call table that will be filtered on.
   * String Type columns are columns where the rows store data as a string.
   * Must be a valid column for the selected table.
   * @example "assistantId"
   */
  column:
    | "assistantId"
    | "workflowId"
    | "squadId"
    | "phoneNumberId"
    | "type"
    | "customerNumber"
    | "status"
    | "endedReason"
    | "forwardedPhoneNumber"
    | "campaignId";
  /**
   * This is the operator to use for the filter.
   * For string type columns, the operator must be "=", "!=", "contains", "not_contains"
   * @example ""=" or "!=""
   */
  operator: "=" | "!=" | "contains" | "not_contains";
  /** This is the value to filter on. */
  value: string;
}
export interface FilterNumberTypeColumnOnCallTable {
  /**
   * This is the column in the call table that will be filtered on.
   * Number Type columns are columns where the rows store data as a number.
   * Must be a valid column for the selected table.
   * @example "duration"
   */
  column:
    | "duration"
    | "cost"
    | "averageModelLatency"
    | "averageVoiceLatency"
    | "averageTranscriberLatency"
    | "averageTurnLatency"
    | "averageEndpointingLatency";
  /**
   * This is the operator to use for the filter.
   * For number type columns, the operator must be "=", "!=", ">", "<", ">=", "<="
   * @example ""=" or ">" or "<" or ">=" or "<=""
   */
  operator: "=" | "!=" | ">" | "<" | ">=" | "<=";
  /** This is the value to filter on. */
  value: number;
}
export interface FilterDateTypeColumnOnCallTable {
  /**
   * This is the column in the call table that will be filtered on.
   * Date Type columns are columns where the rows store data as a date.
   * Must be a valid column for the selected table.
   * @example "startedAt"
   */
  column: "startedAt" | "endedAt";
  /**
   * This is the operator to use for the filter.
* For date type columns, the operator must be "=", ">", "<", ">=", "<=" * @example ""=" or ">" or "<" or ">=" or "<="" */ operator: "=" | "!=" | ">" | "<" | ">=" | "<="; /** * This is the value to filter on. * Must be a valid ISO 8601 date-time string. * @example "2025-01-01T00:00:00Z" */ value: string; } export interface FilterStructuredOutputColumnOnCallTable { /** * This is the column in the call table that will be filtered on. * Structured Output Type columns are only to filter on artifact.structuredOutputs[OutputID] column. * @example "artifact.structuredOutputs[OutputID]" */ column: "artifact.structuredOutputs[OutputID]"; /** * This is the operator to use for the filter. * The operator depends on the value type of the structured output. * If the structured output is a string or boolean, the operator must be "=", "!=" * If the structured output is a number, the operator must be "=", ">", "<", ">=", "<=" * If the structured output is an array, the operator must be "in" or "not_in" * @example ""=" or ">" or "<" or "in" or "not_in"" */ operator: | "=" | "!=" | ">" | "<" | ">=" | "<=" | "in" | "not_in" | "contains" | "not_contains"; /** * This is the value to filter on. * The value type depends on the structured output type being filtered. */ value: object; } export interface FilterStringArrayTypeColumnOnCallTable { /** * This is the column in the call table that will be filtered on. * String Array Type columns are the same as String Type columns, but provides the ability to filter on multiple values provided as an array. * Must be a valid column for the selected table. * @example "assistant_id" */ column: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "customerNumber" | "status" | "endedReason" | "forwardedPhoneNumber" | "campaignId"; /** * This is the operator to use for the filter. * The operator must be `in` or `not_in`. * @example ""in" or "not_in"" */ operator: "in" | "not_in"; /** These are the values to filter on. 
*/ value: string[]; } export interface FilterNumberArrayTypeColumnOnCallTable { /** * This is the column in the call table that will be filtered on. * Number Array Type columns are the same as Number Type columns, but provides the ability to filter on multiple values provided as an array. * Must be a valid column for the selected table. * @example "duration" */ column: | "duration" | "cost" | "averageModelLatency" | "averageVoiceLatency" | "averageTranscriberLatency" | "averageTurnLatency" | "averageEndpointingLatency"; /** * This is the operator to use for the filter. * The operator must be `in` or `not_in`. * @example ""in" or "not_in"" */ operator: "in" | "not_in"; /** This is the value to filter on. */ value: number[]; } export interface BarInsightFromCallTable { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `bar` to create a bar insight. */ type: "bar"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. 
*/ metadata?: BarInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. * @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface PieInsightFromCallTable { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `pie` to create a pie insight. */ type: "pie"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; timeRange?: InsightTimeRange; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. 
* @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface LineInsightFromCallTable { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `line` to create a line insight. */ type: "line"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formulas?: InsightFormula[]; /** This is the metadata for the insight. */ metadata?: LineInsightMetadata; timeRange?: InsightTimeRangeWithStep; /** * This is the group by column for the insight when table is `call`. * These are the columns to group the results by. * All results are grouped by the time range step by default. 
* @example ["assistant_id"] */ groupBy?: | "assistantId" | "workflowId" | "squadId" | "phoneNumberId" | "type" | "endedReason" | "campaignId" | "artifact.structuredOutputs[OutputID]"; /** These are the queries to run to generate the insight. */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface TextInsightFromCallTable { /** * This is the name of the Insight. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the type of the Insight. * It is required to be `text` to create a text insight. */ type: "text"; /** * Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight. * The formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html * A formula is created by using the query names as the variable. * The formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result. * For example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this: * ``` * {{['Query 1']}} / {{['Query 2']}} * 100 * ``` * * ``` * ({{[Query 1]}} * 10) + {{[Query 2]}} * ``` * This will take the * * You can also use the query names as the variable in the formula. */ formula?: object; timeRange?: InsightTimeRange; /** * These are the queries to run to generate the insight. * For Text Insights, we only allow a single query, or require a formula if multiple queries are provided */ queries: ( | JSONQueryOnCallTableWithStringTypeColumn | JSONQueryOnCallTableWithNumberTypeColumn | JSONQueryOnCallTableWithStructuredOutputColumn )[]; } export interface InsightFormula { /** * This is the name of the formula. * It will be used to label the formula in the insight board on the UI. 
 * @minLength 1
 * @maxLength 40
 * @example "Booking Rate"
 */
name?: string;
/**
 * This is the formula to calculate the insight from the queries.
 * The formula needs to be a valid mathematical expression.
 * The formula must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.
 * Any MathJS formula is allowed - https://mathjs.org/docs/expressions/syntax.html
 *
 * Common valid math operations are +, -, *, /, %
 * @minLength 1
 * @maxLength 1000
 */
formula: string;
}
export interface InsightRunFormatPlan {
  /**
   * This is the format of the data to return.
   * If not provided, defaults to "raw".
   * Raw provides the data as fetched from the database, with formulas evaluated.
   * Recharts provides the data in a format that is ready to be used by recharts.js to render charts.
   * @example "raw"
   */
  format?: "raw" | "recharts";
}
export interface InsightRunDTO {
  formatPlan?: InsightRunFormatPlan;
  /**
   * This is the optional time range override for the insight.
   * If provided, overrides every field in the insight's timeRange.
   * If this is provided with missing fields, defaults will be used, not the insight's timeRange.
   * start default - "-7d"
   * end default - "now"
   * step default - "day"
   * For Pie and Text Insights, step will be ignored even if provided.
   * @example "{ start: "2025-01-01", end: "2025-01-07", step: "day" }"
   */
  timeRangeOverride?: InsightTimeRangeWithStep;
}
export interface InsightRunResponse {
  id: string;
  insightId: string;
  orgId: string;
  /** @format date-time */
  createdAt: string;
  /** @format date-time */
  updatedAt: string;
}
export interface Insight {
  /**
   * This is the name of the Insight.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
  /** This is the type of the Insight. */
  type: "bar" | "line" | "pie" | "text";
  /** This is the unique identifier for the Insight. */
  id: string;
  /** This is the unique identifier for the org that this Insight belongs to.
*/ orgId: string; /** * This is the ISO 8601 date-time string of when the Insight was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the Insight was last updated. * @format date-time */ updatedAt: string; } export interface InsightPaginatedResponse { results: Insight[]; metadata: PaginationMeta; } export interface CreateEvalDTO { /** * This is the mock conversation that will be used to evaluate the flow of the conversation. * * Mock Messages are used to simulate the flow of the conversation * * Evaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls * @example "[{ role: "user", content: "Hello, how are you?" }, { role: "assistant", judgePlan: { type: "exact", content: "I am good, thank you!" } }]" */ messages: ( | ChatEvalAssistantMessageMock | ChatEvalSystemMessageMock | ChatEvalToolResponseMessageMock | ChatEvalToolResponseMessageEvaluation | ChatEvalUserMessageMock | ChatEvalAssistantMessageEvaluation )[]; /** * This is the name of the eval. * It helps identify what the eval is checking for. * @minLength 1 * @maxLength 80 * @example "Verified User Flow Eval" */ name?: string; /** * This is the description of the eval. * This helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation. * @maxLength 500 * @example "This eval checks if the user flow is verified." */ description?: string; /** * This is the type of the eval. * Currently it is fixed to `chat.mockConversation`. * @example "chat.mockConversation" */ type: "chat.mockConversation"; } export interface Eval { /** * This is the mock conversation that will be used to evaluate the flow of the conversation. 
* * Mock Messages are used to simulate the flow of the conversation * * Evaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls * @example "[{ role: "user", content: "Hello, how are you?" }, { role: "assistant", judgePlan: { type: "exact", content: "I am good, thank you!" } }]" */ messages: ( | ChatEvalAssistantMessageMock | ChatEvalSystemMessageMock | ChatEvalToolResponseMessageMock | ChatEvalToolResponseMessageEvaluation | ChatEvalUserMessageMock | ChatEvalAssistantMessageEvaluation )[]; id: string; orgId: string; /** @format date-time */ createdAt: string; /** @format date-time */ updatedAt: string; /** * This is the name of the eval. * It helps identify what the eval is checking for. * @minLength 1 * @maxLength 80 * @example "Verified User Flow Eval" */ name?: string; /** * This is the description of the eval. * This helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation. * @maxLength 500 * @example "This eval checks if the user flow is verified." */ description?: string; /** * This is the type of the eval. * Currently it is fixed to `chat.mockConversation`. * @example "chat.mockConversation" */ type: "chat.mockConversation"; } export interface EvalModelListOptions { /** This is the provider of the model. */ provider: "openai" | "anthropic" | "google" | "groq" | "custom-llm"; } export interface EvalUserEditable { /** * This is the mock conversation that will be used to evaluate the flow of the conversation. * * Mock Messages are used to simulate the flow of the conversation * * Evaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls * @example "[{ role: "user", content: "Hello, how are you?" }, { role: "assistant", judgePlan: { type: "exact", content: "I am good, thank you!" 
} }]"
 */
messages: (
  | ChatEvalAssistantMessageMock
  | ChatEvalSystemMessageMock
  | ChatEvalToolResponseMessageMock
  | ChatEvalToolResponseMessageEvaluation
  | ChatEvalUserMessageMock
  | ChatEvalAssistantMessageEvaluation
)[];
/**
 * This is the name of the eval.
 * It helps identify what the eval is checking for.
 * @minLength 1
 * @maxLength 80
 * @example "Verified User Flow Eval"
 */
name?: string;
/**
 * This is the description of the eval.
 * This helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation.
 * @maxLength 500
 * @example "This eval checks if the user flow is verified."
 */
description?: string;
/**
 * This is the type of the eval.
 * Currently it is fixed to `chat.mockConversation`.
 * @example "chat.mockConversation"
 */
type: "chat.mockConversation";
}
export interface ChatEvalAssistantMessageMockToolCall {
  /**
   * This is the name of the tool that will be called.
   * It should be one of the tools created in the organization.
   * @maxLength 100
   * @example "get_weather"
   */
  name: string;
  /**
   * This is the arguments that will be passed to the tool call.
   * @example ""{"city": "San Francisco"}""
   */
  arguments?: object;
}
export interface ChatEvalAssistantMessageMock {
  /**
   * This is the role of the message author.
   * For a mock assistant message, the role is always 'assistant'
   * @default "assistant"
   */
  role: "assistant";
  /**
   * This is the content of the assistant message.
   * This is the message that the assistant would have sent.
   * @maxLength 1000
   * @example "The weather in San Francisco is sunny."
   */
  content?: string;
  /**
   * This is the tool calls that will be made by the assistant.
   * @example "[{ name: "get_weather", arguments: { city: "San Francisco" } }]"
   */
  toolCalls?: ChatEvalAssistantMessageMockToolCall[];
}
export interface ChatEvalSystemMessageMock {
  /**
   * This is the role of the message author.
* For a mock system message, the role is always 'system' * @default 'system' * @default "system" */ role: "system"; /** * This is the content of the system message that would have been added in the middle of the conversation. * Do not include the assistant prompt as a part of this message. It will automatically be fetched during runtime. * @example "You are a helpful assistant." */ content: string; } export interface ChatEvalToolResponseMessageMock { /** * This is the role of the message author. * For a mock tool response message, the role is always 'tool' * @default 'tool' * @default "tool" */ role: "tool"; /** This is the content of the tool response message. JSON Objects should be stringified. */ content: string; } export interface ChatEvalUserMessageMock { /** * This is the role of the message author. * For a mock user message, the role is always 'user' * @default 'user' * @default "user" */ role: "user"; /** * This is the content of the user message. * This is the message that the user would have sent. * @maxLength 1000 * @example "Hello, how are you?" */ content: string; } export interface AssistantMessageEvaluationContinuePlan { /** * This is whether the evaluation should exit if the assistant message evaluates to false. * By default, it is false and the evaluation will continue. * @default false */ exitOnFailureEnabled?: boolean; /** * This is the content that will be used in the conversation for this assistant turn moving forward if provided. * It will override the content received from the model. * @maxLength 1000 * @example "The weather in San Francisco is sunny." */ contentOverride?: string; /** * This is the tool calls that will be used in the conversation for this assistant turn moving forward if provided. * It will override the tool calls received from the model. 
* @example "[{ name: "get_weather", arguments: { city: "San Francisco" } }]" */ toolCallsOverride?: ChatEvalAssistantMessageMockToolCall[]; } export interface ChatEvalAssistantMessageEvaluation { /** * This is the role of the message author. * For an assistant message evaluation, the role is always 'assistant' * @default 'assistant' * @default "assistant" */ role: "assistant"; /** * This is the judge plan that instructs how to evaluate the assistant message. * The assistant message can be evaluated against fixed content (exact match or RegEx) or with an LLM-as-judge by defining the evaluation criteria in a prompt. */ judgePlan: | AssistantMessageJudgePlanExact | AssistantMessageJudgePlanRegex | AssistantMessageJudgePlanAI; /** * This is the plan for how the overall evaluation will proceed after the assistant message is evaluated. * This lets you configure whether to stop the evaluation if this message fails, and whether to override any content for future turns */ continuePlan?: AssistantMessageEvaluationContinuePlan; } export interface EvalOpenAIModel { /** This is the provider of the model (`openai`). */ provider: "openai"; /** * This is the OpenAI model that will be used. * * When using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that make sense. * This is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/. 
* @maxLength 100 */ model: | "gpt-5.1" | "gpt-5.1-chat-latest" | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gpt-4.1-2025-04-14" | "gpt-4.1-mini-2025-04-14" | "gpt-4.1-nano-2025-04-14" | "gpt-4.1" | "gpt-4.1-mini" | "gpt-4.1-nano" | "chatgpt-4o-latest" | "o3" | "o3-mini" | "o4-mini" | "o1-mini" | "o1-mini-2024-09-12" | "gpt-4o-mini-2024-07-18" | "gpt-4o-mini" | "gpt-4o" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "gpt-4o-2024-11-20" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-turbo-preview" | "gpt-4-0125-preview" | "gpt-4-1106-preview" | "gpt-4" | "gpt-4-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0613" | "gpt-4.1-2025-04-14:westus" | "gpt-4.1-2025-04-14:eastus2" | "gpt-4.1-2025-04-14:eastus" | "gpt-4.1-2025-04-14:westus3" | "gpt-4.1-2025-04-14:northcentralus" | "gpt-4.1-2025-04-14:southcentralus" | "gpt-4.1-mini-2025-04-14:westus" | "gpt-4.1-mini-2025-04-14:eastus2" | "gpt-4.1-mini-2025-04-14:eastus" | "gpt-4.1-mini-2025-04-14:westus3" | "gpt-4.1-mini-2025-04-14:northcentralus" | "gpt-4.1-mini-2025-04-14:southcentralus" | "gpt-4.1-nano-2025-04-14:westus" | "gpt-4.1-nano-2025-04-14:eastus2" | "gpt-4.1-nano-2025-04-14:westus3" | "gpt-4.1-nano-2025-04-14:northcentralus" | "gpt-4.1-nano-2025-04-14:southcentralus" | "gpt-4o-2024-11-20:swedencentral" | "gpt-4o-2024-11-20:westus" | "gpt-4o-2024-11-20:eastus2" | "gpt-4o-2024-11-20:eastus" | "gpt-4o-2024-11-20:westus3" | "gpt-4o-2024-11-20:southcentralus" | "gpt-4o-2024-08-06:westus" | "gpt-4o-2024-08-06:westus3" | "gpt-4o-2024-08-06:eastus" | "gpt-4o-2024-08-06:eastus2" | "gpt-4o-2024-08-06:northcentralus" | "gpt-4o-2024-08-06:southcentralus" | "gpt-4o-mini-2024-07-18:westus" | "gpt-4o-mini-2024-07-18:westus3" | "gpt-4o-mini-2024-07-18:eastus" | "gpt-4o-mini-2024-07-18:eastus2" | "gpt-4o-mini-2024-07-18:northcentralus" | "gpt-4o-mini-2024-07-18:southcentralus" | "gpt-4o-2024-05-13:eastus2" | "gpt-4o-2024-05-13:eastus" | 
"gpt-4o-2024-05-13:northcentralus" | "gpt-4o-2024-05-13:southcentralus" | "gpt-4o-2024-05-13:westus3" | "gpt-4o-2024-05-13:westus" | "gpt-4-turbo-2024-04-09:eastus2" | "gpt-4-0125-preview:eastus" | "gpt-4-0125-preview:northcentralus" | "gpt-4-0125-preview:southcentralus" | "gpt-4-1106-preview:australia" | "gpt-4-1106-preview:canadaeast" | "gpt-4-1106-preview:france" | "gpt-4-1106-preview:india" | "gpt-4-1106-preview:norway" | "gpt-4-1106-preview:swedencentral" | "gpt-4-1106-preview:uk" | "gpt-4-1106-preview:westus" | "gpt-4-1106-preview:westus3" | "gpt-4-0613:canadaeast" | "gpt-3.5-turbo-0125:canadaeast" | "gpt-3.5-turbo-0125:northcentralus" | "gpt-3.5-turbo-0125:southcentralus" | "gpt-3.5-turbo-1106:canadaeast" | "gpt-3.5-turbo-1106:westus"; /** * This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * If your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response. * @min 50 * @max 10000 */ maxTokens?: number; /** * These are the messages which will instruct the AI Judge on how to evaluate the assistant message. * The LLM-Judge must respond with "pass" or "fail" to indicate if the assistant message passes the eval. * * To access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`. * The assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`. * * It is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated. 
* @example "{" */ messages: object[]; } export interface EvalAnthropicModel { /** This is the provider of the model (`anthropic`). */ provider: "anthropic"; /** * This is the specific model that will be used. * @maxLength 100 */ model: | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-3-5-sonnet-20240620" | "claude-3-5-sonnet-20241022" | "claude-3-5-haiku-20241022" | "claude-3-7-sonnet-20250219" | "claude-opus-4-20250514" | "claude-opus-4-5-20251101" | "claude-sonnet-4-20250514" | "claude-sonnet-4-5-20250929" | "claude-haiku-4-5-20251001"; /** * This is the optional configuration for Anthropic's thinking feature. * * - If provided, `maxTokens` must be greater than `thinking.budgetTokens`. */ thinking?: AnthropicThinkingConfig; /** * This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * If your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response. * @min 50 * @max 10000 */ maxTokens?: number; /** * These are the messages which will instruct the AI Judge on how to evaluate the assistant message. * The LLM-Judge must respond with "pass" or "fail" to indicate if the assistant message passes the eval. * * To access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`. * The assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`. * * It is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated. 
* @example "[{ role: "system", content: "Respond only with pass or fail." }]"
   */
  messages: object[];
}

export interface EvalGoogleModel {
  /** This is the provider of the model (`google`). */
  provider: "google";
  /**
   * This is the name of the model. Ex. gemini-2.5-pro
   * @maxLength 100
   */
  model:
    | "gemini-2.5-pro"
    | "gemini-2.5-flash"
    | "gemini-2.5-flash-lite"
    | "gemini-2.0-flash-thinking-exp"
    | "gemini-2.0-pro-exp-02-05"
    | "gemini-2.0-flash"
    | "gemini-2.0-flash-lite"
    | "gemini-2.0-flash-exp"
    | "gemini-2.0-flash-realtime-exp"
    | "gemini-1.5-flash"
    | "gemini-1.5-flash-002"
    | "gemini-1.5-pro"
    | "gemini-1.5-pro-002"
    | "gemini-1.0-pro";
  /**
   * This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions.
   * @min 0
   * @max 2
   */
  temperature?: number;
  /**
   * This is the max tokens of the model.
   * If your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response.
   * @min 50
   * @max 10000
   */
  maxTokens?: number;
  /**
   * These are the messages which will instruct the AI Judge on how to evaluate the assistant message.
   * The LLM-Judge must respond with "pass" or "fail" to indicate if the assistant message passes the eval.
   *
   * To access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.
   * The assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.
   *
   * It is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.
   * @example "[{ role: "system", content: "Respond only with pass or fail." }]"
   */
  messages: object[];
}

export interface EvalGroqModel {
  /** This is the provider of the model (`groq`). */
  provider: "groq";
  /**
   * This is the name of the model. Ex.
gpt-4o * @maxLength 100 */ model: | "openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "deepseek-r1-distill-llama-70b" | "llama-3.3-70b-versatile" | "llama-3.1-405b-reasoning" | "llama-3.1-8b-instant" | "llama3-8b-8192" | "llama3-70b-8192" | "gemma2-9b-it" | "moonshotai/kimi-k2-instruct-0905" | "meta-llama/llama-4-maverick-17b-128e-instruct" | "meta-llama/llama-4-scout-17b-16e-instruct" | "mistral-saba-24b" | "compound-beta" | "compound-beta-mini"; /** * This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * If your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response. * @min 50 * @max 10000 */ maxTokens?: number; /** * These are the messages which will instruct the AI Judge on how to evaluate the assistant message. * The LLM-Judge must respond with "pass" or "fail" to indicate if the assistant message passes the eval. * * To access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`. * The assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`. * * It is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated. * @example "{" */ messages: object[]; } export interface EvalCustomModel { /** This is the provider of the model (`custom-llm`). */ provider: "custom-llm"; /** These is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1 */ url: string; /** These are the headers we'll use for the OpenAI client's `headers`. 
*/ headers?: object; /** * This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds. * @min 20 * @max 600 */ timeoutSeconds?: number; /** * This is the name of the model. Ex. gpt-4o * @maxLength 100 */ model: string; /** * This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions. * @min 0 * @max 2 */ temperature?: number; /** * This is the max tokens of the model. * If your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response. * @min 50 * @max 10000 */ maxTokens?: number; /** * These are the messages which will instruct the AI Judge on how to evaluate the assistant message. * The LLM-Judge must respond with "pass" or "fail" to indicate if the assistant message passes the eval. * * To access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`. * The assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`. * * It is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated. * @example "{" */ messages: object[]; } export interface AssistantMessageJudgePlanAI { /** * This is the model to use for the LLM-as-a-judge. * If not provided, will default to the assistant's model. * * The instructions on how to evaluate the model output with this LLM-Judge must be passed as a system message in the messages array of the model. * * The Mock conversation can be passed to the LLM-Judge to evaluate using the prompt {{messages}} and will be evaluated as a LiquidJS Variable. 
To access and judge only the last message, use {{messages[-1]}} * * The LLM-Judge must respond with "pass" or "fail" and only those two responses are allowed. * @example "{" */ model: | EvalOpenAIModel | EvalAnthropicModel | EvalGoogleModel | EvalCustomModel; /** * This is the type of the judge plan. * Use 'ai' to evaluate the assistant message content using LLM-as-a-judge. * @default 'ai' */ type: "ai"; } export interface ChatEvalToolResponseMessageEvaluation { /** * This is the role of the message author. * For a tool response message evaluation, the role is always 'tool' * @default 'tool' * @default "tool" */ role: "tool"; /** * This is the judge plan that instructs how to evaluate the tool response message. * The tool response message can be evaluated with an LLM-as-judge by defining the evaluation criteria in a prompt. */ judgePlan: AssistantMessageJudgePlanAI; } export interface AssistantMessageJudgePlanExact { /** * This is the type of the judge plan. * Use 'exact' for an exact match on the content and tool calls - without using LLM-as-a-judge. * @default 'exact' */ type: "exact"; /** * This is what that will be used to evaluate the model's message content. * If you provide a string, the assistant message content will be evaluated against it as an exact match, case-insensitive. * @maxLength 1000 * @example "The weather in San Francisco is sunny." */ content: string; /** * This is the tool calls that will be used to evaluate the model's message content. * The tool name must be a valid tool that the assistant is allowed to call. 
* * For the Query tool, the arguments for the tool call are in the format - {knowledgeBaseNames: ['kb_name', 'kb_name_2']} * * For the DTMF tool, the arguments for the tool call are in the format - {dtmf: "1234*"} * * For the Handoff tool, the arguments for the tool call are in the format - {destination: "assistant_id"} * * For the Transfer Call tool, the arguments for the tool call are in the format - {destination: "phone_number_or_assistant_id"} * * For all other tools, they are called without arguments or with user-defined arguments * @example "[{ name: "get_weather", arguments: { city: "San Francisco" } }]" */ toolCalls?: ChatEvalAssistantMessageMockToolCall[]; } export interface AssistantMessageJudgePlanRegex { /** * This is the type of the judge plan. * Use 'regex' for a regex match on the content and tool calls - without using LLM-as-a-judge. * @default 'regex' */ type: "regex"; /** * This is what that will be used to evaluate the model's message content. * The content will be evaluated against the regex pattern provided in the Judge Plan content field. * Evaluation is considered successful if the regex pattern matches any part of the assistant message content. * @maxLength 1000 * @example "/sunny/i" */ content: string; /** * This is the tool calls that will be used to evaluate the model's message content. * The tool name must be a valid tool that the assistant is allowed to call. * The values to the arguments for the tool call should be a Regular Expression. * Evaluation is considered successful if the regex pattern matches any part of each tool call argument. 
* * For the Query tool, the arguments for the tool call are in the format - {knowledgeBaseNames: ['kb_name', 'kb_name_2']} * * For the DTMF tool, the arguments for the tool call are in the format - {dtmf: "1234*"} * * For the Handoff tool, the arguments for the tool call are in the format - {destination: "assistant_id"} * * For the Transfer Call tool, the arguments for the tool call are in the format - {destination: "phone_number_or_assistant_id"} * * For all other tools, they are called without arguments or with user-defined arguments * @example "[{ name: "get_weather", arguments: { city: "/San Francisco/i" } }]" */ toolCalls?: ChatEvalAssistantMessageMockToolCall[]; } export interface GetEvalPaginatedDTO { id?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. 
* @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; } export interface EvalPaginatedResponse { results: Eval[]; metadata: PaginationMeta; } export interface UpdateEvalDTO { /** * This is the mock conversation that will be used to evaluate the flow of the conversation. * * Mock Messages are used to simulate the flow of the conversation * * Evaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls * @example "[{ role: "user", content: "Hello, how are you?" }, { role: "assistant", judgePlan: { type: "exact", content: "I am good, thank you!" } }]" */ messages?: ( | ChatEvalAssistantMessageMock | ChatEvalSystemMessageMock | ChatEvalToolResponseMessageMock | ChatEvalToolResponseMessageEvaluation | ChatEvalUserMessageMock | ChatEvalAssistantMessageEvaluation )[]; /** * This is the name of the eval. * It helps identify what the eval is checking for. * @minLength 1 * @maxLength 80 * @example "Verified User Flow Eval" */ name?: string; /** * This is the description of the eval. * This helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation. * @maxLength 500 * @example "This eval checks if the user flow is verified." */ description?: string; /** * This is the type of the eval. * Currently it is fixed to `chat.mockConversation`. * @example "chat.mockConversation" */ type?: "chat.mockConversation"; } export interface CreateEvalRunDTO { /** This is the transient eval that will be run */ eval?: CreateEvalDTO; /** This is the target that will be run against the eval */ target: EvalRunTargetAssistant | EvalRunTargetSquad; /** * This is the type of the run. * Currently it is fixed to `eval`. * @example "eval" */ type: "eval"; /** * This is the id of the eval that will be run. 
* @example "123e4567-e89b-12d3-a456-426614174000" */ evalId?: string; } export interface EvalRunResult { /** * This is the status of the eval run result. * The status is only 'pass' or 'fail' for an eval run result. * Currently, An eval is considered `pass` only if all the Assistant Judge messages are evaluated to pass. * @example "pass" */ status: "pass" | "fail"; /** * This is the messages of the eval run result. * It contains the user/system messages */ messages: ( | ChatEvalUserMessageMock | ChatEvalSystemMessageMock | ChatEvalToolResponseMessageMock | ChatEvalAssistantMessageMock )[]; /** * This is the start time of the eval run result. * @format date-time * @example "2021-01-01T00:00:00.000Z" */ startedAt: string; /** * This is the end time of the eval run result. * @format date-time * @example "2021-01-01T00:00:00.000Z" */ endedAt: string; } export interface EvalRun { /** * This is the status of the eval run. When an eval run is created, the status is 'running'. * When the eval run is completed, the status is 'ended'. * @example "running" */ status: "running" | "ended" | "queued"; /** * This is the reason for the eval run to end. * When the eval run is completed normally i.e end of mock conversation, the status is 'mockConversation.done'. * When the eval fails due to an error like Chat error or incorrect configuration, the status is 'error'. * When the eval runs for too long, due to model issues or tool call issues, the status is 'timeout'. * When the eval run is cancelled by the user, the status is 'cancelled'. * When the eval run is cancelled by Vapi for any reason, the status is 'aborted'. 
* @example "mockConversation.done" */ endedReason: | "mockConversation.done" | "error" | "timeout" | "cancelled" | "aborted"; /** This is the transient eval that will be run */ eval?: CreateEvalDTO; /** This is the target that will be run against the eval */ target: EvalRunTargetAssistant | EvalRunTargetSquad; id: string; orgId: string; /** @format date-time */ createdAt: string; /** @format date-time */ startedAt: string; /** @format date-time */ endedAt: string; /** * This is the ended message when the eval run ended for any reason apart from mockConversation.done * @example "The Assistant returned an error" */ endedMessage?: string; /** * This is the results of the eval or suite run. * The array will have a single item for an eval run, and multiple items each corresponding to the an eval in a suite run in the same order as the evals in the suite. */ results: EvalRunResult[]; /** * This is the cost of the eval or suite run in USD. * @example 0.01 */ cost: number; /** * This is the break up of costs of the eval or suite run. * @example "[{ type: "model", model: "gpt-4o", cost: 0.01 }]" */ costs: object[]; /** * This is the type of the run. * Currently it is fixed to `eval`. * @example "eval" */ type: "eval"; /** * This is the id of the eval that will be run. * @example "123e4567-e89b-12d3-a456-426614174000" */ evalId?: string; } export interface EvalRunPaginatedResponse { results: EvalRun[]; metadata: PaginationMeta; } export interface GetEvalRunPaginatedDTO { id?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. 
* @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; } export interface EvalRunTargetAssistant { /** This is the transient assistant that will be run against the eval */ assistant?: CreateAssistantDTO; /** * This is the overrides that will be applied to the assistant. * @example "{" */ assistantOverrides?: AssistantOverrides; /** * This is the type of the target. * Currently it is fixed to `assistant`. * @example "assistant" */ type: "assistant"; /** * This is the id of the assistant that will be run against the eval * @example "123e4567-e89b-12d3-a456-426614174000" */ assistantId?: string; } export interface EvalRunTargetSquad { /** This is the transient squad that will be run against the eval */ squad?: CreateSquadDTO; /** * This is the overrides that will be applied to the assistants. * @example "{" */ assistantOverrides?: AssistantOverrides; /** * This is the type of the target. * Currently it is fixed to `squad`. 
* @example "squad" */ type: "squad"; /** * This is the id of the squad that will be run against the eval * @example "123e4567-e89b-12d3-a456-426614174000" */ squadId?: string; } export interface Scorecard { /** This is the unique identifier for the scorecard. */ id: string; /** This is the unique identifier for the org that this scorecard belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the scorecard was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the scorecard was last updated. * @format date-time */ updatedAt: string; /** * This is the name of the scorecard. It is only for user reference and will not be used for any evaluation. * @maxLength 80 */ name?: string; /** * This is the description of the scorecard. It is only for user reference and will not be used for any evaluation. * @maxLength 500 */ description?: string; /** * These are the metrics that will be used to evaluate the scorecard. * Each metric will have a set of conditions and points that will be used to generate the score. */ metrics: ScorecardMetric[]; /** * These are the assistant IDs that this scorecard is linked to. * When linked to assistants, this scorecard will be available for evaluation during those assistants' calls. */ assistantIds?: string[]; } export interface ScorecardPaginatedResponse { results: Scorecard[]; metadata: PaginationMeta; } export interface UpdateScorecardDTO { /** * This is the name of the scorecard. It is only for user reference and will not be used for any evaluation. * @maxLength 80 */ name?: string; /** * This is the description of the scorecard. It is only for user reference and will not be used for any evaluation. * @maxLength 500 */ description?: string; /** * These are the metrics that will be used to evaluate the scorecard. * Each metric will have a set of conditions and points that will be used to generate the score. 
*/ metrics?: ScorecardMetric[]; /** * These are the assistant IDs that this scorecard is linked to. * When linked to assistants, this scorecard will be available for evaluation during those assistants' calls. */ assistantIds?: string[]; } export interface CreateOrgDTO { /** * When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false. * When HIPAA is enabled, only OpenAI/Custom LLM or Azure Providers will be available for LLM and Voice respectively. * This is due to the compliance requirements of HIPAA. Other providers may not meet these requirements. * @example false */ hipaaEnabled?: boolean; /** This is the ID of the subscription the org belongs to. */ subscriptionId?: string; /** * This is the name of the org. This is just for your own reference. * @maxLength 40 */ name?: string; /** This is the channel of the org. There is the cluster the API traffic for the org will be directed. */ channel?: "daily" | "default" | "weekly" | "intuit"; /** * This is the monthly billing limit for the org. To go beyond $1000/mo, please contact us at support@vapi.ai. * @min 0 * @max 1000 */ billingLimit?: number; /** * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. * * The order of precedence is: * * 1. assistant.server * 2. phoneNumber.server * 3. org.server */ server?: Server; /** * This is the concurrency limit for the org. This is the maximum number of calls that can be active at any given time. To go beyond 10, please contact us at support@vapi.ai. * @deprecated * @min 1 * @max 10 */ concurrencyLimit?: number; /** * Stores the information about the compliance plan enforced at the organization level. Currently pciEnabled is supported through this field. 
* When this is enabled, any logs, recordings, or transcriptions will be shipped to the customer endpoints if provided else lost. * At the end of the call, you will receive an end-of-call-report message to store on your server, if webhook is provided. * Defaults to false. * When PCI is enabled, only PCI-compliant Providers will be available for LLM, Voice and transcribers. * This is due to the compliance requirements of PCI. Other providers may not meet these requirements. */ compliancePlan?: CompliancePlan; } export interface AutoReloadPlan { /** This the amount of credits to reload. */ credits: number; /** This is the limit at which the reload is triggered. */ threshold: number; } export interface InvoicePlan { /** This is the name of the company. */ companyName?: string; /** This is the address of the company. */ companyAddress?: string; /** This is the tax ID of the company. */ companyTaxId?: string; /** This is the preferred invoicing email of the company. If not specified, defaults to the subscription's email. */ companyEmail?: string; } export interface Subscription { /** This is the unique identifier for the subscription. */ id: string; /** * This is the timestamp when the subscription was created. * @format date-time */ createdAt: string; /** * This is the timestamp when the subscription was last updated. * @format date-time */ updatedAt: string; /** This is the type / tier of the subscription. */ type: | "pay-as-you-go" | "enterprise" | "agency" | "startup" | "growth" | "scale"; /** * This is the status of the subscription. Past due subscriptions are subscriptions * with past due payments. */ status: "active" | "frozen"; /** * This is the number of credits the subscription currently has. * * Note: This is a string to avoid floating point precision issues. */ credits: string; /** * This is the total number of active calls (concurrency) across all orgs under this subscription. 
 * @min 1
   * NOTE(review): @min 1 on a counter looks suspicious — a value of 0 should be valid; verify against the upstream OpenAPI spec.
   */
  concurrencyCounter: number;
  /**
   * This is the default concurrency limit for the subscription.
   * @min 1
   */
  concurrencyLimitIncluded: number;
  /**
   * This is the number of free phone numbers the subscription has
   * @min 1
   */
  phoneNumbersCounter?: number;
  /**
   * This is the maximum number of free phone numbers the subscription can have
   * @min 1
   */
  phoneNumbersIncluded?: number;
  /**
   * This is the purchased add-on concurrency limit for the subscription.
   * @min 1
   */
  concurrencyLimitPurchased: number;
  /** This is the ID of the monthly job that charges for subscription add ons and phone numbers. */
  monthlyChargeScheduleId?: number;
  /**
   * This is the ID of the monthly job that checks whether the credit balance of the subscription
   * is sufficient for the monthly charge.
   */
  monthlyCreditCheckScheduleId?: number;
  /** This is the Stripe customer ID. */
  stripeCustomerId?: string;
  /** This is the Stripe payment ID. */
  stripePaymentMethodId?: string;
  /** If this flag is true, then the user has purchased slack support. */
  slackSupportEnabled?: boolean;
  /** If this subscription has a slack support subscription, the slack channel's ID will be stored here. */
  slackChannelId?: string;
  /**
   * This is the HIPAA enabled flag for the subscription. It determines whether orgs under this
   * subscription have the option to enable HIPAA compliance.
   */
  hipaaEnabled?: boolean;
  /**
   * This is the data retention enabled flag for the subscription. It determines whether orgs under this
   * subscription have the option to enable data retention.
   */
  dataRetentionEnabled?: boolean;
  /** This is the ID for the Common Paper agreement outlining the HIPAA contract. */
  hipaaCommonPaperAgreementId?: string;
  /**
   * This is the Stripe fingerprint of the payment method (card). It allows us
   * to detect users who try to abuse our system through multiple sign-ups.
   */
  stripePaymentMethodFingerprint?: string;
  /**
   * This is the customer's email on Stripe.
   */
  stripeCustomerEmail?: string;
  /** This is the email of the referrer for the subscription. */
  referredByEmail?: string;
  /** This is the auto reload plan configured for the subscription. */
  autoReloadPlan?: AutoReloadPlan;
  /**
   * The number of minutes included in the subscription.
   * @min 0
   */
  minutesIncluded?: number;
  /**
   * The number of minutes used in the subscription.
   * @min 0
   */
  minutesUsed?: number;
  /**
   * This is the timestamp at which the number of monthly free minutes is scheduled to reset at.
   * @format date-time
   */
  minutesUsedNextResetAt?: string;
  /** The per minute charge on minutes that exceed the included minutes. Enterprise only. */
  minutesOverageCost?: number;
  /** The list of providers included in the subscription. Enterprise only. */
  providersIncluded?: string[];
  /**
   * The maximum number of outbound calls this subscription may make in a day. Resets every night.
   * @min 1
   */
  outboundCallsDailyLimit?: number;
  /**
   * The current number of outbound calls the subscription has made in the current day.
   * @min 1
   */
  outboundCallsCounter?: number;
  /**
   * This is the timestamp at which the outbound calls counter is scheduled to reset at.
   * @format date-time
   */
  outboundCallsCounterNextResetAt?: string;
  /** This is the IDs of the coupons applicable to this subscription. */
  couponIds?: string[];
  /** This is the number of credits left obtained from a coupon. */
  couponUsageLeft?: string;
  /** This is the invoice plan for the subscription. */
  invoicePlan?: InvoicePlan;
  /**
   * This is the PCI enabled flag for the subscription. It determines whether orgs under this
   * subscription have the option to enable PCI compliance.
   */
  pciEnabled?: boolean;
  /** This is the ID for the Common Paper agreement outlining the PCI contract. */
  pciCommonPaperAgreementId?: string;
  /** This is the call retention days for the subscription. */
  callRetentionDays?: number;
  /**
   * This is the chat retention days for the subscription.
   */
  chatRetentionDays?: number;
  /** This is the minutes_included reset frequency for the subscription. */
  minutesIncludedResetFrequency?: "monthly" | "annually";
  /** This is the Role Based Access Control (RBAC) enabled flag for the subscription. */
  rbacEnabled?: boolean;
  /** This is the platform fee for the subscription. */
  platformFee?: number;
}

export interface Org {
  /**
   * When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.
   * When HIPAA is enabled, only OpenAI/Custom LLM or Azure Providers will be available for LLM and Voice respectively.
   * This is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.
   * @example false
   */
  hipaaEnabled?: boolean;
  subscription?: Subscription;
  /** This is the ID of the subscription the org belongs to. */
  subscriptionId?: string;
  /** This is the unique identifier for the org. */
  id: string;
  /**
   * This is the ISO 8601 date-time string of when the org was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the org was last updated.
   * @format date-time
   */
  updatedAt: string;
  /** This is the subscription for the org. */
  stripeSubscriptionId?: string;
  /** This is the subscription's subscription item. */
  stripeSubscriptionItemId?: string;
  /**
   * This is the subscription's current period start.
   * @format date-time
   */
  stripeSubscriptionCurrentPeriodStart?: string;
  /** This is the subscription's status. */
  stripeSubscriptionStatus?: string;
  /** This is the secret key used for signing JWT tokens for the org. */
  jwtSecret?: string;
  /** This is the total number of call minutes used by this org across all time. */
  minutesUsed?: number;
  /**
   * This is the name of the org. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /** This is the channel of the org.
This is the cluster the API traffic for the org will be directed to. */
  channel?: "daily" | "default" | "weekly" | "intuit";
  /**
   * This is the monthly billing limit for the org. To go beyond $1000/mo, please contact us at support@vapi.ai.
   * @min 0
   * @max 1000
   */
  billingLimit?: number;
  /**
   * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.
   *
   * The order of precedence is:
   *
   * 1. assistant.server
   * 2. phoneNumber.server
   * 3. org.server
   */
  server?: Server;
  /**
   * This is the concurrency limit for the org. This is the maximum number of calls that can be active at any given time. To go beyond 10, please contact us at support@vapi.ai.
   * @deprecated
   * @min 1
   * @max 10
   */
  concurrencyLimit?: number;
  /**
   * Stores the information about the compliance plan enforced at the organization level. Currently pciEnabled is supported through this field.
   * When this is enabled, any logs, recordings, or transcriptions will be shipped to the customer endpoints if provided else lost.
   * At the end of the call, you will receive an end-of-call-report message to store on your server, if webhook is provided.
   * Defaults to false.
   * When PCI is enabled, only PCI-compliant Providers will be available for LLM, Voice and transcribers.
   * This is due to the compliance requirements of PCI. Other providers may not meet these requirements.
   */
  compliancePlan?: CompliancePlan;
}

export interface UpdateOrgDTO {
  /**
   * When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.
   * When HIPAA is enabled, only OpenAI/Custom LLM or Azure Providers will be available for LLM and Voice respectively.
   * This is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.
   * @example false
   */
  hipaaEnabled?: boolean;
  /** This is the ID of the subscription the org belongs to. */
  subscriptionId?: string;
  /**
   * This is the name of the org. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /** This is the channel of the org. This is the cluster the API traffic for the org will be directed to. */
  channel?: "daily" | "default" | "weekly" | "intuit";
  /**
   * This is the monthly billing limit for the org. To go beyond $1000/mo, please contact us at support@vapi.ai.
   * @min 0
   * @max 1000
   */
  billingLimit?: number;
  /**
   * This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.
   *
   * The order of precedence is:
   *
   * 1. assistant.server
   * 2. phoneNumber.server
   * 3. org.server
   */
  server?: Server;
  /**
   * This is the concurrency limit for the org. This is the maximum number of calls that can be active at any given time. To go beyond 10, please contact us at support@vapi.ai.
   * @deprecated
   * @min 1
   * @max 10
   */
  concurrencyLimit?: number;
  /**
   * Stores the information about the compliance plan enforced at the organization level. Currently pciEnabled is supported through this field.
   * When this is enabled, any logs, recordings, or transcriptions will be shipped to the customer endpoints if provided else lost.
   * At the end of the call, you will receive an end-of-call-report message to store on your server, if webhook is provided.
   * Defaults to false.
   * When PCI is enabled, only PCI-compliant Providers will be available for LLM, Voice and transcribers.
   * This is due to the compliance requirements of PCI. Other providers may not meet these requirements.
   */
  compliancePlan?: CompliancePlan;
}

export interface User {
  /** This is the unique identifier for the profile or user. */
  id: string;
  /**
   * This is the ISO 8601 date-time string of when the profile was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the profile was last updated.
 * @format date-time
   */
  updatedAt: string;
  /** This is the email of the user that is associated with the profile. */
  email: string;
  /** This is the full name of the user that is associated with the profile. */
  fullName?: string;
}

export interface InviteUserDTO {
  /** @maxItems 100 */
  emails: string[];
  role: "admin" | "editor" | "viewer";
  redirectTo?: string;
}

export interface UpdateUserRoleDTO {
  userId: string;
  role: "admin" | "editor" | "viewer";
}

export interface JwtResponse {
  accessToken: string;
  aud: object;
}

export interface TokenRestrictions {
  /** This determines whether the token is enabled or disabled. Default is true, it's enabled. */
  enabled?: boolean;
  /**
   * This determines the allowed origins for this token. Validates the `Origin` header. Default is any origin.
   *
   * Only relevant for `public` tokens.
   */
  allowedOrigins?: string[];
  /**
   * This determines which assistantIds can be used when creating a call. Default is any assistantId.
   *
   * Only relevant for `public` tokens.
   */
  allowedAssistantIds?: string[];
  /**
   * This determines whether transient assistants can be used when creating a call. Default is true.
   *
   * If `allowedAssistantIds` is provided, this is automatically false.
   *
   * Only relevant for `public` tokens.
   */
  allowTransientAssistant?: boolean;
}

export interface CreateTokenDTO {
  /** This is the tag for the token. It represents its scope. */
  tag?: "private" | "public";
  /**
   * This is the name of the token. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /** These are the restrictions for the token. */
  restrictions?: TokenRestrictions;
}

export interface Token {
  /** This is the tag for the token. It represents its scope. */
  tag?: "private" | "public";
  /** This is the unique identifier for the token. */
  id: string;
  /** This is the unique identifier for the org that this token belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the token was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the token was last updated.
   * @format date-time
   */
  updatedAt: string;
  /** This is the token key. */
  value?: string;
  /**
   * This is the name of the token. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /** These are the restrictions for the token. */
  restrictions?: TokenRestrictions;
}

export interface UpdateTokenDTO {
  /** This is the tag for the token. It represents its scope. */
  tag?: "private" | "public";
  /**
   * This is the name of the token. This is just for your own reference.
   * @maxLength 40
   */
  name?: string;
  /** These are the restrictions for the token. */
  restrictions?: TokenRestrictions;
}

export interface AnthropicCredential {
  provider: "anthropic";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface AnyscaleCredential {
  provider: "anyscale";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential.
This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface AssemblyAICredential {
  provider: "assembly-ai";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface AzureCredential {
  provider: "azure";
  /**
   * This is the service being used in Azure.
   * @default "speech"
   */
  service: "speech" | "blob_storage";
  /** This is the region of the Azure resource. */
  region?:
    | "australia"
    | "canadaeast"
    | "canadacentral"
    | "eastus2"
    | "eastus"
    | "france"
    | "india"
    | "japaneast"
    | "japanwest"
    | "uaenorth"
    | "northcentralus"
    | "norway"
    | "southcentralus"
    | "swedencentral"
    | "switzerland"
    | "uk"
    | "westus"
    | "westus3";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey?: string;
  /**
   * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.
   * @min 1
   */
  fallbackIndex?: number;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
  /** This is the bucket plan that can be provided to store call artifacts in Azure Blob Storage. */
  bucketPlan?: AzureBlobStorageBucketPlan;
}

export interface AzureOpenAICredential {
  provider: "azure-openai";
  region:
    | "australia"
    | "canadaeast"
    | "canadacentral"
    | "eastus2"
    | "eastus"
    | "france"
    | "india"
    | "japaneast"
    | "japanwest"
    | "uaenorth"
    | "northcentralus"
    | "norway"
    | "southcentralus"
    | "swedencentral"
    | "switzerland"
    | "uk"
    | "westus"
    | "westus3";
  /** @example ["gpt-4-0125-preview","gpt-4-0613"] */
  // NOTE(review): the @example shows an array and the field is named `models`,
  // but the generated type is a single string-literal union — verify the
  // upstream OpenAPI spec / generator output (likely should be `(...)[]`).
  models:
    | "gpt-5"
    | "gpt-5-mini"
    | "gpt-5-nano"
    | "gpt-4.1-2025-04-14"
    | "gpt-4.1-mini-2025-04-14"
    | "gpt-4.1-nano-2025-04-14"
    | "gpt-4o-2024-11-20"
    | "gpt-4o-2024-08-06"
    | "gpt-4o-2024-05-13"
    | "gpt-4o-mini-2024-07-18"
    | "gpt-4-turbo-2024-04-09"
    | "gpt-4-0125-preview"
    | "gpt-4-1106-preview"
    | "gpt-4-0613"
    | "gpt-35-turbo-0125"
    | "gpt-35-turbo-1106";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  openAIKey: string;
  /** This is not returned in the API. */
  ocpApimSubscriptionKey?: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
  /** @maxLength 10000 */
  openAIEndpoint: string;
}

export interface ByoSipTrunkCredential {
  /** This can be used to bring your own SIP trunks or to connect to a Carrier. */
  provider?: "byo-sip-trunk";
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to.
*/
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
  /** This is the list of SIP trunk's gateways. */
  gateways: SipTrunkGateway[];
  /** This can be used to configure the outbound authentication if required by the SIP trunk. */
  outboundAuthenticationPlan?: SipTrunkOutboundAuthenticationPlan;
  /**
   * This ensures the outbound origination attempts have a leading plus. Defaults to false to match conventional telecom behavior.
   *
   * Usage:
   * - Vonage/Twilio requires leading plus for all outbound calls. Set this to true.
   *
   * @default false
   */
  outboundLeadingPlusEnabled?: boolean;
  /**
   * This can be used to configure the tech prefix on outbound calls. This is an advanced property.
   * @maxLength 10000
   */
  techPrefix?: string;
  /**
   * This can be used to enable the SIP diversion header for authenticating the calling number if the SIP trunk supports it. This is an advanced property.
   * @maxLength 10000
   */
  sipDiversionHeader?: string;
  /** This is an advanced configuration for enterprise deployments. This uses the onprem SBC to trunk into the SIP trunk's `gateways`, rather than the managed SBC provided by Vapi. */
  sbcConfiguration?: SbcConfiguration;
}

export interface CartesiaCredential {
  provider: "cartesia";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface CerebrasCredential {
  provider: "cerebras";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface CloudflareCredential {
  /** Credential provider. Only allowed value is cloudflare */
  provider: "cloudflare";
  /** Cloudflare Account Id. */
  accountId?: string;
  /** Cloudflare API Key / Token. */
  apiKey?: string;
  /** Cloudflare Account Email. */
  accountEmail?: string;
  /**
   * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.
   * @min 1
   */
  fallbackIndex?: number;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential.
 * @minLength 1
   * @maxLength 40
   */
  name?: string;
  /** This is the bucket plan that can be provided to store call artifacts in R2 */
  bucketPlan?: CloudflareR2BucketPlan;
}

export interface Oauth2AuthenticationSession {
  /** This is the OAuth2 access token. */
  accessToken?: string;
  /**
   * This is the OAuth2 access token expiration.
   * @format date-time
   */
  expiresAt?: string;
  /** This is the OAuth2 refresh token. */
  refreshToken?: string;
}

export interface CustomLLMCredential {
  provider: "custom-llm";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the authentication plan. Currently supports OAuth2 RFC 6749. To use Bearer authentication, use apiKey */
  authenticationPlan?: OAuth2AuthenticationPlan;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /** This is the authentication session for the credential. Available for credentials that have an authentication plan. */
  authenticationSession?: Oauth2AuthenticationSession;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface DeepgramCredential {
  provider: "deepgram";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
  /** This can be used to point to an onprem Deepgram instance. Defaults to api.deepgram.com. */
  apiUrl?: string;
}

export interface DeepInfraCredential {
  provider: "deepinfra";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface DeepSeekCredential {
  provider: "deep-seek";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface ElevenLabsCredential {
  provider: "11labs";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
 * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface GcpCredential {
  provider: "gcp";
  /**
   * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.
   * @min 1
   */
  fallbackIndex?: number;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
  /**
   * This is the GCP key. This is the JSON that can be generated in the Google Cloud Console at https://console.cloud.google.com/iam-admin/serviceaccounts/details//keys.
   *
   * The schema is identical to the JSON that GCP outputs.
   */
  gcpKey: GcpKey;
  /**
   * This is the region of the GCP resource.
   * @maxLength 40
   */
  region?: string;
  bucketPlan?: BucketPlan;
}

export interface GladiaCredential {
  provider: "gladia";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface GoHighLevelCredential {
  provider: "gohighlevel";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface GoogleCredential {
  /** This is the key for Gemini in Google AI Studio. Get it from here: https://aistudio.google.com/app/apikey */
  provider: "google";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface GroqCredential {
  provider: "groq";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
 * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface HumeCredential {
  provider: "hume";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface InflectionAICredential {
  /** This is the api key for Pi in InflectionAI's console. Get it from here: https://developers.inflection.ai/keys, billing will need to be setup */
  provider: "inflection-ai";
  /**
   * This is not returned in the API.
   * @maxLength 10000
   */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface LangfuseCredential {
  provider: "langfuse";
  /** The public key for Langfuse project. Eg: pk-lf-... */
  publicKey: string;
  /**
   * The secret key for Langfuse project. Eg: sk-lf-...
   * This is not returned in the API.
   */
  apiKey: string;
  /** The host URL for Langfuse project. Eg: https://cloud.langfuse.com */
  apiUrl: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface LmntCredential {
  provider: "lmnt";
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential. This is just for your reference.
   * @minLength 1
   * @maxLength 40
   */
  name?: string;
}

export interface MakeCredential {
  provider: "make";
  /** Team ID */
  teamId: string;
  /** Region of your application. For example: eu1, eu2, us1, us2 */
  region: string;
  /** This is not returned in the API. */
  apiKey: string;
  /** This is the unique identifier for the credential. */
  id: string;
  /** This is the unique identifier for the org that this credential belongs to. */
  orgId: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was created.
   * @format date-time
   */
  createdAt: string;
  /**
   * This is the ISO 8601 date-time string of when the credential was last updated.
   * @format date-time
   */
  updatedAt: string;
  /**
   * This is the name of credential.
This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface MistralCredential { provider: "mistral"; /** * This is not returned in the API. * @maxLength 100 */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface NeuphonicCredential { provider: "neuphonic"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface OpenAICredential { provider: "openai"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. 
This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface OpenRouterCredential { provider: "openrouter"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface PerplexityAICredential { provider: "perplexity-ai"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface PlayHTCredential { provider: "playht"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; userId: string; } export interface RimeAICredential { provider: "rime-ai"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface RunpodCredential { provider: "runpod"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface S3Credential { /** Credential provider. Only allowed value is s3 */ provider: "s3"; /** AWS access key ID. */ awsAccessKeyId: string; /** AWS access key secret. This is not returned in the API. */ awsSecretAccessKey: string; /** AWS region in which the S3 bucket is located. */ region: string; /** AWS S3 bucket name. */ s3BucketName: string; /** The path prefix for the uploaded recording. Ex. "recordings/" */ s3PathPrefix: string; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. 
* @min 1 */ fallbackIndex?: number; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface SmallestAICredential { provider: "smallest-ai"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface SpeechmaticsCredential { provider: "speechmatics"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface SupabaseCredential { /** This is for supabase storage. 
*/ provider: "supabase"; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; bucketPlan?: SupabaseBucketPlan; } export interface TavusCredential { provider: "tavus"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface TogetherAICredential { provider: "together-ai"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. 
This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface TrieveCredential { provider: "trieve"; /** This is not returned in the API. */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface TwilioCredential { provider: "twilio"; /** This is not returned in the API. */ authToken?: string; /** This is not returned in the API. */ apiKey?: string; /** This is not returned in the API. */ apiSecret?: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; accountSid: string; } export interface VonageCredential { /** * This is not returned in the API. * @maxLength 10000 */ vonageApplicationPrivateKey: string; provider: "vonage"; /** This is not returned in the API. */ apiSecret: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. 
* @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** * This is the Vonage Application ID for the credential. * * Only relevant for Vonage credentials. * @maxLength 10000 */ vonageApplicationId: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; apiKey: string; } export interface WebhookCredential { provider: "webhook"; /** This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication. */ authenticationPlan: | ({ type: "oauth2"; } & OAuth2AuthenticationPlan) | ({ type: "hmac"; } & HMACAuthenticationPlan) | ({ type: "bearer"; } & BearerAuthenticationPlan); /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** This is the authentication session for the credential. Available for credentials that have an authentication plan. */ authenticationSession: Oauth2AuthenticationSession; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CustomCredential { provider: "custom-credential"; /** This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication. */ authenticationPlan: | ({ type: "oauth2"; } & OAuth2AuthenticationPlan) | ({ type: "hmac"; } & HMACAuthenticationPlan) | ({ type: "bearer"; } & BearerAuthenticationPlan); /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. 
*/ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** This is the authentication session for the credential. Available for credentials that have an authentication plan. */ authenticationSession: Oauth2AuthenticationSession; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface XAiCredential { /** This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai */ provider: "xai"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface GoogleCalendarOAuth2ClientCredential { provider: "google.calendar.oauth2-client"; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; } export interface GoogleCalendarOAuth2AuthorizationCredential { provider: "google.calendar.oauth2-authorization"; /** The authorization ID for the OAuth2 authorization */ authorizationId: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface GoogleSheetsOAuth2AuthorizationCredential { provider: "google.sheets.oauth2-authorization"; /** The authorization ID for the OAuth2 authorization */ authorizationId: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the assistant was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface SlackOAuth2AuthorizationCredential { provider: "slack.oauth2-authorization"; /** The authorization ID for the OAuth2 authorization */ authorizationId: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. 
* @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface GoHighLevelMCPCredential { provider: "ghl.oauth2-authorization"; /** This is the authentication session for the credential. */ authenticationSession: Oauth2AuthenticationSession; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface InworldCredential { provider: "inworld"; /** * This is the Inworld Basic (Base64) authentication token. This is not returned in the API. * @example "your-base64-token-here" */ apiKey: string; /** This is the unique identifier for the credential. */ id: string; /** This is the unique identifier for the org that this credential belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the credential was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the credential was last updated. * @format date-time */ updatedAt: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateCerebrasCredentialDTO { provider: "cerebras"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGoogleCredentialDTO { /** This is the key for Gemini in Google AI Studio. Get it from here: https://aistudio.google.com/app/apikey */ provider: "google"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateHumeCredentialDTO { provider: "hume"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateInflectionAICredentialDTO { /** This is the api key for Pi in InflectionAI's console. Get it from here: https://developers.inflection.ai/keys, billing will need to be setup */ provider: "inflection-ai"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateMistralCredentialDTO { provider: "mistral"; /** * This is not returned in the API. * @maxLength 100 */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateNeuphonicCredentialDTO { provider: "neuphonic"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateSpeechmaticsCredentialDTO { provider: "speechmatics"; /** This is not returned in the API. */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateTrieveCredentialDTO { provider: "trieve"; /** This is not returned in the API. 
*/ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateCustomCredentialDTO { provider: "custom-credential"; /** This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication. */ authenticationPlan: | ({ type: "oauth2"; } & OAuth2AuthenticationPlan) | ({ type: "hmac"; } & HMACAuthenticationPlan) | ({ type: "bearer"; } & BearerAuthenticationPlan); /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateGoHighLevelMCPCredentialDTO { provider: "ghl.oauth2-authorization"; /** This is the authentication session for the credential. */ authenticationSession: Oauth2AuthenticationSession; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CreateInworldCredentialDTO { provider: "inworld"; /** * This is the Inworld Basic (Base64) authentication token. This is not returned in the API. * @example "your-base64-token-here" */ apiKey: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateAnthropicCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateAnyscaleCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateAssemblyAICredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateAzureCredentialDTO { /** * This is the service being used in Azure. * @default "speech" */ service?: "speech" | "blob_storage"; /** This is the region of the Azure resource. */ region?: | "australia" | "canadaeast" | "canadacentral" | "eastus2" | "eastus" | "france" | "india" | "japaneast" | "japanwest" | "uaenorth" | "northcentralus" | "norway" | "southcentralus" | "swedencentral" | "switzerland" | "uk" | "westus" | "westus3"; /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; /** This is the bucket plan that can be provided to store call artifacts in Azure Blob Storage. */ bucketPlan?: AzureBlobStorageBucketPlan; } export interface UpdateAzureOpenAICredentialDTO { region?: | "australia" | "canadaeast" | "canadacentral" | "eastus2" | "eastus" | "france" | "india" | "japaneast" | "japanwest" | "uaenorth" | "northcentralus" | "norway" | "southcentralus" | "swedencentral" | "switzerland" | "uk" | "westus" | "westus3"; /** @example ["gpt-4-0125-preview","gpt-4-0613"] */ models?: | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gpt-4.1-2025-04-14" | "gpt-4.1-mini-2025-04-14" | "gpt-4.1-nano-2025-04-14" | "gpt-4o-2024-11-20" | "gpt-4o-2024-08-06" | "gpt-4o-2024-05-13" | "gpt-4o-mini-2024-07-18" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" | "gpt-4-1106-preview" | "gpt-4-0613" | "gpt-35-turbo-0125" | "gpt-35-turbo-1106"; /** * This is not returned in the API. * @maxLength 10000 */ openAIKey?: string; /** This is not returned in the API. */ ocpApimSubscriptionKey?: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; /** @maxLength 10000 */ openAIEndpoint?: string; } export interface UpdateByoSipTrunkCredentialDTO { /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; /** This is the list of SIP trunk's gateways. */ gateways?: SipTrunkGateway[]; /** This can be used to configure the outbound authentication if required by the SIP trunk. */ outboundAuthenticationPlan?: SipTrunkOutboundAuthenticationPlan; /** * This ensures the outbound origination attempts have a leading plus. Defaults to false to match conventional telecom behavior. * * Usage: * - Vonage/Twilio requires leading plus for all outbound calls. Set this to true. * * @default false */ outboundLeadingPlusEnabled?: boolean; /** * This can be used to configure the tech prefix on outbound calls. This is an advanced property. * @maxLength 10000 */ techPrefix?: string; /** * This can be used to enable the SIP diversion header for authenticating the calling number if the SIP trunk supports it. This is an advanced property. * @maxLength 10000 */ sipDiversionHeader?: string; /** This is an advanced configuration for enterprise deployments. This uses the onprem SBC to trunk into the SIP trunk's `gateways`, rather than the managed SBC provided by Vapi. */ sbcConfiguration?: SbcConfiguration; } export interface UpdateCartesiaCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateCerebrasCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateCloudflareCredentialDTO { /** Cloudflare Account Id. */ accountId?: string; /** Cloudflare API Key / Token. 
*/ apiKey?: string; /** Cloudflare Account Email. */ accountEmail?: string; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; /** This is the bucket plan that can be provided to store call artifacts in R2 */ bucketPlan?: CloudflareR2BucketPlan; } export interface UpdateCustomLLMCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** This is the authentication plan. Currently supports OAuth2 RFC 6749. To use Bearer authentication, use apiKey */ authenticationPlan?: OAuth2AuthenticationPlan; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateDeepgramCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; /** This can be used to point to an onprem Deepgram instance. Defaults to api.deepgram.com. */ apiUrl?: string; } export interface UpdateDeepInfraCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateDeepSeekCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateElevenLabsCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGcpCredentialDTO { /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; /** * This is the GCP key. This is the JSON that can be generated in the Google Cloud Console at https://console.cloud.google.com/iam-admin/serviceaccounts/details//keys. * * The schema is identical to the JSON that GCP outputs. */ gcpKey?: GcpKey; /** * This is the region of the GCP resource. * @maxLength 40 */ region?: string; bucketPlan?: BucketPlan; } export interface UpdateGladiaCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGoHighLevelCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGoogleCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGroqCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateHumeCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateInflectionAICredentialDTO { /** * This is not returned in the API. 
* @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateLangfuseCredentialDTO { /** The public key for Langfuse project. Eg: pk-lf-... */ publicKey?: string; /** The secret key for Langfuse project. Eg: sk-lf-... .This is not returned in the API. */ apiKey?: string; /** The host URL for Langfuse project. Eg: https://cloud.langfuse.com */ apiUrl?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateLmntCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateMakeCredentialDTO { /** Team ID */ teamId?: string; /** Region of your application. For example: eu1, eu2, us1, us2 */ region?: string; /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateMistralCredentialDTO { /** * This is not returned in the API. * @maxLength 100 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateNeuphonicCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateOpenAICredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateOpenRouterCredentialDTO { /** This is not returned in the API. 
*/ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdatePerplexityAICredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdatePlayHTCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; userId?: string; } export interface UpdateRimeAICredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateRunpodCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateS3CredentialDTO { /** AWS access key ID. */ awsAccessKeyId?: string; /** AWS access key secret. This is not returned in the API. */ awsSecretAccessKey?: string; /** AWS region in which the S3 bucket is located. */ region?: string; /** AWS S3 bucket name. */ s3BucketName?: string; /** The path prefix for the uploaded recording. Ex. "recordings/" */ s3PathPrefix?: string; /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateSmallestAICredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateSpeechmaticsCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateSupabaseCredentialDTO { /** * This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order. * @min 1 */ fallbackIndex?: number; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; bucketPlan?: SupabaseBucketPlan; } export interface UpdateTavusCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateTogetherAICredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateTrieveCredentialDTO { /** This is not returned in the API. */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateTwilioCredentialDTO { /** This is not returned in the API. */ authToken?: string; /** This is not returned in the API. */ apiKey?: string; /** This is not returned in the API. */ apiSecret?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; accountSid?: string; } export interface UpdateVonageCredentialDTO { /** This is not returned in the API. */ apiSecret?: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; apiKey?: string; } export interface UpdateWebhookCredentialDTO { /** This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication. */ authenticationPlan?: | ({ type: "oauth2"; } & OAuth2AuthenticationPlan) | ({ type: "hmac"; } & HMACAuthenticationPlan) | ({ type: "bearer"; } & BearerAuthenticationPlan); /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateCustomCredentialDTO { /** This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication. */ authenticationPlan?: | ({ type: "oauth2"; } & OAuth2AuthenticationPlan) | ({ type: "hmac"; } & HMACAuthenticationPlan) | ({ type: "bearer"; } & BearerAuthenticationPlan); /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateXAiCredentialDTO { /** * This is not returned in the API. * @maxLength 10000 */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGoogleCalendarOAuth2ClientCredentialDTO { /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGoogleCalendarOAuth2AuthorizationCredentialDTO { /** The authorization ID for the OAuth2 authorization */ authorizationId?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGoogleSheetsOAuth2AuthorizationCredentialDTO { /** The authorization ID for the OAuth2 authorization */ authorizationId?: string; /** * This is the name of credential. This is just for your reference. 
* @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateSlackOAuth2AuthorizationCredentialDTO { /** The authorization ID for the OAuth2 authorization */ authorizationId?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateGoHighLevelMCPCredentialDTO { /** This is the authentication session for the credential. */ authenticationSession?: Oauth2AuthenticationSession; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface UpdateInworldCredentialDTO { /** * This is the Inworld Basic (Base64) authentication token. This is not returned in the API. * @example "your-base64-token-here" */ apiKey?: string; /** * This is the name of credential. This is just for your reference. * @minLength 1 * @maxLength 40 */ name?: string; } export interface CredentialSessionResponse { sessionToken: string; } export interface CredentialEndUser { endUserId: string; organizationId: string; tags?: object; } export interface CredentialSessionError { type: string; description: string; } export interface CredentialWebhookDTO { type: "auth" | "sync" | "forward"; operation: "creation" | "override" | "refresh"; from: string; connectionId: string; authMode: "OAUTH2" | "API_KEY" | "BASIC"; providerConfigKey: string; provider: string; environment: string; success: boolean; endUser: CredentialEndUser; error?: CredentialSessionError; } export interface CredentialActionRequest { action_name: string; input: object; } export interface HMACAuthenticationPlan { type: "hmac"; /** This is the HMAC secret key used to sign requests. */ secretKey: string; /** This is the HMAC algorithm to use for signing. */ algorithm: "sha256" | "sha512" | "sha1"; /** This is the header name where the signature will be sent. Defaults to 'x-signature'. 
*/ signatureHeader?: string; /** This is the header name where the timestamp will be sent. Defaults to 'x-timestamp'. */ timestampHeader?: string; /** This is the prefix for the signature. For example, 'sha256=' for GitHub-style signatures. */ signaturePrefix?: string; /** Whether to include a timestamp in the signature payload. Defaults to true. */ includeTimestamp?: boolean; /** Custom payload format. Use {body} for request body, {timestamp} for timestamp, {method} for HTTP method, {url} for URL, {svix-id} for unique message ID. Defaults to '{timestamp}.{body}'. */ payloadFormat?: string; /** This is the header name where the unique message ID will be sent. Used for Svix-style webhooks. */ messageIdHeader?: string; /** The encoding format for the signature. Defaults to 'hex'. */ signatureEncoding?: "hex" | "base64"; /** Whether the secret key is base64-encoded and should be decoded before use. Defaults to false. */ secretIsBase64?: boolean; } export interface BearerAuthenticationPlan { type: "bearer"; /** This is the bearer token value. */ token: string; /** This is the header name where the bearer token will be sent. Defaults to 'Authorization'. */ headerName?: string; /** Whether to include the 'Bearer ' prefix in the header value. Defaults to true. */ bearerPrefixEnabled?: boolean; } export interface ToolTemplateSetup { title: string; description?: string; videoUrl?: string; docsUrl?: string; } export interface MakeToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "make" for Make tool. */ type: "make"; scenarioId?: number; scenarioName?: string; triggerHookId?: number; triggerHookName?: string; } export interface GhlToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "ghl" for GHL tool. 
*/ type: "ghl"; workflowId?: string; workflowName?: string; webhookHookId?: string; webhookHookName?: string; locationId?: string; } export interface FunctionToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "function" for Function tool. */ type: "function"; } export interface GoogleCalendarCreateEventToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "google.calendar.event.create" for Google Calendar tool. */ type: "google.calendar.event.create"; } export interface GoogleSheetsRowAppendToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "google.sheets.row.append" for Google Sheets tool. */ type: "google.sheets.row.append"; } export interface GoHighLevelCalendarAvailabilityToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "gohighlevel.calendar.availability.check" for GoHighLevel Calendar availability check tool. */ type: "gohighlevel.calendar.availability.check"; } export interface GoHighLevelCalendarEventCreateToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "gohighlevel.calendar.event.create" for GoHighLevel Calendar event create tool. */ type: "gohighlevel.calendar.event.create"; } export interface GoHighLevelContactCreateToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. 
*/ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "gohighlevel.contact.create" for GoHighLevel contact create tool. */ type: "gohighlevel.contact.create"; } export interface GoHighLevelContactGetToolProviderDetails { /** This is the Template URL or the Snapshot URL corresponding to the Template. */ templateUrl?: string; setupInstructions?: ToolTemplateSetup[]; /** The type of tool. "gohighlevel.contact.get" for GoHighLevel contact get tool. */ type: "gohighlevel.contact.get"; } export interface ToolTemplateMetadata { collectionType?: string; collectionId?: string; collectionName?: string; } export interface CreateToolTemplateDTO { details?: | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO; providerDetails?: | MakeToolProviderDetails | GhlToolProviderDetails | FunctionToolProviderDetails | GoogleCalendarCreateEventToolProviderDetails | GoogleSheetsRowAppendToolProviderDetails | GoHighLevelCalendarAvailabilityToolProviderDetails | GoHighLevelCalendarEventCreateToolProviderDetails | GoHighLevelContactCreateToolProviderDetails | GoHighLevelContactGetToolProviderDetails; metadata?: ToolTemplateMetadata; /** @default "private" */ visibility?: "public" | "private"; /** @default "tool" */ type: "tool"; /** * The name of the template. This is just for your own reference. 
* @maxLength 40 */ name?: string; provider?: "make" | "gohighlevel" | "function"; } export interface Template { details?: | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO; providerDetails?: | MakeToolProviderDetails | GhlToolProviderDetails | FunctionToolProviderDetails | GoogleCalendarCreateEventToolProviderDetails | GoogleSheetsRowAppendToolProviderDetails | GoHighLevelCalendarAvailabilityToolProviderDetails | GoHighLevelCalendarEventCreateToolProviderDetails | GoHighLevelContactCreateToolProviderDetails | GoHighLevelContactGetToolProviderDetails; metadata?: ToolTemplateMetadata; /** @default "private" */ visibility?: "public" | "private"; /** @default "tool" */ type: "tool"; /** * The name of the template. This is just for your own reference. * @maxLength 40 */ name?: string; provider?: "make" | "gohighlevel" | "function"; /** The unique identifier for the template. */ id: string; /** The unique identifier for the organization that this template belongs to. */ orgId: string; /** * The ISO 8601 date-time string of when the template was created. * @format date-time */ createdAt: string; /** * The ISO 8601 date-time string of when the template was last updated. 
* @format date-time */ updatedAt: string; } export interface UpdateToolTemplateDTO { details?: | CreateApiRequestToolDTO | CreateBashToolDTO | CreateComputerToolDTO | CreateDtmfToolDTO | CreateEndCallToolDTO | CreateFunctionToolDTO | CreateGoHighLevelCalendarAvailabilityToolDTO | CreateGoHighLevelCalendarEventCreateToolDTO | CreateGoHighLevelContactCreateToolDTO | CreateGoHighLevelContactGetToolDTO | CreateGoogleCalendarCheckAvailabilityToolDTO | CreateGoogleCalendarCreateEventToolDTO | CreateGoogleSheetsRowAppendToolDTO | CreateHandoffToolDTO | CreateMcpToolDTO | CreateQueryToolDTO | CreateSlackSendMessageToolDTO | CreateSmsToolDTO | CreateTextEditorToolDTO | CreateTransferCallToolDTO; providerDetails?: | MakeToolProviderDetails | GhlToolProviderDetails | FunctionToolProviderDetails | GoogleCalendarCreateEventToolProviderDetails | GoogleSheetsRowAppendToolProviderDetails | GoHighLevelCalendarAvailabilityToolProviderDetails | GoHighLevelCalendarEventCreateToolProviderDetails | GoHighLevelContactCreateToolProviderDetails | GoHighLevelContactGetToolProviderDetails; metadata?: ToolTemplateMetadata; /** @default "private" */ visibility?: "public" | "private"; /** @default "tool" */ type: "tool"; /** * The name of the template. This is just for your own reference. * @maxLength 40 */ name?: string; provider?: "make" | "gohighlevel" | "function"; } export interface VoiceLibrary { /** This is the voice provider that will be used. */ provider?: | "vapi" | "11labs" | "azure" | "cartesia" | "custom-voice" | "deepgram" | "hume" | "lmnt" | "neuphonic" | "openai" | "playht" | "rime-ai" | "smallest-ai" | "tavus" | "sesame" | "inworld" | "minimax" | "orpheus"; /** The ID of the voice provided by the provider. */ providerId?: string; /** The unique slug of the voice. */ slug?: string; /** The name of the voice. */ name?: string; /** The language of the voice. */ language?: string; /** The language code of the voice. */ languageCode?: string; /** The model of the voice. 
*/ model?: string; /** The supported models of the voice. */ supportedModels?: string; /** The gender of the voice. */ gender?: "male" | "female"; /** The accent of the voice. */ accent?: string; /** The preview URL of the voice. */ previewUrl?: string; /** The description of the voice. */ description?: string; /** The credential ID of the voice. */ credentialId?: string; /** The unique identifier for the voice library. */ id: string; /** The unique identifier for the organization that this voice library belongs to. */ orgId: string; /** The Public voice is shared across all the organizations. */ isPublic: boolean; /** The deletion status of the voice. */ isDeleted: boolean; /** * The ISO 8601 date-time string of when the voice library was created. * @format date-time */ createdAt: string; /** * The ISO 8601 date-time string of when the voice library was last updated. * @format date-time */ updatedAt: string; } export interface SyncVoiceLibraryDTO { /** List of providers you want to sync. NOTE(review): the description says "List" but the generated type is a single provider value, not an array — presumably an upstream swagger-spec issue; confirm against the API schema before relying on this shape. */ providers?: | "vapi" | "11labs" | "azure" | "cartesia" | "custom-voice" | "deepgram" | "hume" | "lmnt" | "neuphonic" | "openai" | "playht" | "rime-ai" | "smallest-ai" | "tavus" | "sesame" | "inworld" | "minimax" | "orpheus"; } export interface CreateSesameVoiceDTO { /** The name of the voice. */ voiceName?: string; /** The transcript of the utterance. 
*/ transcription?: string; } export interface ElevenLabsPronunciationDictionary { /** * The ID of the pronunciation dictionary * @example "5xM3yVvZQKV0EfqQpLrJ" */ pronunciationDictionaryId: string; /** * The name of the pronunciation dictionary * @example "My Dictionary" */ dictionaryName: string; /** * The user ID of the creator * @example "ar6633Es2kUjFXBdR1iVc9ztsXl1" */ createdBy: string; /** * The creation time in Unix timestamp * @example 1714156800 */ creationTimeUnix: number; /** * The version ID of the pronunciation dictionary * @example "5xM3yVvZQKV0EfqQpLrJ" */ versionId: string; /** * The number of rules in this version * @example 5 */ versionRulesNum: number; /** The permission level on this resource */ permissionOnResource?: "admin" | "editor" | "viewer"; /** * The description of the pronunciation dictionary * @example "This is a test dictionary" */ description?: string; } export interface ProviderResource { /** This is the unique identifier for the provider resource. */ id: string; /** This is the unique identifier for the org that this provider resource belongs to. */ orgId: string; /** * This is the ISO 8601 date-time string of when the provider resource was created. * @format date-time */ createdAt: string; /** * This is the ISO 8601 date-time string of when the provider resource was last updated. * @format date-time */ updatedAt: string; /** This is the provider that manages this resource. */ provider: "11labs"; /** This is the name/type of the resource. */ resourceName: "pronunciation-dictionary"; /** This is the provider-specific identifier for the resource. */ resourceId: string; /** This is the full resource data from the provider's API. 
*/ resource: ElevenLabsPronunciationDictionary; } export interface ProviderResourcePaginatedResponse { results: ProviderResource[]; metadata: PaginationMeta; } export interface VoiceLibraryVoiceResponse { voiceId: string; name: string; publicOwnerId?: string; description?: string; gender?: string; age?: object; accent?: string; } export interface AddVoiceToProviderDTO { /** This is the owner_id of your shared voice which you want to add to your provider Account from Provider Voice Library */ ownerId: string; /** This is the voice_id of the shared voice which you want to add to your provider Account from Provider Voice Library */ voiceId: string; /** This is the new name of the voice which you want to have once you have added voice to your provider Account from Provider Voice Library */ name: string; } export interface CloneVoiceDTO { /** This is the name of the cloned voice in the provider account. */ name: string; /** This is the description of your cloned voice. */ description?: string; /** Serialized labels dictionary for the voice. */ labels?: string; /** These are the files you want to use to clone your voice. Only Audio files are supported. */ files: File[]; } export interface VariableValueGroupBy { /** * This is the key of the variable value to group by. * @maxLength 100 */ key: string; } export interface TimeRange { /** * This is the time step for aggregations. * * If not provided, defaults to returning for the entire time range. */ step?: | "second" | "minute" | "hour" | "day" | "week" | "month" | "quarter" | "year" | "decade" | "century" | "millennium"; /** * This is the start date for the time range. * * If not provided, defaults to the 7 days ago. * @format date-time */ start?: string; /** * This is the end date for the time range. * * If not provided, defaults to now. * @format date-time */ end?: string; /** * This is the timezone you want to set for the query. * * If not provided, defaults to UTC. 
*/ timezone?: string; } export interface AnalyticsOperation { /** This is the aggregation operation you want to perform. */ operation: "sum" | "avg" | "count" | "min" | "max" | "history"; /** This is the columns you want to perform the aggregation operation on. */ column: | "id" | "cost" | "costBreakdown.llm" | "costBreakdown.stt" | "costBreakdown.tts" | "costBreakdown.vapi" | "costBreakdown.transport" | "costBreakdown.analysisBreakdown.summary" | "costBreakdown.transcriber" | "costBreakdown.ttsCharacters" | "costBreakdown.llmPromptTokens" | "costBreakdown.llmCompletionTokens" | "duration" | "concurrency" | "minutesUsed"; /** * This is the alias for column name returned. Defaults to `${operation}${column}`. * @maxLength 40 */ alias?: string; } export interface AnalyticsQuery { /** This is the table you want to query. */ table: "call" | "subscription"; /** This is the list of columns you want to group by. */ groupBy?: | "type" | "assistantId" | "endedReason" | "analysis.successEvaluation" | "status"; /** This is the list of variable value keys you want to group by. */ groupByVariableValue?: VariableValueGroupBy[]; /** * This is the name of the query. This will be used to identify the query in the response. * @maxLength 40 */ name: string; /** This is the time range for the query. */ timeRange?: TimeRange; /** This is the list of operations you want to perform. */ operations: AnalyticsOperation[]; } export interface AnalyticsQueryDTO { /** This is the list of metric queries you want to perform. */ queries: AnalyticsQuery[]; } export interface AnalyticsQueryResult { /** This is the unique key for the query. */ name: string; /** This is the time range for the query. */ timeRange: TimeRange; /** * This is the result of the query, a list of unique groups with result of their aggregations. 
* * Example: * "result": [ * { "date": "2023-01-01", "assistantId": "123", "endedReason": "customer-ended-call", "sumDuration": 120, "avgCost": 10.5 }, * { "date": "2023-01-02", "assistantId": "123", "endedReason": "customer-did-not-give-microphone-permission", "sumDuration": 0, "avgCost": 0 }, * // Additional results * ] */ result: object[]; } export interface ClientMessageWorkflowNodeStarted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "workflow.node.started" is sent when the active node changes. */ type: "workflow.node.started"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the active node. */ node: object; } export interface ClientMessageAssistantStarted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "assistant.started" is sent when the assistant is started. */ type: "assistant.started"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the assistant that was updated. 
*/ newAssistant: CreateAssistantDTO; } export interface ClientMessageConversationUpdate { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "conversation-update" is sent when an update is committed to the conversation history. */ type: "conversation-update"; /** This is the most up-to-date conversation history at the time the message is sent. */ messages?: ( | UserMessage | SystemMessage | BotMessage | ToolCallMessage | ToolCallResultMessage )[]; /** This is the most up-to-date conversation history at the time the message is sent, formatted for OpenAI. */ messagesOpenAIFormatted: OpenAIMessage[]; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; } export interface ClientMessageHang { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** * This is the type of the message. "hang" is sent when the assistant is hanging due to a delay. The delay can be caused by many factors, such as: * - the model is too slow to respond * - the voice is too slow to respond * - the tool call is still waiting for a response from your server * - etc. */ type: "hang"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. 
*/ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; } export interface ClientMessageMetadata { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "metadata" is sent to forward metadata to the client. */ type: "metadata"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the metadata content */ metadata: string; } export interface ClientMessageModelOutput { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "model-output" is sent as the model outputs tokens. */ type: "model-output"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the output of the model. It can be a token or tool call. */ output: object; } export interface ClientMessageSpeechUpdate { /** This is the phone number that the message is associated with. 
*/ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "speech-update" is sent whenever assistant or user start or stop speaking. */ type: "speech-update"; /** This is the status of the speech update. */ status: "started" | "stopped"; /** This is the role which the speech update is for. */ role: "assistant" | "user"; /** This is the turn number of the speech update (0-indexed). */ turn?: number; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; } export interface ClientMessageTranscript { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "transcript" is sent as transcriber outputs partial or final transcript. */ type: "transcript" | "transcript[transcriptType='final']"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the role for which the transcript is for. */ role: "assistant" | "user"; /** This is the type of the transcript. */ transcriptType: "partial" | "final"; /** This is the transcript content. */ transcript: string; /** Indicates if the transcript was filtered for security reasons. 
*/ isFiltered?: boolean; /** List of detected security threats if the transcript was filtered. */ detectedThreats?: string[]; /** The original transcript before filtering (only included if content was filtered). */ originalTranscript?: string; } export interface ClientMessageToolCalls { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "tool-calls" is sent to call a tool. */ type?: "tool-calls"; /** This is the list of tools calls that the model is requesting along with the original tool configuration. */ toolWithToolCallList: ( | FunctionToolWithToolCall | GhlToolWithToolCall | MakeToolWithToolCall | BashToolWithToolCall | ComputerToolWithToolCall | TextEditorToolWithToolCall | GoogleCalendarCreateEventToolWithToolCall )[]; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the list of tool calls that the model is requesting. */ toolCallList: ToolCall[]; } export interface ClientMessageToolCallsResult { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "tool-calls-result" is sent to forward the result of a tool call to the client. */ type: "tool-calls-result"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. 
*/ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the result of the tool call. */ toolCallResult: object; } export interface ClientMessageTransferUpdate { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "transfer-update" is sent whenever a transfer happens. */ type: "transfer-update"; /** This is the destination of the transfer. */ destination?: | TransferDestinationAssistant | TransferDestinationNumber | TransferDestinationSip; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the assistant that the call is being transferred to. This is only sent if `destination.type` is "assistant". */ toAssistant?: CreateAssistantDTO; /** This is the assistant that the call is being transferred from. This is only sent if `destination.type` is "assistant". */ fromAssistant?: CreateAssistantDTO; /** This is the step that the conversation moved to. */ toStepRecord?: object; /** This is the step that the conversation moved from. */ fromStepRecord?: object; } export interface ClientMessageUserInterrupted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "user-interrupted" is sent when the user interrupts the assistant. 
*/ type: "user-interrupted"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; } export interface ClientMessageLanguageChangeDetected { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "language-change-detected" is sent when the transcriber is automatically switched based on the detected language. */ type: "language-change-detected"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the language the transcriber is switched to. */ language: string; } export interface ClientMessageVoiceInput { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "voice-input" is sent when a generation is requested from the voice provider. */ type: "voice-input"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. 
*/ assistant?: CreateAssistantDTO; /** This is the voice input content. */ input: string; } export interface ClientMessageChatCreated { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "chat.created" is sent when a new chat is created. */ type: "chat.created"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the chat that was created. */ chat: Chat; } export interface ClientMessageChatDeleted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "chat.deleted" is sent when a chat is deleted. */ type: "chat.deleted"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the chat that was deleted. */ chat: Chat; } export interface ClientMessageSessionCreated { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "session.created" is sent when a new session is created. 
*/ type: "session.created"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the session that was created. */ session: Session; } export interface ClientMessageSessionUpdated { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "session.updated" is sent when a session is updated. */ type: "session.updated"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the session that was updated. */ session: Session; } export interface ClientMessageSessionDeleted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "session.deleted" is sent when a session is deleted. */ type: "session.deleted"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the session that was deleted. 
*/ session: Session; } export interface ClientMessageCallDeleted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "call.deleted" is sent when a call is deleted. */ type: "call.deleted"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; } export interface ClientMessageCallDeleteFailed { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "call.delete.failed" is sent when deleting a call fails. */ type: "call.delete.failed"; /** This is the timestamp of the message. */ timestamp?: number; /** This is the call that the message is associated with. */ call?: Call; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; } export interface ClientMessage { /** These are all the messages that can be sent to the client-side SDKs during the call. Configure the messages you'd like to receive in `assistant.clientMessages`. 
*/ message: | ClientMessageWorkflowNodeStarted | ClientMessageAssistantStarted | ClientMessageConversationUpdate | ClientMessageHang | ClientMessageMetadata | ClientMessageModelOutput | ClientMessageSpeechUpdate | ClientMessageTranscript | ClientMessageToolCalls | ClientMessageToolCallsResult | ClientMessageTransferUpdate | ClientMessageUserInterrupted | ClientMessageLanguageChangeDetected | ClientMessageVoiceInput | ClientMessageChatCreated | ClientMessageChatDeleted | ClientMessageSessionCreated | ClientMessageSessionUpdated | ClientMessageSessionDeleted | ClientMessageCallDeleted | ClientMessageCallDeleteFailed; } export interface ServerMessageAssistantRequest { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "assistant-request" is sent to fetch assistant configuration for an incoming call. */ type: "assistant-request"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; } export interface ServerMessageConversationUpdate { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "conversation-update" is sent when an update is committed to the conversation history. 
*/ type: "conversation-update"; /** This is the most up-to-date conversation history at the time the message is sent. */ messages?: ( | UserMessage | SystemMessage | BotMessage | ToolCallMessage | ToolCallResultMessage )[]; /** This is the most up-to-date conversation history at the time the message is sent, formatted for OpenAI. NOTE(review): required while `messages` is optional — presumably intentional in the generated spec; confirm against the OpenAPI source before relying on it. */ messagesOpenAIFormatted: OpenAIMessage[]; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; } export interface ServerMessageEndOfCallReport { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "end-of-call-report" is sent when the call ends and post-processing is complete. */ type: "end-of-call-report"; /** This is the reason the call ended. This can also be found at `call.endedReason` on GET /call/:id. 
*/ endedReason: | "call-start-error-neither-assistant-nor-server-set" | "assistant-request-failed" | "assistant-request-returned-error" | "assistant-request-returned-unspeakable-error" | "assistant-request-returned-invalid-assistant" | "assistant-request-returned-no-assistant" | "assistant-request-returned-forwarding-phone-number" | "scheduled-call-deleted" | "call.start.error-vapifault-get-org" | "call.start.error-vapifault-get-subscription" | "call.start.error-get-assistant" | "call.start.error-get-phone-number" | "call.start.error-get-customer" | "call.start.error-get-resources-validation" | "call.start.error-vapi-number-international" | "call.start.error-vapi-number-outbound-daily-limit" | "call.start.error-get-transport" | "call.start.error-subscription-wallet-does-not-exist" | "call.start.error-fraud-check-failed" | "call.start.error-subscription-frozen" | "call.start.error-subscription-insufficient-credits" | "call.start.error-subscription-upgrade-failed" | "call.start.error-subscription-concurrency-limit-reached" | "call.start.error-enterprise-feature-not-available-recording-consent" | "assistant-not-valid" | "call.start.error-vapifault-database-error" | "assistant-not-found" | "pipeline-error-openai-voice-failed" | "pipeline-error-cartesia-voice-failed" | "pipeline-error-deepgram-voice-failed" | "pipeline-error-eleven-labs-voice-failed" | "pipeline-error-playht-voice-failed" | "pipeline-error-lmnt-voice-failed" | "pipeline-error-azure-voice-failed" | "pipeline-error-rime-ai-voice-failed" | "pipeline-error-smallest-ai-voice-failed" | "pipeline-error-vapi-voice-failed" | "pipeline-error-neuphonic-voice-failed" | "pipeline-error-hume-voice-failed" | "pipeline-error-sesame-voice-failed" | "pipeline-error-inworld-voice-failed" | "pipeline-error-minimax-voice-failed" | "pipeline-error-tavus-video-failed" | "call.in-progress.error-vapifault-openai-voice-failed" | "call.in-progress.error-vapifault-cartesia-voice-failed" | 
"call.in-progress.error-vapifault-deepgram-voice-failed" | "call.in-progress.error-vapifault-eleven-labs-voice-failed" | "call.in-progress.error-vapifault-playht-voice-failed" | "call.in-progress.error-vapifault-lmnt-voice-failed" | "call.in-progress.error-vapifault-azure-voice-failed" | "call.in-progress.error-vapifault-rime-ai-voice-failed" | "call.in-progress.error-vapifault-smallest-ai-voice-failed" | "call.in-progress.error-vapifault-vapi-voice-failed" | "call.in-progress.error-vapifault-neuphonic-voice-failed" | "call.in-progress.error-vapifault-hume-voice-failed" | "call.in-progress.error-vapifault-sesame-voice-failed" | "call.in-progress.error-vapifault-inworld-voice-failed" | "call.in-progress.error-vapifault-minimax-voice-failed" | "call.in-progress.error-vapifault-tavus-video-failed" | "pipeline-error-vapi-llm-failed" | "pipeline-error-vapi-400-bad-request-validation-failed" | "pipeline-error-vapi-401-unauthorized" | "pipeline-error-vapi-403-model-access-denied" | "pipeline-error-vapi-429-exceeded-quota" | "pipeline-error-vapi-500-server-error" | "pipeline-error-vapi-503-server-overloaded-error" | "call.in-progress.error-providerfault-vapi-llm-failed" | "call.in-progress.error-vapifault-vapi-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-vapi-401-unauthorized" | "call.in-progress.error-vapifault-vapi-403-model-access-denied" | "call.in-progress.error-vapifault-vapi-429-exceeded-quota" | "call.in-progress.error-providerfault-vapi-500-server-error" | "call.in-progress.error-providerfault-vapi-503-server-overloaded-error" | "pipeline-error-deepgram-transcriber-failed" | "pipeline-error-deepgram-transcriber-api-key-missing" | "call.in-progress.error-vapifault-deepgram-transcriber-failed" | "pipeline-error-gladia-transcriber-failed" | "call.in-progress.error-vapifault-gladia-transcriber-failed" | "pipeline-error-speechmatics-transcriber-failed" | "call.in-progress.error-vapifault-speechmatics-transcriber-failed" | 
"pipeline-error-assembly-ai-transcriber-failed" | "pipeline-error-assembly-ai-returning-400-insufficent-funds" | "pipeline-error-assembly-ai-returning-400-paid-only-feature" | "pipeline-error-assembly-ai-returning-401-invalid-credentials" | "pipeline-error-assembly-ai-returning-500-invalid-schema" | "pipeline-error-assembly-ai-returning-500-word-boost-parsing-failed" | "call.in-progress.error-vapifault-assembly-ai-transcriber-failed" | "call.in-progress.error-vapifault-assembly-ai-returning-400-insufficent-funds" | "call.in-progress.error-vapifault-assembly-ai-returning-400-paid-only-feature" | "call.in-progress.error-vapifault-assembly-ai-returning-401-invalid-credentials" | "call.in-progress.error-vapifault-assembly-ai-returning-500-invalid-schema" | "call.in-progress.error-vapifault-assembly-ai-returning-500-word-boost-parsing-failed" | "pipeline-error-talkscriber-transcriber-failed" | "call.in-progress.error-vapifault-talkscriber-transcriber-failed" | "pipeline-error-azure-speech-transcriber-failed" | "call.in-progress.error-vapifault-azure-speech-transcriber-failed" | "call.in-progress.error-pipeline-no-available-llm-model" | "worker-shutdown" | "vonage-disconnected" | "vonage-failed-to-connect-call" | "vonage-completed" | "phone-call-provider-bypass-enabled-but-no-call-received" | "call.in-progress.error-providerfault-transport-never-connected" | "call.in-progress.error-vapifault-worker-not-available" | "call.in-progress.error-vapifault-transport-never-connected" | "call.in-progress.error-vapifault-transport-connected-but-call-not-active" | "call.in-progress.error-vapifault-call-started-but-connection-to-transport-missing" | "call.in-progress.error-vapifault-worker-died" | "call.in-progress.twilio-completed-call" | "call.in-progress.sip-completed-call" | "call.in-progress.error-providerfault-openai-llm-failed" | "call.in-progress.error-providerfault-azure-openai-llm-failed" | "call.in-progress.error-providerfault-groq-llm-failed" | 
"call.in-progress.error-providerfault-google-llm-failed" | "call.in-progress.error-providerfault-xai-llm-failed" | "call.in-progress.error-providerfault-mistral-llm-failed" | "call.in-progress.error-providerfault-inflection-ai-llm-failed" | "call.in-progress.error-providerfault-cerebras-llm-failed" | "call.in-progress.error-providerfault-deep-seek-llm-failed" | "call.in-progress.error-vapifault-chat-pipeline-failed-to-start" | "pipeline-error-openai-400-bad-request-validation-failed" | "pipeline-error-openai-401-unauthorized" | "pipeline-error-openai-401-incorrect-api-key" | "pipeline-error-openai-401-account-not-in-organization" | "pipeline-error-openai-403-model-access-denied" | "pipeline-error-openai-429-exceeded-quota" | "pipeline-error-openai-429-rate-limit-reached" | "pipeline-error-openai-500-server-error" | "pipeline-error-openai-503-server-overloaded-error" | "pipeline-error-openai-llm-failed" | "call.in-progress.error-vapifault-openai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-openai-401-unauthorized" | "call.in-progress.error-vapifault-openai-401-incorrect-api-key" | "call.in-progress.error-vapifault-openai-401-account-not-in-organization" | "call.in-progress.error-vapifault-openai-403-model-access-denied" | "call.in-progress.error-vapifault-openai-429-exceeded-quota" | "call.in-progress.error-vapifault-openai-429-rate-limit-reached" | "call.in-progress.error-providerfault-openai-500-server-error" | "call.in-progress.error-providerfault-openai-503-server-overloaded-error" | "pipeline-error-azure-openai-400-bad-request-validation-failed" | "pipeline-error-azure-openai-401-unauthorized" | "pipeline-error-azure-openai-403-model-access-denied" | "pipeline-error-azure-openai-429-exceeded-quota" | "pipeline-error-azure-openai-500-server-error" | "pipeline-error-azure-openai-503-server-overloaded-error" | "pipeline-error-azure-openai-llm-failed" | "call.in-progress.error-vapifault-azure-openai-400-bad-request-validation-failed" | 
"call.in-progress.error-vapifault-azure-openai-401-unauthorized" | "call.in-progress.error-vapifault-azure-openai-403-model-access-denied" | "call.in-progress.error-vapifault-azure-openai-429-exceeded-quota" | "call.in-progress.error-providerfault-azure-openai-500-server-error" | "call.in-progress.error-providerfault-azure-openai-503-server-overloaded-error" | "pipeline-error-google-400-bad-request-validation-failed" | "pipeline-error-google-401-unauthorized" | "pipeline-error-google-403-model-access-denied" | "pipeline-error-google-429-exceeded-quota" | "pipeline-error-google-500-server-error" | "pipeline-error-google-503-server-overloaded-error" | "pipeline-error-google-llm-failed" | "call.in-progress.error-vapifault-google-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-google-401-unauthorized" | "call.in-progress.error-vapifault-google-403-model-access-denied" | "call.in-progress.error-vapifault-google-429-exceeded-quota" | "call.in-progress.error-providerfault-google-500-server-error" | "call.in-progress.error-providerfault-google-503-server-overloaded-error" | "pipeline-error-xai-400-bad-request-validation-failed" | "pipeline-error-xai-401-unauthorized" | "pipeline-error-xai-403-model-access-denied" | "pipeline-error-xai-429-exceeded-quota" | "pipeline-error-xai-500-server-error" | "pipeline-error-xai-503-server-overloaded-error" | "pipeline-error-xai-llm-failed" | "call.in-progress.error-vapifault-xai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-xai-401-unauthorized" | "call.in-progress.error-vapifault-xai-403-model-access-denied" | "call.in-progress.error-vapifault-xai-429-exceeded-quota" | "call.in-progress.error-providerfault-xai-500-server-error" | "call.in-progress.error-providerfault-xai-503-server-overloaded-error" | "pipeline-error-mistral-400-bad-request-validation-failed" | "pipeline-error-mistral-401-unauthorized" | "pipeline-error-mistral-403-model-access-denied" | 
"pipeline-error-mistral-429-exceeded-quota" | "pipeline-error-mistral-500-server-error" | "pipeline-error-mistral-503-server-overloaded-error" | "pipeline-error-mistral-llm-failed" | "call.in-progress.error-vapifault-mistral-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-mistral-401-unauthorized" | "call.in-progress.error-vapifault-mistral-403-model-access-denied" | "call.in-progress.error-vapifault-mistral-429-exceeded-quota" | "call.in-progress.error-providerfault-mistral-500-server-error" | "call.in-progress.error-providerfault-mistral-503-server-overloaded-error" | "pipeline-error-inflection-ai-400-bad-request-validation-failed" | "pipeline-error-inflection-ai-401-unauthorized" | "pipeline-error-inflection-ai-403-model-access-denied" | "pipeline-error-inflection-ai-429-exceeded-quota" | "pipeline-error-inflection-ai-500-server-error" | "pipeline-error-inflection-ai-503-server-overloaded-error" | "pipeline-error-inflection-ai-llm-failed" | "call.in-progress.error-vapifault-inflection-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-inflection-ai-401-unauthorized" | "call.in-progress.error-vapifault-inflection-ai-403-model-access-denied" | "call.in-progress.error-vapifault-inflection-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-inflection-ai-500-server-error" | "call.in-progress.error-providerfault-inflection-ai-503-server-overloaded-error" | "pipeline-error-deep-seek-400-bad-request-validation-failed" | "pipeline-error-deep-seek-401-unauthorized" | "pipeline-error-deep-seek-403-model-access-denied" | "pipeline-error-deep-seek-429-exceeded-quota" | "pipeline-error-deep-seek-500-server-error" | "pipeline-error-deep-seek-503-server-overloaded-error" | "pipeline-error-deep-seek-llm-failed" | "call.in-progress.error-vapifault-deep-seek-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-deep-seek-401-unauthorized" | 
"call.in-progress.error-vapifault-deep-seek-403-model-access-denied" | "call.in-progress.error-vapifault-deep-seek-429-exceeded-quota" | "call.in-progress.error-providerfault-deep-seek-500-server-error" | "call.in-progress.error-providerfault-deep-seek-503-server-overloaded-error" | "pipeline-error-groq-400-bad-request-validation-failed" | "pipeline-error-groq-401-unauthorized" | "pipeline-error-groq-403-model-access-denied" | "pipeline-error-groq-429-exceeded-quota" | "pipeline-error-groq-500-server-error" | "pipeline-error-groq-503-server-overloaded-error" | "pipeline-error-groq-llm-failed" | "call.in-progress.error-vapifault-groq-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-groq-401-unauthorized" | "call.in-progress.error-vapifault-groq-403-model-access-denied" | "call.in-progress.error-vapifault-groq-429-exceeded-quota" | "call.in-progress.error-providerfault-groq-500-server-error" | "call.in-progress.error-providerfault-groq-503-server-overloaded-error" | "pipeline-error-cerebras-400-bad-request-validation-failed" | "pipeline-error-cerebras-401-unauthorized" | "pipeline-error-cerebras-403-model-access-denied" | "pipeline-error-cerebras-429-exceeded-quota" | "pipeline-error-cerebras-500-server-error" | "pipeline-error-cerebras-503-server-overloaded-error" | "pipeline-error-cerebras-llm-failed" | "call.in-progress.error-vapifault-cerebras-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-cerebras-401-unauthorized" | "call.in-progress.error-vapifault-cerebras-403-model-access-denied" | "call.in-progress.error-vapifault-cerebras-429-exceeded-quota" | "call.in-progress.error-providerfault-cerebras-500-server-error" | "call.in-progress.error-providerfault-cerebras-503-server-overloaded-error" | "pipeline-error-anthropic-400-bad-request-validation-failed" | "pipeline-error-anthropic-401-unauthorized" | "pipeline-error-anthropic-403-model-access-denied" | "pipeline-error-anthropic-429-exceeded-quota" | 
"pipeline-error-anthropic-500-server-error" | "pipeline-error-anthropic-503-server-overloaded-error" | "pipeline-error-anthropic-llm-failed" | "call.in-progress.error-providerfault-anthropic-llm-failed" | "call.in-progress.error-vapifault-anthropic-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-500-server-error" | "call.in-progress.error-providerfault-anthropic-503-server-overloaded-error" | "pipeline-error-anthropic-bedrock-400-bad-request-validation-failed" | "pipeline-error-anthropic-bedrock-401-unauthorized" | "pipeline-error-anthropic-bedrock-403-model-access-denied" | "pipeline-error-anthropic-bedrock-429-exceeded-quota" | "pipeline-error-anthropic-bedrock-500-server-error" | "pipeline-error-anthropic-bedrock-503-server-overloaded-error" | "pipeline-error-anthropic-bedrock-llm-failed" | "call.in-progress.error-providerfault-anthropic-bedrock-llm-failed" | "call.in-progress.error-vapifault-anthropic-bedrock-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-bedrock-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-bedrock-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-bedrock-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-bedrock-500-server-error" | "call.in-progress.error-providerfault-anthropic-bedrock-503-server-overloaded-error" | "pipeline-error-anthropic-vertex-400-bad-request-validation-failed" | "pipeline-error-anthropic-vertex-401-unauthorized" | "pipeline-error-anthropic-vertex-403-model-access-denied" | "pipeline-error-anthropic-vertex-429-exceeded-quota" | "pipeline-error-anthropic-vertex-500-server-error" | "pipeline-error-anthropic-vertex-503-server-overloaded-error" | "pipeline-error-anthropic-vertex-llm-failed" | 
"call.in-progress.error-providerfault-anthropic-vertex-llm-failed" | "call.in-progress.error-vapifault-anthropic-vertex-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-vertex-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-vertex-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-vertex-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-vertex-500-server-error" | "call.in-progress.error-providerfault-anthropic-vertex-503-server-overloaded-error" | "pipeline-error-together-ai-400-bad-request-validation-failed" | "pipeline-error-together-ai-401-unauthorized" | "pipeline-error-together-ai-403-model-access-denied" | "pipeline-error-together-ai-429-exceeded-quota" | "pipeline-error-together-ai-500-server-error" | "pipeline-error-together-ai-503-server-overloaded-error" | "pipeline-error-together-ai-llm-failed" | "call.in-progress.error-providerfault-together-ai-llm-failed" | "call.in-progress.error-vapifault-together-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-together-ai-401-unauthorized" | "call.in-progress.error-vapifault-together-ai-403-model-access-denied" | "call.in-progress.error-vapifault-together-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-together-ai-500-server-error" | "call.in-progress.error-providerfault-together-ai-503-server-overloaded-error" | "pipeline-error-anyscale-400-bad-request-validation-failed" | "pipeline-error-anyscale-401-unauthorized" | "pipeline-error-anyscale-403-model-access-denied" | "pipeline-error-anyscale-429-exceeded-quota" | "pipeline-error-anyscale-500-server-error" | "pipeline-error-anyscale-503-server-overloaded-error" | "pipeline-error-anyscale-llm-failed" | "call.in-progress.error-providerfault-anyscale-llm-failed" | "call.in-progress.error-vapifault-anyscale-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anyscale-401-unauthorized" | 
"call.in-progress.error-vapifault-anyscale-403-model-access-denied" | "call.in-progress.error-vapifault-anyscale-429-exceeded-quota" | "call.in-progress.error-providerfault-anyscale-500-server-error" | "call.in-progress.error-providerfault-anyscale-503-server-overloaded-error" | "pipeline-error-openrouter-400-bad-request-validation-failed" | "pipeline-error-openrouter-401-unauthorized" | "pipeline-error-openrouter-403-model-access-denied" | "pipeline-error-openrouter-429-exceeded-quota" | "pipeline-error-openrouter-500-server-error" | "pipeline-error-openrouter-503-server-overloaded-error" | "pipeline-error-openrouter-llm-failed" | "call.in-progress.error-providerfault-openrouter-llm-failed" | "call.in-progress.error-vapifault-openrouter-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-openrouter-401-unauthorized" | "call.in-progress.error-vapifault-openrouter-403-model-access-denied" | "call.in-progress.error-vapifault-openrouter-429-exceeded-quota" | "call.in-progress.error-providerfault-openrouter-500-server-error" | "call.in-progress.error-providerfault-openrouter-503-server-overloaded-error" | "pipeline-error-perplexity-ai-400-bad-request-validation-failed" | "pipeline-error-perplexity-ai-401-unauthorized" | "pipeline-error-perplexity-ai-403-model-access-denied" | "pipeline-error-perplexity-ai-429-exceeded-quota" | "pipeline-error-perplexity-ai-500-server-error" | "pipeline-error-perplexity-ai-503-server-overloaded-error" | "pipeline-error-perplexity-ai-llm-failed" | "call.in-progress.error-providerfault-perplexity-ai-llm-failed" | "call.in-progress.error-vapifault-perplexity-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-perplexity-ai-401-unauthorized" | "call.in-progress.error-vapifault-perplexity-ai-403-model-access-denied" | "call.in-progress.error-vapifault-perplexity-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-perplexity-ai-500-server-error" | 
"call.in-progress.error-providerfault-perplexity-ai-503-server-overloaded-error" | "pipeline-error-deepinfra-400-bad-request-validation-failed" | "pipeline-error-deepinfra-401-unauthorized" | "pipeline-error-deepinfra-403-model-access-denied" | "pipeline-error-deepinfra-429-exceeded-quota" | "pipeline-error-deepinfra-500-server-error" | "pipeline-error-deepinfra-503-server-overloaded-error" | "pipeline-error-deepinfra-llm-failed" | "call.in-progress.error-providerfault-deepinfra-llm-failed" | "call.in-progress.error-vapifault-deepinfra-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-deepinfra-401-unauthorized" | "call.in-progress.error-vapifault-deepinfra-403-model-access-denied" | "call.in-progress.error-vapifault-deepinfra-429-exceeded-quota" | "call.in-progress.error-providerfault-deepinfra-500-server-error" | "call.in-progress.error-providerfault-deepinfra-503-server-overloaded-error" | "pipeline-error-runpod-400-bad-request-validation-failed" | "pipeline-error-runpod-401-unauthorized" | "pipeline-error-runpod-403-model-access-denied" | "pipeline-error-runpod-429-exceeded-quota" | "pipeline-error-runpod-500-server-error" | "pipeline-error-runpod-503-server-overloaded-error" | "pipeline-error-runpod-llm-failed" | "call.in-progress.error-providerfault-runpod-llm-failed" | "call.in-progress.error-vapifault-runpod-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-runpod-401-unauthorized" | "call.in-progress.error-vapifault-runpod-403-model-access-denied" | "call.in-progress.error-vapifault-runpod-429-exceeded-quota" | "call.in-progress.error-providerfault-runpod-500-server-error" | "call.in-progress.error-providerfault-runpod-503-server-overloaded-error" | "pipeline-error-custom-llm-400-bad-request-validation-failed" | "pipeline-error-custom-llm-401-unauthorized" | "pipeline-error-custom-llm-403-model-access-denied" | "pipeline-error-custom-llm-429-exceeded-quota" | "pipeline-error-custom-llm-500-server-error" | 
"pipeline-error-custom-llm-503-server-overloaded-error" | "pipeline-error-custom-llm-llm-failed" | "call.in-progress.error-providerfault-custom-llm-llm-failed" | "call.in-progress.error-vapifault-custom-llm-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-custom-llm-401-unauthorized" | "call.in-progress.error-vapifault-custom-llm-403-model-access-denied" | "call.in-progress.error-vapifault-custom-llm-429-exceeded-quota" | "call.in-progress.error-providerfault-custom-llm-500-server-error" | "call.in-progress.error-providerfault-custom-llm-503-server-overloaded-error" | "call.in-progress.error-pipeline-ws-model-connection-failed" | "pipeline-error-custom-voice-failed" | "pipeline-error-cartesia-socket-hang-up" | "pipeline-error-cartesia-requested-payment" | "pipeline-error-cartesia-500-server-error" | "pipeline-error-cartesia-502-server-error" | "pipeline-error-cartesia-503-server-error" | "pipeline-error-cartesia-522-server-error" | "call.in-progress.error-vapifault-cartesia-socket-hang-up" | "call.in-progress.error-vapifault-cartesia-requested-payment" | "call.in-progress.error-providerfault-cartesia-500-server-error" | "call.in-progress.error-providerfault-cartesia-503-server-error" | "call.in-progress.error-providerfault-cartesia-522-server-error" | "pipeline-error-eleven-labs-voice-not-found" | "pipeline-error-eleven-labs-quota-exceeded" | "pipeline-error-eleven-labs-unauthorized-access" | "pipeline-error-eleven-labs-unauthorized-to-access-model" | "pipeline-error-eleven-labs-professional-voices-only-for-creator-plus" | "pipeline-error-eleven-labs-blocked-free-plan-and-requested-upgrade" | "pipeline-error-eleven-labs-blocked-concurrent-requests-and-requested-upgrade" | "pipeline-error-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade" | "pipeline-error-eleven-labs-system-busy-and-requested-upgrade" | "pipeline-error-eleven-labs-voice-not-fine-tuned" | "pipeline-error-eleven-labs-invalid-api-key" | 
"pipeline-error-eleven-labs-invalid-voice-samples" | "pipeline-error-eleven-labs-voice-disabled-by-owner" | "pipeline-error-eleven-labs-vapi-voice-disabled-by-owner" | "pipeline-error-eleven-labs-blocked-account-in-probation" | "pipeline-error-eleven-labs-blocked-content-against-their-policy" | "pipeline-error-eleven-labs-missing-samples-for-voice-clone" | "pipeline-error-eleven-labs-voice-not-fine-tuned-and-cannot-be-used" | "pipeline-error-eleven-labs-voice-not-allowed-for-free-users" | "pipeline-error-eleven-labs-max-character-limit-exceeded" | "pipeline-error-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification" | "pipeline-error-eleven-labs-500-server-error" | "pipeline-error-eleven-labs-503-server-error" | "call.in-progress.error-vapifault-eleven-labs-voice-not-found" | "call.in-progress.error-vapifault-eleven-labs-quota-exceeded" | "call.in-progress.error-vapifault-eleven-labs-unauthorized-access" | "call.in-progress.error-vapifault-eleven-labs-unauthorized-to-access-model" | "call.in-progress.error-vapifault-eleven-labs-professional-voices-only-for-creator-plus" | "call.in-progress.error-vapifault-eleven-labs-blocked-free-plan-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-blocked-concurrent-requests-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-system-busy-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned" | "call.in-progress.error-vapifault-eleven-labs-invalid-api-key" | "call.in-progress.error-vapifault-eleven-labs-invalid-voice-samples" | "call.in-progress.error-vapifault-eleven-labs-voice-disabled-by-owner" | "call.in-progress.error-vapifault-eleven-labs-blocked-account-in-probation" | "call.in-progress.error-vapifault-eleven-labs-blocked-content-against-their-policy" | 
"call.in-progress.error-vapifault-eleven-labs-missing-samples-for-voice-clone" | "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned-and-cannot-be-used" | "call.in-progress.error-vapifault-eleven-labs-voice-not-allowed-for-free-users" | "call.in-progress.error-vapifault-eleven-labs-max-character-limit-exceeded" | "call.in-progress.error-vapifault-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification" | "call.in-progress.error-providerfault-eleven-labs-system-busy-and-requested-upgrade" | "call.in-progress.error-providerfault-eleven-labs-500-server-error" | "call.in-progress.error-providerfault-eleven-labs-503-server-error" | "pipeline-error-playht-request-timed-out" | "pipeline-error-playht-invalid-voice" | "pipeline-error-playht-unexpected-error" | "pipeline-error-playht-out-of-credits" | "pipeline-error-playht-invalid-emotion" | "pipeline-error-playht-voice-must-be-a-valid-voice-manifest-uri" | "pipeline-error-playht-401-unauthorized" | "pipeline-error-playht-403-forbidden-out-of-characters" | "pipeline-error-playht-403-forbidden-api-access-not-available" | "pipeline-error-playht-429-exceeded-quota" | "pipeline-error-playht-502-gateway-error" | "pipeline-error-playht-504-gateway-error" | "call.in-progress.error-vapifault-playht-request-timed-out" | "call.in-progress.error-vapifault-playht-invalid-voice" | "call.in-progress.error-vapifault-playht-unexpected-error" | "call.in-progress.error-vapifault-playht-out-of-credits" | "call.in-progress.error-vapifault-playht-invalid-emotion" | "call.in-progress.error-vapifault-playht-voice-must-be-a-valid-voice-manifest-uri" | "call.in-progress.error-vapifault-playht-401-unauthorized" | "call.in-progress.error-vapifault-playht-403-forbidden-out-of-characters" | "call.in-progress.error-vapifault-playht-403-forbidden-api-access-not-available" | "call.in-progress.error-vapifault-playht-429-exceeded-quota" | "call.in-progress.error-providerfault-playht-502-gateway-error" | 
"call.in-progress.error-providerfault-playht-504-gateway-error" | "pipeline-error-custom-transcriber-failed" | "call.in-progress.error-vapifault-custom-transcriber-failed" | "pipeline-error-eleven-labs-transcriber-failed" | "call.in-progress.error-vapifault-eleven-labs-transcriber-failed" | "pipeline-error-deepgram-returning-400-no-such-model-language-tier-combination" | "pipeline-error-deepgram-returning-401-invalid-credentials" | "pipeline-error-deepgram-returning-403-model-access-denied" | "pipeline-error-deepgram-returning-404-not-found" | "pipeline-error-deepgram-returning-500-invalid-json" | "pipeline-error-deepgram-returning-502-network-error" | "pipeline-error-deepgram-returning-502-bad-gateway-ehostunreach" | "pipeline-error-deepgram-returning-econnreset" | "call.in-progress.error-vapifault-deepgram-returning-400-no-such-model-language-tier-combination" | "call.in-progress.error-vapifault-deepgram-returning-401-invalid-credentials" | "call.in-progress.error-vapifault-deepgram-returning-404-not-found" | "call.in-progress.error-vapifault-deepgram-returning-403-model-access-denied" | "call.in-progress.error-providerfault-deepgram-returning-500-invalid-json" | "call.in-progress.error-providerfault-deepgram-returning-502-network-error" | "call.in-progress.error-providerfault-deepgram-returning-502-bad-gateway-ehostunreach" | "pipeline-error-google-transcriber-failed" | "call.in-progress.error-vapifault-google-transcriber-failed" | "pipeline-error-openai-transcriber-failed" | "call.in-progress.error-vapifault-openai-transcriber-failed" | "call.in-progress.error-warm-transfer-max-duration" | "call.in-progress.error-warm-transfer-assistant-cancelled" | "call.in-progress.error-warm-transfer-silence-timeout" | "call.in-progress.error-warm-transfer-microphone-timeout" | "assistant-ended-call" | "assistant-said-end-call-phrase" | "assistant-ended-call-with-hangup-task" | "assistant-ended-call-after-message-spoken" | "assistant-forwarded-call" | 
"assistant-join-timed-out" | "call.in-progress.error-assistant-did-not-receive-customer-audio" | "call.in-progress.error-transfer-failed" | "customer-busy" | "customer-ended-call" | "customer-ended-call-before-warm-transfer" | "customer-ended-call-after-warm-transfer-attempt" | "customer-did-not-answer" | "customer-did-not-give-microphone-permission" | "exceeded-max-duration" | "manually-canceled" | "phone-call-provider-closed-websocket" | "call.forwarding.operator-busy" | "silence-timed-out" | "call.in-progress.error-sip-inbound-call-failed-to-connect" | "call.in-progress.error-providerfault-outbound-sip-403-forbidden" | "call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required" | "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable" | "call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable" | "call.in-progress.error-sip-outbound-call-failed-to-connect" | "call.ringing.hook-executed-say" | "call.ringing.hook-executed-transfer" | "call.ending.hook-executed-say" | "call.ending.hook-executed-transfer" | "call.ringing.sip-inbound-caller-hungup-before-call-connect" | "call.ringing.error-sip-inbound-call-failed-to-connect" | "twilio-failed-to-connect-call" | "twilio-reported-customer-misdialed" | "vonage-rejected" | "voicemail" | "call-deleted"; /** This is the cost of the call in USD. This can also be found at `call.cost` on GET /call/:id. */ cost?: number; /** These are the costs of individual components of the call in USD. This can also be found at `call.costs` on GET /call/:id. */ costs?: ( | TransportCost | TranscriberCost | ModelCost | VoiceCost | VapiCost | VoicemailDetectionCost | AnalysisCost | KnowledgeBaseCost )[]; /** This is the timestamp of the message. */ timestamp?: number; /** These are the artifacts from the call. This can also be found at `call.artifact` on GET /call/:id. */ artifact: Artifact; /** This is the assistant that the message is associated with. 
*/
  assistant?: CreateAssistantDTO;
  /** This is the customer that the message is associated with. */
  customer?: CreateCustomerDTO;
  /** This is the call that the message is associated with. */
  call?: Call;
  /** This is the chat object. */
  chat?: Chat;
  /** This is the analysis of the call. This can also be found at `call.analysis` on GET /call/:id. */
  analysis: Analysis;
  /**
   * This is the ISO 8601 date-time string of when the call started. This can also be found at `call.startedAt` on GET /call/:id.
   * @format date-time
   */
  startedAt?: string;
  /**
   * This is the ISO 8601 date-time string of when the call ended. This can also be found at `call.endedAt` on GET /call/:id.
   * @format date-time
   */
  endedAt?: string;
  /** This is the compliance result of the call. This can also be found at `call.compliance` on GET /call/:id. */
  compliance?: Compliance;
}

/**
 * Server webhook payload sent when the model is requesting a handoff but the
 * destination is unknown (see the `type` field below); the receiving server is
 * expected to resolve the destination from `parameters`.
 */
export interface ServerMessageHandoffDestinationRequest {
  /** This is the phone number that the message is associated with. */
  phoneNumber?:
    | CreateByoPhoneNumberDTO
    | CreateTwilioPhoneNumberDTO
    | CreateVonagePhoneNumberDTO
    | CreateVapiPhoneNumberDTO
    | CreateTelnyxPhoneNumberDTO;
  /** This is the type of the message. "handoff-destination-request" is sent when the model is requesting handoff but destination is unknown. */
  type: "handoff-destination-request";
  /** This is the timestamp of the message. */
  timestamp?: number;
  /**
   * This is a live version of the `call.artifact`.
   *
   * This matches what is stored on `call.artifact` after the call.
   */
  artifact?: Artifact;
  /** This is the assistant that the message is associated with. */
  assistant?: CreateAssistantDTO;
  /** This is the customer that the message is associated with. */
  customer?: CreateCustomerDTO;
  /** This is the call that the message is associated with. */
  call?: Call;
  /** This is the chat object. */
  chat?: Chat;
  /** This is the parameters of the handoff destination request. */
  parameters: object;
}

/**
 * Server webhook payload sent when the assistant is hanging due to a delay
 * (slow model/voice response, or a tool call still awaiting the server).
 */
export interface ServerMessageHang {
  /** This is the phone number that the message is associated with. */
  phoneNumber?:
    | CreateByoPhoneNumberDTO
    | CreateTwilioPhoneNumberDTO
    | CreateVonagePhoneNumberDTO
    | CreateVapiPhoneNumberDTO
    | CreateTelnyxPhoneNumberDTO;
  /**
   * This is the type of the message. "hang" is sent when the assistant is hanging due to a delay. The delay can be caused by many factors, such as:
   * - the model is too slow to respond
   * - the voice is too slow to respond
   * - the tool call is still waiting for a response from your server
   * - etc.
   */
  type: "hang";
  /** This is the timestamp of the message. */
  timestamp?: number;
  /**
   * This is a live version of the `call.artifact`.
   *
   * This matches what is stored on `call.artifact` after the call.
   */
  artifact?: Artifact;
  /** This is the assistant that the message is associated with. */
  assistant?: CreateAssistantDTO;
  /** This is the customer that the message is associated with. */
  customer?: CreateCustomerDTO;
  /** This is the call that the message is associated with. */
  call?: Call;
  /** This is the chat object. */
  chat?: Chat;
}

/**
 * Server webhook payload requesting knowledge base documents. Enabled via
 * `assistant.knowledgeBase.provider=custom-knowledge-base`.
 */
export interface ServerMessageKnowledgeBaseRequest {
  /** This is the phone number that the message is associated with. */
  phoneNumber?:
    | CreateByoPhoneNumberDTO
    | CreateTwilioPhoneNumberDTO
    | CreateVonagePhoneNumberDTO
    | CreateVapiPhoneNumberDTO
    | CreateTelnyxPhoneNumberDTO;
  /** This is the type of the message. "knowledge-base-request" is sent to request knowledge base documents. To enable, use `assistant.knowledgeBase.provider=custom-knowledge-base`. */
  type: "knowledge-base-request";
  /** These are the messages that are going to be sent to the `model` right after the `knowledge-base-request` webhook completes. */
  messages?: (
    | UserMessage
    | SystemMessage
    | BotMessage
    | ToolCallMessage
    | ToolCallResultMessage
  )[];
  /** This is just `messages` formatted for OpenAI. */
  messagesOpenAIFormatted: OpenAIMessage[];
  /** This is the timestamp of the message. */
  timestamp?: number;
  /**
   * This is a live version of the `call.artifact`.
   *
   * This matches what is stored on `call.artifact` after the call.
   */
  artifact?: Artifact;
  /** This is the assistant that the message is associated with. */
  assistant?: CreateAssistantDTO;
  /** This is the customer that the message is associated with. */
  customer?: CreateCustomerDTO;
  /** This is the call that the message is associated with. */
  call?: Call;
  /** This is the chat object. */
  chat?: Chat;
}

/**
 * Server webhook payload emitted as the model outputs tokens or tool calls
 * (see `output`).
 */
export interface ServerMessageModelOutput {
  /** This is the phone number that the message is associated with. */
  phoneNumber?:
    | CreateByoPhoneNumberDTO
    | CreateTwilioPhoneNumberDTO
    | CreateVonagePhoneNumberDTO
    | CreateVapiPhoneNumberDTO
    | CreateTelnyxPhoneNumberDTO;
  /** This is the type of the message. "model-output" is sent as the model outputs tokens. */
  type: "model-output";
  /** This is the timestamp of the message. */
  timestamp?: number;
  /**
   * This is a live version of the `call.artifact`.
   *
   * This matches what is stored on `call.artifact` after the call.
   */
  artifact?: Artifact;
  /** This is the assistant that the message is associated with. */
  assistant?: CreateAssistantDTO;
  /** This is the customer that the message is associated with. */
  customer?: CreateCustomerDTO;
  /** This is the call that the message is associated with. */
  call?: Call;
  /** This is the chat object. */
  chat?: Chat;
  /** This is the output of the model. It can be a token or tool call. */
  output: object;
}

/**
 * Server webhook payload for delegated call control: when requested in
 * `assistant.serverMessages`, your server performs the forward/hang-up
 * instead of Vapi (see the `request` and `destination` fields).
 */
export interface ServerMessagePhoneCallControl {
  /** This is the phone number that the message is associated with. */
  phoneNumber?:
    | CreateByoPhoneNumberDTO
    | CreateTwilioPhoneNumberDTO
    | CreateVonagePhoneNumberDTO
    | CreateVapiPhoneNumberDTO
    | CreateTelnyxPhoneNumberDTO;
  /**
   * This is the type of the message. "phone-call-control" is an advanced type of message.
   *
   * When it is requested in `assistant.serverMessages`, the hangup and forwarding responsibilities are delegated to your server.
   * Vapi will no longer do the actual transfer and hangup.
   */
  type: "phone-call-control";
  /** This is the request to control the phone call. */
  request: "forward" | "hang-up";
  /** This is the destination to forward the call to if the request is "forward". */
  destination?: TransferDestinationNumber | TransferDestinationSip;
  /** This is the timestamp of the message. */
  timestamp?: number;
  /**
   * This is a live version of the `call.artifact`.
   *
   * This matches what is stored on `call.artifact` after the call.
   */
  artifact?: Artifact;
  /** This is the assistant that the message is associated with. */
  assistant?: CreateAssistantDTO;
  /** This is the customer that the message is associated with. */
  customer?: CreateCustomerDTO;
  /** This is the call that the message is associated with. */
  call?: Call;
  /** This is the chat object. */
  chat?: Chat;
}

/**
 * Server webhook payload sent whenever the assistant or the user starts or
 * stops speaking (see `status`, `role`, and the 0-indexed `turn`).
 */
export interface ServerMessageSpeechUpdate {
  /** This is the phone number that the message is associated with. */
  phoneNumber?:
    | CreateByoPhoneNumberDTO
    | CreateTwilioPhoneNumberDTO
    | CreateVonagePhoneNumberDTO
    | CreateVapiPhoneNumberDTO
    | CreateTelnyxPhoneNumberDTO;
  /** This is the type of the message. "speech-update" is sent whenever assistant or user start or stop speaking. */
  type: "speech-update";
  /** This is the status of the speech update. */
  status: "started" | "stopped";
  /** This is the role which the speech update is for. */
  role: "assistant" | "user";
  /** This is the turn number of the speech update (0-indexed). */
  turn?: number;
  /** This is the timestamp of the message. */
  timestamp?: number;
  /**
   * This is a live version of the `call.artifact`.
   *
   * This matches what is stored on `call.artifact` after the call.
   */
  artifact?: Artifact;
  /** This is the assistant that the message is associated with. */
  assistant?: CreateAssistantDTO;
  /** This is the customer that the message is associated with. */
  customer?: CreateCustomerDTO;
  /** This is the call that the message is associated with. */
  call?: Call;
  /** This is the chat object.
*/ chat?: Chat; } export interface ServerMessageStatusUpdate { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "status-update" is sent whenever the `call.status` changes. */ type: "status-update"; /** This is the status of the call. */ status: | "scheduled" | "queued" | "ringing" | "in-progress" | "forwarding" | "ended" | "not-found" | "deletion-failed"; /** This is the reason the call ended. This is only sent if the status is "ended". */ endedReason?: | "call-start-error-neither-assistant-nor-server-set" | "assistant-request-failed" | "assistant-request-returned-error" | "assistant-request-returned-unspeakable-error" | "assistant-request-returned-invalid-assistant" | "assistant-request-returned-no-assistant" | "assistant-request-returned-forwarding-phone-number" | "scheduled-call-deleted" | "call.start.error-vapifault-get-org" | "call.start.error-vapifault-get-subscription" | "call.start.error-get-assistant" | "call.start.error-get-phone-number" | "call.start.error-get-customer" | "call.start.error-get-resources-validation" | "call.start.error-vapi-number-international" | "call.start.error-vapi-number-outbound-daily-limit" | "call.start.error-get-transport" | "call.start.error-subscription-wallet-does-not-exist" | "call.start.error-fraud-check-failed" | "call.start.error-subscription-frozen" | "call.start.error-subscription-insufficient-credits" | "call.start.error-subscription-upgrade-failed" | "call.start.error-subscription-concurrency-limit-reached" | "call.start.error-enterprise-feature-not-available-recording-consent" | "assistant-not-valid" | "call.start.error-vapifault-database-error" | "assistant-not-found" | "pipeline-error-openai-voice-failed" | "pipeline-error-cartesia-voice-failed" | "pipeline-error-deepgram-voice-failed" | 
"pipeline-error-eleven-labs-voice-failed" | "pipeline-error-playht-voice-failed" | "pipeline-error-lmnt-voice-failed" | "pipeline-error-azure-voice-failed" | "pipeline-error-rime-ai-voice-failed" | "pipeline-error-smallest-ai-voice-failed" | "pipeline-error-vapi-voice-failed" | "pipeline-error-neuphonic-voice-failed" | "pipeline-error-hume-voice-failed" | "pipeline-error-sesame-voice-failed" | "pipeline-error-inworld-voice-failed" | "pipeline-error-minimax-voice-failed" | "pipeline-error-tavus-video-failed" | "call.in-progress.error-vapifault-openai-voice-failed" | "call.in-progress.error-vapifault-cartesia-voice-failed" | "call.in-progress.error-vapifault-deepgram-voice-failed" | "call.in-progress.error-vapifault-eleven-labs-voice-failed" | "call.in-progress.error-vapifault-playht-voice-failed" | "call.in-progress.error-vapifault-lmnt-voice-failed" | "call.in-progress.error-vapifault-azure-voice-failed" | "call.in-progress.error-vapifault-rime-ai-voice-failed" | "call.in-progress.error-vapifault-smallest-ai-voice-failed" | "call.in-progress.error-vapifault-vapi-voice-failed" | "call.in-progress.error-vapifault-neuphonic-voice-failed" | "call.in-progress.error-vapifault-hume-voice-failed" | "call.in-progress.error-vapifault-sesame-voice-failed" | "call.in-progress.error-vapifault-inworld-voice-failed" | "call.in-progress.error-vapifault-minimax-voice-failed" | "call.in-progress.error-vapifault-tavus-video-failed" | "pipeline-error-vapi-llm-failed" | "pipeline-error-vapi-400-bad-request-validation-failed" | "pipeline-error-vapi-401-unauthorized" | "pipeline-error-vapi-403-model-access-denied" | "pipeline-error-vapi-429-exceeded-quota" | "pipeline-error-vapi-500-server-error" | "pipeline-error-vapi-503-server-overloaded-error" | "call.in-progress.error-providerfault-vapi-llm-failed" | "call.in-progress.error-vapifault-vapi-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-vapi-401-unauthorized" | 
"call.in-progress.error-vapifault-vapi-403-model-access-denied" | "call.in-progress.error-vapifault-vapi-429-exceeded-quota" | "call.in-progress.error-providerfault-vapi-500-server-error" | "call.in-progress.error-providerfault-vapi-503-server-overloaded-error" | "pipeline-error-deepgram-transcriber-failed" | "pipeline-error-deepgram-transcriber-api-key-missing" | "call.in-progress.error-vapifault-deepgram-transcriber-failed" | "pipeline-error-gladia-transcriber-failed" | "call.in-progress.error-vapifault-gladia-transcriber-failed" | "pipeline-error-speechmatics-transcriber-failed" | "call.in-progress.error-vapifault-speechmatics-transcriber-failed" | "pipeline-error-assembly-ai-transcriber-failed" | "pipeline-error-assembly-ai-returning-400-insufficent-funds" | "pipeline-error-assembly-ai-returning-400-paid-only-feature" | "pipeline-error-assembly-ai-returning-401-invalid-credentials" | "pipeline-error-assembly-ai-returning-500-invalid-schema" | "pipeline-error-assembly-ai-returning-500-word-boost-parsing-failed" | "call.in-progress.error-vapifault-assembly-ai-transcriber-failed" | "call.in-progress.error-vapifault-assembly-ai-returning-400-insufficent-funds" | "call.in-progress.error-vapifault-assembly-ai-returning-400-paid-only-feature" | "call.in-progress.error-vapifault-assembly-ai-returning-401-invalid-credentials" | "call.in-progress.error-vapifault-assembly-ai-returning-500-invalid-schema" | "call.in-progress.error-vapifault-assembly-ai-returning-500-word-boost-parsing-failed" | "pipeline-error-talkscriber-transcriber-failed" | "call.in-progress.error-vapifault-talkscriber-transcriber-failed" | "pipeline-error-azure-speech-transcriber-failed" | "call.in-progress.error-vapifault-azure-speech-transcriber-failed" | "call.in-progress.error-pipeline-no-available-llm-model" | "worker-shutdown" | "vonage-disconnected" | "vonage-failed-to-connect-call" | "vonage-completed" | "phone-call-provider-bypass-enabled-but-no-call-received" | 
"call.in-progress.error-providerfault-transport-never-connected" | "call.in-progress.error-vapifault-worker-not-available" | "call.in-progress.error-vapifault-transport-never-connected" | "call.in-progress.error-vapifault-transport-connected-but-call-not-active" | "call.in-progress.error-vapifault-call-started-but-connection-to-transport-missing" | "call.in-progress.error-vapifault-worker-died" | "call.in-progress.twilio-completed-call" | "call.in-progress.sip-completed-call" | "call.in-progress.error-providerfault-openai-llm-failed" | "call.in-progress.error-providerfault-azure-openai-llm-failed" | "call.in-progress.error-providerfault-groq-llm-failed" | "call.in-progress.error-providerfault-google-llm-failed" | "call.in-progress.error-providerfault-xai-llm-failed" | "call.in-progress.error-providerfault-mistral-llm-failed" | "call.in-progress.error-providerfault-inflection-ai-llm-failed" | "call.in-progress.error-providerfault-cerebras-llm-failed" | "call.in-progress.error-providerfault-deep-seek-llm-failed" | "call.in-progress.error-vapifault-chat-pipeline-failed-to-start" | "pipeline-error-openai-400-bad-request-validation-failed" | "pipeline-error-openai-401-unauthorized" | "pipeline-error-openai-401-incorrect-api-key" | "pipeline-error-openai-401-account-not-in-organization" | "pipeline-error-openai-403-model-access-denied" | "pipeline-error-openai-429-exceeded-quota" | "pipeline-error-openai-429-rate-limit-reached" | "pipeline-error-openai-500-server-error" | "pipeline-error-openai-503-server-overloaded-error" | "pipeline-error-openai-llm-failed" | "call.in-progress.error-vapifault-openai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-openai-401-unauthorized" | "call.in-progress.error-vapifault-openai-401-incorrect-api-key" | "call.in-progress.error-vapifault-openai-401-account-not-in-organization" | "call.in-progress.error-vapifault-openai-403-model-access-denied" | "call.in-progress.error-vapifault-openai-429-exceeded-quota" | 
"call.in-progress.error-vapifault-openai-429-rate-limit-reached" | "call.in-progress.error-providerfault-openai-500-server-error" | "call.in-progress.error-providerfault-openai-503-server-overloaded-error" | "pipeline-error-azure-openai-400-bad-request-validation-failed" | "pipeline-error-azure-openai-401-unauthorized" | "pipeline-error-azure-openai-403-model-access-denied" | "pipeline-error-azure-openai-429-exceeded-quota" | "pipeline-error-azure-openai-500-server-error" | "pipeline-error-azure-openai-503-server-overloaded-error" | "pipeline-error-azure-openai-llm-failed" | "call.in-progress.error-vapifault-azure-openai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-azure-openai-401-unauthorized" | "call.in-progress.error-vapifault-azure-openai-403-model-access-denied" | "call.in-progress.error-vapifault-azure-openai-429-exceeded-quota" | "call.in-progress.error-providerfault-azure-openai-500-server-error" | "call.in-progress.error-providerfault-azure-openai-503-server-overloaded-error" | "pipeline-error-google-400-bad-request-validation-failed" | "pipeline-error-google-401-unauthorized" | "pipeline-error-google-403-model-access-denied" | "pipeline-error-google-429-exceeded-quota" | "pipeline-error-google-500-server-error" | "pipeline-error-google-503-server-overloaded-error" | "pipeline-error-google-llm-failed" | "call.in-progress.error-vapifault-google-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-google-401-unauthorized" | "call.in-progress.error-vapifault-google-403-model-access-denied" | "call.in-progress.error-vapifault-google-429-exceeded-quota" | "call.in-progress.error-providerfault-google-500-server-error" | "call.in-progress.error-providerfault-google-503-server-overloaded-error" | "pipeline-error-xai-400-bad-request-validation-failed" | "pipeline-error-xai-401-unauthorized" | "pipeline-error-xai-403-model-access-denied" | "pipeline-error-xai-429-exceeded-quota" | "pipeline-error-xai-500-server-error" | 
"pipeline-error-xai-503-server-overloaded-error" | "pipeline-error-xai-llm-failed" | "call.in-progress.error-vapifault-xai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-xai-401-unauthorized" | "call.in-progress.error-vapifault-xai-403-model-access-denied" | "call.in-progress.error-vapifault-xai-429-exceeded-quota" | "call.in-progress.error-providerfault-xai-500-server-error" | "call.in-progress.error-providerfault-xai-503-server-overloaded-error" | "pipeline-error-mistral-400-bad-request-validation-failed" | "pipeline-error-mistral-401-unauthorized" | "pipeline-error-mistral-403-model-access-denied" | "pipeline-error-mistral-429-exceeded-quota" | "pipeline-error-mistral-500-server-error" | "pipeline-error-mistral-503-server-overloaded-error" | "pipeline-error-mistral-llm-failed" | "call.in-progress.error-vapifault-mistral-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-mistral-401-unauthorized" | "call.in-progress.error-vapifault-mistral-403-model-access-denied" | "call.in-progress.error-vapifault-mistral-429-exceeded-quota" | "call.in-progress.error-providerfault-mistral-500-server-error" | "call.in-progress.error-providerfault-mistral-503-server-overloaded-error" | "pipeline-error-inflection-ai-400-bad-request-validation-failed" | "pipeline-error-inflection-ai-401-unauthorized" | "pipeline-error-inflection-ai-403-model-access-denied" | "pipeline-error-inflection-ai-429-exceeded-quota" | "pipeline-error-inflection-ai-500-server-error" | "pipeline-error-inflection-ai-503-server-overloaded-error" | "pipeline-error-inflection-ai-llm-failed" | "call.in-progress.error-vapifault-inflection-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-inflection-ai-401-unauthorized" | "call.in-progress.error-vapifault-inflection-ai-403-model-access-denied" | "call.in-progress.error-vapifault-inflection-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-inflection-ai-500-server-error" | 
"call.in-progress.error-providerfault-inflection-ai-503-server-overloaded-error" | "pipeline-error-deep-seek-400-bad-request-validation-failed" | "pipeline-error-deep-seek-401-unauthorized" | "pipeline-error-deep-seek-403-model-access-denied" | "pipeline-error-deep-seek-429-exceeded-quota" | "pipeline-error-deep-seek-500-server-error" | "pipeline-error-deep-seek-503-server-overloaded-error" | "pipeline-error-deep-seek-llm-failed" | "call.in-progress.error-vapifault-deep-seek-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-deep-seek-401-unauthorized" | "call.in-progress.error-vapifault-deep-seek-403-model-access-denied" | "call.in-progress.error-vapifault-deep-seek-429-exceeded-quota" | "call.in-progress.error-providerfault-deep-seek-500-server-error" | "call.in-progress.error-providerfault-deep-seek-503-server-overloaded-error" | "pipeline-error-groq-400-bad-request-validation-failed" | "pipeline-error-groq-401-unauthorized" | "pipeline-error-groq-403-model-access-denied" | "pipeline-error-groq-429-exceeded-quota" | "pipeline-error-groq-500-server-error" | "pipeline-error-groq-503-server-overloaded-error" | "pipeline-error-groq-llm-failed" | "call.in-progress.error-vapifault-groq-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-groq-401-unauthorized" | "call.in-progress.error-vapifault-groq-403-model-access-denied" | "call.in-progress.error-vapifault-groq-429-exceeded-quota" | "call.in-progress.error-providerfault-groq-500-server-error" | "call.in-progress.error-providerfault-groq-503-server-overloaded-error" | "pipeline-error-cerebras-400-bad-request-validation-failed" | "pipeline-error-cerebras-401-unauthorized" | "pipeline-error-cerebras-403-model-access-denied" | "pipeline-error-cerebras-429-exceeded-quota" | "pipeline-error-cerebras-500-server-error" | "pipeline-error-cerebras-503-server-overloaded-error" | "pipeline-error-cerebras-llm-failed" | 
"call.in-progress.error-vapifault-cerebras-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-cerebras-401-unauthorized" | "call.in-progress.error-vapifault-cerebras-403-model-access-denied" | "call.in-progress.error-vapifault-cerebras-429-exceeded-quota" | "call.in-progress.error-providerfault-cerebras-500-server-error" | "call.in-progress.error-providerfault-cerebras-503-server-overloaded-error" | "pipeline-error-anthropic-400-bad-request-validation-failed" | "pipeline-error-anthropic-401-unauthorized" | "pipeline-error-anthropic-403-model-access-denied" | "pipeline-error-anthropic-429-exceeded-quota" | "pipeline-error-anthropic-500-server-error" | "pipeline-error-anthropic-503-server-overloaded-error" | "pipeline-error-anthropic-llm-failed" | "call.in-progress.error-providerfault-anthropic-llm-failed" | "call.in-progress.error-vapifault-anthropic-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-500-server-error" | "call.in-progress.error-providerfault-anthropic-503-server-overloaded-error" | "pipeline-error-anthropic-bedrock-400-bad-request-validation-failed" | "pipeline-error-anthropic-bedrock-401-unauthorized" | "pipeline-error-anthropic-bedrock-403-model-access-denied" | "pipeline-error-anthropic-bedrock-429-exceeded-quota" | "pipeline-error-anthropic-bedrock-500-server-error" | "pipeline-error-anthropic-bedrock-503-server-overloaded-error" | "pipeline-error-anthropic-bedrock-llm-failed" | "call.in-progress.error-providerfault-anthropic-bedrock-llm-failed" | "call.in-progress.error-vapifault-anthropic-bedrock-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-bedrock-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-bedrock-403-model-access-denied" | 
"call.in-progress.error-vapifault-anthropic-bedrock-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-bedrock-500-server-error" | "call.in-progress.error-providerfault-anthropic-bedrock-503-server-overloaded-error" | "pipeline-error-anthropic-vertex-400-bad-request-validation-failed" | "pipeline-error-anthropic-vertex-401-unauthorized" | "pipeline-error-anthropic-vertex-403-model-access-denied" | "pipeline-error-anthropic-vertex-429-exceeded-quota" | "pipeline-error-anthropic-vertex-500-server-error" | "pipeline-error-anthropic-vertex-503-server-overloaded-error" | "pipeline-error-anthropic-vertex-llm-failed" | "call.in-progress.error-providerfault-anthropic-vertex-llm-failed" | "call.in-progress.error-vapifault-anthropic-vertex-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anthropic-vertex-401-unauthorized" | "call.in-progress.error-vapifault-anthropic-vertex-403-model-access-denied" | "call.in-progress.error-vapifault-anthropic-vertex-429-exceeded-quota" | "call.in-progress.error-providerfault-anthropic-vertex-500-server-error" | "call.in-progress.error-providerfault-anthropic-vertex-503-server-overloaded-error" | "pipeline-error-together-ai-400-bad-request-validation-failed" | "pipeline-error-together-ai-401-unauthorized" | "pipeline-error-together-ai-403-model-access-denied" | "pipeline-error-together-ai-429-exceeded-quota" | "pipeline-error-together-ai-500-server-error" | "pipeline-error-together-ai-503-server-overloaded-error" | "pipeline-error-together-ai-llm-failed" | "call.in-progress.error-providerfault-together-ai-llm-failed" | "call.in-progress.error-vapifault-together-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-together-ai-401-unauthorized" | "call.in-progress.error-vapifault-together-ai-403-model-access-denied" | "call.in-progress.error-vapifault-together-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-together-ai-500-server-error" | 
"call.in-progress.error-providerfault-together-ai-503-server-overloaded-error" | "pipeline-error-anyscale-400-bad-request-validation-failed" | "pipeline-error-anyscale-401-unauthorized" | "pipeline-error-anyscale-403-model-access-denied" | "pipeline-error-anyscale-429-exceeded-quota" | "pipeline-error-anyscale-500-server-error" | "pipeline-error-anyscale-503-server-overloaded-error" | "pipeline-error-anyscale-llm-failed" | "call.in-progress.error-providerfault-anyscale-llm-failed" | "call.in-progress.error-vapifault-anyscale-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-anyscale-401-unauthorized" | "call.in-progress.error-vapifault-anyscale-403-model-access-denied" | "call.in-progress.error-vapifault-anyscale-429-exceeded-quota" | "call.in-progress.error-providerfault-anyscale-500-server-error" | "call.in-progress.error-providerfault-anyscale-503-server-overloaded-error" | "pipeline-error-openrouter-400-bad-request-validation-failed" | "pipeline-error-openrouter-401-unauthorized" | "pipeline-error-openrouter-403-model-access-denied" | "pipeline-error-openrouter-429-exceeded-quota" | "pipeline-error-openrouter-500-server-error" | "pipeline-error-openrouter-503-server-overloaded-error" | "pipeline-error-openrouter-llm-failed" | "call.in-progress.error-providerfault-openrouter-llm-failed" | "call.in-progress.error-vapifault-openrouter-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-openrouter-401-unauthorized" | "call.in-progress.error-vapifault-openrouter-403-model-access-denied" | "call.in-progress.error-vapifault-openrouter-429-exceeded-quota" | "call.in-progress.error-providerfault-openrouter-500-server-error" | "call.in-progress.error-providerfault-openrouter-503-server-overloaded-error" | "pipeline-error-perplexity-ai-400-bad-request-validation-failed" | "pipeline-error-perplexity-ai-401-unauthorized" | "pipeline-error-perplexity-ai-403-model-access-denied" | "pipeline-error-perplexity-ai-429-exceeded-quota" | 
"pipeline-error-perplexity-ai-500-server-error" | "pipeline-error-perplexity-ai-503-server-overloaded-error" | "pipeline-error-perplexity-ai-llm-failed" | "call.in-progress.error-providerfault-perplexity-ai-llm-failed" | "call.in-progress.error-vapifault-perplexity-ai-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-perplexity-ai-401-unauthorized" | "call.in-progress.error-vapifault-perplexity-ai-403-model-access-denied" | "call.in-progress.error-vapifault-perplexity-ai-429-exceeded-quota" | "call.in-progress.error-providerfault-perplexity-ai-500-server-error" | "call.in-progress.error-providerfault-perplexity-ai-503-server-overloaded-error" | "pipeline-error-deepinfra-400-bad-request-validation-failed" | "pipeline-error-deepinfra-401-unauthorized" | "pipeline-error-deepinfra-403-model-access-denied" | "pipeline-error-deepinfra-429-exceeded-quota" | "pipeline-error-deepinfra-500-server-error" | "pipeline-error-deepinfra-503-server-overloaded-error" | "pipeline-error-deepinfra-llm-failed" | "call.in-progress.error-providerfault-deepinfra-llm-failed" | "call.in-progress.error-vapifault-deepinfra-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-deepinfra-401-unauthorized" | "call.in-progress.error-vapifault-deepinfra-403-model-access-denied" | "call.in-progress.error-vapifault-deepinfra-429-exceeded-quota" | "call.in-progress.error-providerfault-deepinfra-500-server-error" | "call.in-progress.error-providerfault-deepinfra-503-server-overloaded-error" | "pipeline-error-runpod-400-bad-request-validation-failed" | "pipeline-error-runpod-401-unauthorized" | "pipeline-error-runpod-403-model-access-denied" | "pipeline-error-runpod-429-exceeded-quota" | "pipeline-error-runpod-500-server-error" | "pipeline-error-runpod-503-server-overloaded-error" | "pipeline-error-runpod-llm-failed" | "call.in-progress.error-providerfault-runpod-llm-failed" | "call.in-progress.error-vapifault-runpod-400-bad-request-validation-failed" | 
"call.in-progress.error-vapifault-runpod-401-unauthorized" | "call.in-progress.error-vapifault-runpod-403-model-access-denied" | "call.in-progress.error-vapifault-runpod-429-exceeded-quota" | "call.in-progress.error-providerfault-runpod-500-server-error" | "call.in-progress.error-providerfault-runpod-503-server-overloaded-error" | "pipeline-error-custom-llm-400-bad-request-validation-failed" | "pipeline-error-custom-llm-401-unauthorized" | "pipeline-error-custom-llm-403-model-access-denied" | "pipeline-error-custom-llm-429-exceeded-quota" | "pipeline-error-custom-llm-500-server-error" | "pipeline-error-custom-llm-503-server-overloaded-error" | "pipeline-error-custom-llm-llm-failed" | "call.in-progress.error-providerfault-custom-llm-llm-failed" | "call.in-progress.error-vapifault-custom-llm-400-bad-request-validation-failed" | "call.in-progress.error-vapifault-custom-llm-401-unauthorized" | "call.in-progress.error-vapifault-custom-llm-403-model-access-denied" | "call.in-progress.error-vapifault-custom-llm-429-exceeded-quota" | "call.in-progress.error-providerfault-custom-llm-500-server-error" | "call.in-progress.error-providerfault-custom-llm-503-server-overloaded-error" | "call.in-progress.error-pipeline-ws-model-connection-failed" | "pipeline-error-custom-voice-failed" | "pipeline-error-cartesia-socket-hang-up" | "pipeline-error-cartesia-requested-payment" | "pipeline-error-cartesia-500-server-error" | "pipeline-error-cartesia-502-server-error" | "pipeline-error-cartesia-503-server-error" | "pipeline-error-cartesia-522-server-error" | "call.in-progress.error-vapifault-cartesia-socket-hang-up" | "call.in-progress.error-vapifault-cartesia-requested-payment" | "call.in-progress.error-providerfault-cartesia-500-server-error" | "call.in-progress.error-providerfault-cartesia-503-server-error" | "call.in-progress.error-providerfault-cartesia-522-server-error" | "pipeline-error-eleven-labs-voice-not-found" | "pipeline-error-eleven-labs-quota-exceeded" | 
"pipeline-error-eleven-labs-unauthorized-access" | "pipeline-error-eleven-labs-unauthorized-to-access-model" | "pipeline-error-eleven-labs-professional-voices-only-for-creator-plus" | "pipeline-error-eleven-labs-blocked-free-plan-and-requested-upgrade" | "pipeline-error-eleven-labs-blocked-concurrent-requests-and-requested-upgrade" | "pipeline-error-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade" | "pipeline-error-eleven-labs-system-busy-and-requested-upgrade" | "pipeline-error-eleven-labs-voice-not-fine-tuned" | "pipeline-error-eleven-labs-invalid-api-key" | "pipeline-error-eleven-labs-invalid-voice-samples" | "pipeline-error-eleven-labs-voice-disabled-by-owner" | "pipeline-error-eleven-labs-vapi-voice-disabled-by-owner" | "pipeline-error-eleven-labs-blocked-account-in-probation" | "pipeline-error-eleven-labs-blocked-content-against-their-policy" | "pipeline-error-eleven-labs-missing-samples-for-voice-clone" | "pipeline-error-eleven-labs-voice-not-fine-tuned-and-cannot-be-used" | "pipeline-error-eleven-labs-voice-not-allowed-for-free-users" | "pipeline-error-eleven-labs-max-character-limit-exceeded" | "pipeline-error-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification" | "pipeline-error-eleven-labs-500-server-error" | "pipeline-error-eleven-labs-503-server-error" | "call.in-progress.error-vapifault-eleven-labs-voice-not-found" | "call.in-progress.error-vapifault-eleven-labs-quota-exceeded" | "call.in-progress.error-vapifault-eleven-labs-unauthorized-access" | "call.in-progress.error-vapifault-eleven-labs-unauthorized-to-access-model" | "call.in-progress.error-vapifault-eleven-labs-professional-voices-only-for-creator-plus" | "call.in-progress.error-vapifault-eleven-labs-blocked-free-plan-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-blocked-concurrent-requests-and-requested-upgrade" | 
"call.in-progress.error-vapifault-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-system-busy-and-requested-upgrade" | "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned" | "call.in-progress.error-vapifault-eleven-labs-invalid-api-key" | "call.in-progress.error-vapifault-eleven-labs-invalid-voice-samples" | "call.in-progress.error-vapifault-eleven-labs-voice-disabled-by-owner" | "call.in-progress.error-vapifault-eleven-labs-blocked-account-in-probation" | "call.in-progress.error-vapifault-eleven-labs-blocked-content-against-their-policy" | "call.in-progress.error-vapifault-eleven-labs-missing-samples-for-voice-clone" | "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned-and-cannot-be-used" | "call.in-progress.error-vapifault-eleven-labs-voice-not-allowed-for-free-users" | "call.in-progress.error-vapifault-eleven-labs-max-character-limit-exceeded" | "call.in-progress.error-vapifault-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification" | "call.in-progress.error-providerfault-eleven-labs-system-busy-and-requested-upgrade" | "call.in-progress.error-providerfault-eleven-labs-500-server-error" | "call.in-progress.error-providerfault-eleven-labs-503-server-error" | "pipeline-error-playht-request-timed-out" | "pipeline-error-playht-invalid-voice" | "pipeline-error-playht-unexpected-error" | "pipeline-error-playht-out-of-credits" | "pipeline-error-playht-invalid-emotion" | "pipeline-error-playht-voice-must-be-a-valid-voice-manifest-uri" | "pipeline-error-playht-401-unauthorized" | "pipeline-error-playht-403-forbidden-out-of-characters" | "pipeline-error-playht-403-forbidden-api-access-not-available" | "pipeline-error-playht-429-exceeded-quota" | "pipeline-error-playht-502-gateway-error" | "pipeline-error-playht-504-gateway-error" | "call.in-progress.error-vapifault-playht-request-timed-out" | 
"call.in-progress.error-vapifault-playht-invalid-voice" | "call.in-progress.error-vapifault-playht-unexpected-error" | "call.in-progress.error-vapifault-playht-out-of-credits" | "call.in-progress.error-vapifault-playht-invalid-emotion" | "call.in-progress.error-vapifault-playht-voice-must-be-a-valid-voice-manifest-uri" | "call.in-progress.error-vapifault-playht-401-unauthorized" | "call.in-progress.error-vapifault-playht-403-forbidden-out-of-characters" | "call.in-progress.error-vapifault-playht-403-forbidden-api-access-not-available" | "call.in-progress.error-vapifault-playht-429-exceeded-quota" | "call.in-progress.error-providerfault-playht-502-gateway-error" | "call.in-progress.error-providerfault-playht-504-gateway-error" | "pipeline-error-custom-transcriber-failed" | "call.in-progress.error-vapifault-custom-transcriber-failed" | "pipeline-error-eleven-labs-transcriber-failed" | "call.in-progress.error-vapifault-eleven-labs-transcriber-failed" | "pipeline-error-deepgram-returning-400-no-such-model-language-tier-combination" | "pipeline-error-deepgram-returning-401-invalid-credentials" | "pipeline-error-deepgram-returning-403-model-access-denied" | "pipeline-error-deepgram-returning-404-not-found" | "pipeline-error-deepgram-returning-500-invalid-json" | "pipeline-error-deepgram-returning-502-network-error" | "pipeline-error-deepgram-returning-502-bad-gateway-ehostunreach" | "pipeline-error-deepgram-returning-econnreset" | "call.in-progress.error-vapifault-deepgram-returning-400-no-such-model-language-tier-combination" | "call.in-progress.error-vapifault-deepgram-returning-401-invalid-credentials" | "call.in-progress.error-vapifault-deepgram-returning-404-not-found" | "call.in-progress.error-vapifault-deepgram-returning-403-model-access-denied" | "call.in-progress.error-providerfault-deepgram-returning-500-invalid-json" | "call.in-progress.error-providerfault-deepgram-returning-502-network-error" | 
"call.in-progress.error-providerfault-deepgram-returning-502-bad-gateway-ehostunreach" | "pipeline-error-google-transcriber-failed" | "call.in-progress.error-vapifault-google-transcriber-failed" | "pipeline-error-openai-transcriber-failed" | "call.in-progress.error-vapifault-openai-transcriber-failed" | "call.in-progress.error-warm-transfer-max-duration" | "call.in-progress.error-warm-transfer-assistant-cancelled" | "call.in-progress.error-warm-transfer-silence-timeout" | "call.in-progress.error-warm-transfer-microphone-timeout" | "assistant-ended-call" | "assistant-said-end-call-phrase" | "assistant-ended-call-with-hangup-task" | "assistant-ended-call-after-message-spoken" | "assistant-forwarded-call" | "assistant-join-timed-out" | "call.in-progress.error-assistant-did-not-receive-customer-audio" | "call.in-progress.error-transfer-failed" | "customer-busy" | "customer-ended-call" | "customer-ended-call-before-warm-transfer" | "customer-ended-call-after-warm-transfer-attempt" | "customer-did-not-answer" | "customer-did-not-give-microphone-permission" | "exceeded-max-duration" | "manually-canceled" | "phone-call-provider-closed-websocket" | "call.forwarding.operator-busy" | "silence-timed-out" | "call.in-progress.error-sip-inbound-call-failed-to-connect" | "call.in-progress.error-providerfault-outbound-sip-403-forbidden" | "call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required" | "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable" | "call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable" | "call.in-progress.error-sip-outbound-call-failed-to-connect" | "call.ringing.hook-executed-say" | "call.ringing.hook-executed-transfer" | "call.ending.hook-executed-say" | "call.ending.hook-executed-transfer" | "call.ringing.sip-inbound-caller-hungup-before-call-connect" | "call.ringing.error-sip-inbound-call-failed-to-connect" | "twilio-failed-to-connect-call" | "twilio-reported-customer-misdialed" | 
"vonage-rejected" | "voicemail" | "call-deleted"; /** These are the conversation messages of the call. This is only sent if the status is "forwarding". */ messages?: ( | UserMessage | SystemMessage | BotMessage | ToolCallMessage | ToolCallResultMessage )[]; /** These are the conversation messages of the call. This is only sent if the status is "forwarding". */ messagesOpenAIFormatted?: OpenAIMessage[]; /** This is the destination the call is being transferred to. This is only sent if the status is "forwarding". */ destination?: TransferDestinationNumber | TransferDestinationSip; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the transcript of the call. This is only sent if the status is "forwarding". */ transcript?: string; /** This is the summary of the call. This is only sent if the status is "forwarding". */ summary?: string; /** * This is the inbound phone call debugging artifacts. This is only sent if the status is "ended" and there was an error accepting the inbound phone call. * * This will include any errors related to the "assistant-request" if one was made. */ inboundPhoneCallDebuggingArtifacts?: object; } export interface ServerMessageToolCalls { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "tool-calls" is sent to call a tool. 
*/ type?: "tool-calls"; /** This is the list of tools calls that the model is requesting along with the original tool configuration. */ toolWithToolCallList: ( | FunctionToolWithToolCall | GhlToolWithToolCall | MakeToolWithToolCall | BashToolWithToolCall | ComputerToolWithToolCall | TextEditorToolWithToolCall | GoogleCalendarCreateEventToolWithToolCall )[]; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the list of tool calls that the model is requesting. */ toolCallList: ToolCall[]; } export interface ServerMessageTransferDestinationRequest { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "transfer-destination-request" is sent when the model is requesting transfer but destination is unknown. */ type: "transfer-destination-request"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. 
*/ chat?: Chat; } export interface ServerMessageTransferUpdate { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "transfer-update" is sent whenever a transfer happens. */ type: "transfer-update"; /** This is the destination of the transfer. */ destination?: | TransferDestinationAssistant | TransferDestinationNumber | TransferDestinationSip; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the assistant that the call is being transferred to. This is only sent if `destination.type` is "assistant". */ toAssistant?: CreateAssistantDTO; /** This is the assistant that the call is being transferred from. This is only sent if `destination.type` is "assistant". */ fromAssistant?: CreateAssistantDTO; /** This is the step that the conversation moved to. */ toStepRecord?: object; /** This is the step that the conversation moved from. = */ fromStepRecord?: object; } export interface ServerMessageTranscript { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "transcript" is sent as transcriber outputs partial or final transcript. 
*/ type: "transcript" | "transcript[transcriptType='final']"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the role for which the transcript is for. */ role: "assistant" | "user"; /** This is the type of the transcript. */ transcriptType: "partial" | "final"; /** This is the transcript content. */ transcript: string; /** Indicates if the transcript was filtered for security reasons. */ isFiltered?: boolean; /** List of detected security threats if the transcript was filtered. */ detectedThreats?: string[]; /** The original transcript before filtering (only included if content was filtered). */ originalTranscript?: string; } export interface ServerMessageUserInterrupted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "user-interrupted" is sent when the user interrupts the assistant. */ type: "user-interrupted"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. 
*/ call?: Call; /** This is the chat object. */ chat?: Chat; } export interface ServerMessageLanguageChangeDetected { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "language-change-detected" is sent when the transcriber is automatically switched based on the detected language. */ type: "language-change-detected"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the language the transcriber is switched to. */ language: string; } export interface ServerMessageVoiceInput { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "voice-input" is sent when a generation is requested from voice provider. */ type: "voice-input"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. 
*/ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the voice input content */ input: string; } export interface ServerMessageVoiceRequest { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** * This is the type of the message. "voice-request" is sent when using `assistant.voice={ "type": "custom-voice" }`. * * Here is what the request will look like: * * POST https://{assistant.voice.server.url} * Content-Type: application/json * * { * "messsage": { * "type": "voice-request", * "text": "Hello, world!", * "sampleRate": 24000, * ...other metadata about the call... * } * } * * The expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport: * ``` * response.on('data', (chunk: Buffer) => { * outputStream.write(chunk); * }); * ``` */ type: "voice-request"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the text to be synthesized. */ text: string; /** This is the sample rate to be synthesized. */ sampleRate: number; } export interface ServerMessageCallEndpointingRequest { /** This is the phone number that the message is associated with. 
*/ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** * This is the type of the message. "call.endpointing.request" is sent when using `assistant.startSpeakingPlan.smartEndpointingPlan={ "provider": "custom-endpointing-model" }`. * * Here is what the request will look like: * * POST https://{assistant.startSpeakingPlan.smartEndpointingPlan.server.url} * Content-Type: application/json * * { * "message": { * "type": "call.endpointing.request", * "messages": [ * { * "role": "user", * "message": "Hello, how are you?", * "time": 1234567890, * "secondsFromStart": 0 * } * ], * ...other metadata about the call... * } * } * * The expected response: * { * "timeoutSeconds": 0.5 * } */ type: "call.endpointing.request"; /** This is the conversation history at the time of the endpointing request. */ messages?: ( | UserMessage | SystemMessage | BotMessage | ToolCallMessage | ToolCallResultMessage )[]; /** This is just `messages` formatted for OpenAI. */ messagesOpenAIFormatted: OpenAIMessage[]; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; } export interface ServerMessageChatCreated { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "chat.created" is sent when a new chat is created. 
*/ type: "chat.created"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat that was created. */ chat: Chat; } export interface ServerMessageChatDeleted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "chat.deleted" is sent when a chat is deleted. */ type: "chat.deleted"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat that was deleted. */ chat: Chat; } export interface ServerMessageSessionCreated { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "session.created" is sent when a new session is created. */ type: "session.created"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. 
* * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the session that was created. */ session: Session; } export interface ServerMessageSessionUpdated { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "session.updated" is sent when a session is updated. */ type: "session.updated"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the session that was updated. */ session: Session; } export interface ServerMessageSessionDeleted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "session.deleted" is sent when a session is deleted. */ type: "session.deleted"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. 
* * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; /** This is the session that was deleted. */ session: Session; } export interface ServerMessageCallDeleted { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "call.deleted" is sent when a call is deleted. */ type: "call.deleted"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. */ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; } export interface ServerMessageCallDeleteFailed { /** This is the phone number that the message is associated with. */ phoneNumber?: | CreateByoPhoneNumberDTO | CreateTwilioPhoneNumberDTO | CreateVonagePhoneNumberDTO | CreateVapiPhoneNumberDTO | CreateTelnyxPhoneNumberDTO; /** This is the type of the message. "call.delete.failed" is sent when a call deletion fails. */ type: "call.delete.failed"; /** This is the timestamp of the message. */ timestamp?: number; /** * This is a live version of the `call.artifact`. * * This matches what is stored on `call.artifact` after the call. 
*/ artifact?: Artifact; /** This is the assistant that the message is associated with. */ assistant?: CreateAssistantDTO; /** This is the customer that the message is associated with. */ customer?: CreateCustomerDTO; /** This is the call that the message is associated with. */ call?: Call; /** This is the chat object. */ chat?: Chat; } export interface ServerMessage { /** * These are all the messages that can be sent to your server before, after and during the call. Configure the messages you'd like to receive in `assistant.serverMessages`. * * The server where the message is sent is determined by the following precedence order: * * 1. `tool.server.url` (if configured, and only for "tool-calls" message) * 2. `assistant.serverUrl` (if configured) * 3. `phoneNumber.serverUrl` (if configured) * 4. `org.serverUrl` (if configured) */ message: | ServerMessageAssistantRequest | ServerMessageConversationUpdate | ServerMessageEndOfCallReport | ServerMessageHandoffDestinationRequest | ServerMessageHang | ServerMessageKnowledgeBaseRequest | ServerMessageModelOutput | ServerMessagePhoneCallControl | ServerMessageSpeechUpdate | ServerMessageStatusUpdate | ServerMessageToolCalls | ServerMessageTransferDestinationRequest | ServerMessageTransferUpdate | ServerMessageTranscript | ServerMessageUserInterrupted | ServerMessageLanguageChangeDetected | ServerMessageVoiceInput | ServerMessageVoiceRequest | ServerMessageCallEndpointingRequest | ServerMessageChatCreated | ServerMessageChatDeleted | ServerMessageSessionCreated | ServerMessageSessionUpdated | ServerMessageSessionDeleted | ServerMessageCallDeleted | ServerMessageCallDeleteFailed; } export interface ServerMessageResponseAssistantRequest { /** * This is the destination to transfer the inbound call to. This will immediately transfer without using any assistants. * * If this is sent, `assistantId`, `assistant`, `squadId`, and `squad` are ignored. 
*/ destination?: TransferDestinationNumber | TransferDestinationSip; /** * This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead. * * To start a call with: * - Assistant, use `assistantId` or `assistant` * - Squad, use `squadId` or `squad` * - Workflow, use `workflowId` or `workflow` */ assistantId?: string; /** * This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead. * * To start a call with: * - Assistant, use `assistant` * - Squad, use `squad` * - Workflow, use `workflow` */ assistant?: CreateAssistantDTO; /** These are the overrides for the `assistant` or `assistantId`'s settings and template variables. */ assistantOverrides?: AssistantOverrides; /** * This is the squad that will be used for the call. To use a transient squad, use `squad` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squadId?: string; /** * This is a squad that will be used for the call. To use an existing squad, use `squadId` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ squad?: CreateSquadDTO; /** * These are the overrides for the `squad` or `squadId`'s member settings and template variables. * This will apply to all members of the squad. */ squadOverrides?: AssistantOverrides; /** * This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead. * * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflowId?: string; /** * This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead. 
* * To start a call with: * - Assistant, use `assistant` or `assistantId` * - Squad, use `squad` or `squadId` * - Workflow, use `workflow` or `workflowId` */ workflow?: CreateWorkflowDTO; /** These are the overrides for the `workflow` or `workflowId`'s settings and template variables. */ workflowOverrides?: WorkflowOverrides; /** * This is the error if the call shouldn't be accepted. This is spoken to the customer. * * If this is sent, `assistantId`, `assistant`, `squadId`, `squad`, and `destination` are ignored. */ error?: string; } export interface ServerMessageResponseHandoffDestinationRequest { /** This is the destination you'd like the call to be transferred to. */ destination: HandoffDestinationAssistant; /** This is the error message if the handoff should not be made. */ error?: string; } export interface KnowledgeBaseResponseDocument { /** This is the content of the document. */ content: string; /** This is the similarity score of the document. */ similarity: number; /** This is the uuid of the document. */ uuid?: string; } export interface ServerMessageResponseKnowledgeBaseRequest { /** This is the list of documents that will be sent to the model alongside the `messages` to generate a response. */ documents?: KnowledgeBaseResponseDocument[]; /** This can be used to skip the model output generation and speak a custom message. */ message?: CustomMessage; } export interface ToolCallResult { /** * This is the message that will be spoken to the user. * * If this is not returned, assistant will speak: * 1. a `request-complete` or `request-failed` message from `tool.messages`, if it exists * 2. a response generated by the model, if not */ message?: ToolMessageComplete | ToolMessageFailed; /** This is the name of the function the model called. */ name: string; /** This is the unique identifier for the tool call. */ toolCallId: string; /** * This is the result if the tool call was successful. This is added to the conversation history. 
* * Further, if this is returned, assistant will speak: * 1. the `message`, if it exists and is of type `request-complete` * 2. a `request-complete` message from `tool.messages`, if it exists * 3. a response generated by the model, if neither exist */ result?: string; /** * This is the error if the tool call was not successful. This is added to the conversation history. * * Further, if this is returned, assistant will speak: * 1. the `message`, if it exists and is of type `request-failed` * 2. a `request-failed` message from `tool.messages`, if it exists * 3. a response generated by the model, if neither exist */ error?: string; /** This is optional metadata for the tool call result to be sent to the client. */ metadata?: object; } export interface ServerMessageResponseToolCalls { /** These are the results of the "tool-calls" message. */ results?: ToolCallResult[]; /** This is the error message if the tool call was not successful. */ error?: string; } export interface ServerMessageResponseTransferDestinationRequest { /** This is the destination you'd like the call to be transferred to. */ destination?: | TransferDestinationAssistant | TransferDestinationNumber | TransferDestinationSip; /** This is the message that will be spoken to the user as the tool is running. */ message?: | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed; /** This is the error message if the transfer should not be made. */ error?: string; } export interface ServerMessageResponseVoiceRequest { /** * DO NOT respond to a `voice-request` webhook with this schema of { data }. This schema just exists to document what the response should look like. Follow these instructions: * * Here is what the request will look like: * * POST https://{assistant.voice.server.url} * Content-Type: application/json * * { * "message": { * "type": "voice-request", * "text": "Hello, world!", * "sampleRate": 24000, * ...other metadata about the call... 
* } * } * * The expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport: * ``` * response.on('data', (chunk: Buffer) => { * outputStream.write(chunk); * }); * ``` */ data: string; } export interface ServerMessageResponseCallEndpointingRequest { /** * This is the timeout in seconds to wait before considering the user's speech as finished. * @min 0 * @max 15 */ timeoutSeconds: number; } export interface ServerMessageResponse { /** * This is the response that is expected from the server to the message. * * Note: Most messages don't expect a response. Only "assistant-request", "tool-calls" and "transfer-destination-request" do. */ messageResponse: | ServerMessageResponseAssistantRequest | ServerMessageResponseHandoffDestinationRequest | ServerMessageResponseKnowledgeBaseRequest | ServerMessageResponseToolCalls | ServerMessageResponseTransferDestinationRequest | ServerMessageResponseVoiceRequest | ServerMessageResponseCallEndpointingRequest; } export interface ClientInboundMessageAddMessage { /** This is the type of the message. Send "add-message" message to add a message to the conversation history. */ type: "add-message"; /** This is the message to add to the conversation. */ message: OpenAIMessage; /** * This is the flag to trigger a response, or to insert the message into the conversation history silently. Defaults to `true`. * * Usage: * - Use `true` to trigger a response. * - Use `false` to insert the message into the conversation history silently. * * @default true */ triggerResponseEnabled?: boolean; } export interface ClientInboundMessageControl { /** * This is the type of the message. Send "control" message to control the assistant. 
`control` options are: * - "mute-assistant" - mute the assistant * - "unmute-assistant" - unmute the assistant * - "mute-customer" - mute the user * - "unmute-customer" - unmute the user * - "say-first-message" - say the first message (this is used when video recording is enabled and the conversation is only started once the client side kicks off the recording) */ type: "control"; /** This is the control action */ control: | "mute-assistant" | "unmute-assistant" | "mute-customer" | "unmute-customer" | "say-first-message"; } export interface ClientInboundMessageSay { /** This is the type of the message. Send "say" message to make the assistant say something. */ type?: "say"; /** * This is the flag for whether the message should replace existing assistant speech. * * @default false */ interruptAssistantEnabled?: boolean; /** This is the content to say. */ content?: string; /** This is the flag to end call after content is spoken. */ endCallAfterSpoken?: boolean; /** This is the flag for whether the message is interruptible by the user. */ interruptionsEnabled?: boolean; } export interface ClientInboundMessageEndCall { /** This is the type of the message. Send "end-call" message to end the call. */ type: "end-call"; } export interface ClientInboundMessageTransfer { /** This is the type of the message. Send "transfer" message to transfer the call to a destination. */ type: "transfer"; /** This is the destination to transfer the call to. */ destination?: TransferDestinationNumber | TransferDestinationSip; /** This is the content to say. */ content?: string; } export interface ClientInboundMessage { /** These are the messages that can be sent from client-side SDKs to control the call. */ message: | ClientInboundMessageAddMessage | ClientInboundMessageControl | ClientInboundMessageSay | ClientInboundMessageEndCall | ClientInboundMessageTransfer; } export interface BotMessage { /** The role of the bot in the conversation. 
*/ role: string; /** The message content from the bot. */ message: string; /** The timestamp when the message was sent. */ time: number; /** The timestamp when the message ended. */ endTime: number; /** The number of seconds from the start of the conversation. */ secondsFromStart: number; /** The source of the message. */ source?: string; /** The duration of the message in seconds. */ duration?: number; /** Stable speaker label for diarized user speakers (e.g., "Speaker 1"). */ speakerLabel?: string; } export interface ToolCallMessage { /** The role of the tool call in the conversation. */ role: string; /** The list of tool calls made during the conversation. */ toolCalls: object[]; /** The message content for the tool call. */ message: string; /** The timestamp when the message was sent. */ time: number; /** The number of seconds from the start of the conversation. */ secondsFromStart: number; } export interface ToolCallResultMessage { /** The role of the tool call result in the conversation. */ role: string; /** The ID of the tool call. */ toolCallId: string; /** The name of the tool that returned the result. */ name: string; /** The result of the tool call in JSON format. */ result: string; /** The timestamp when the message was sent. */ time: number; /** The number of seconds from the start of the conversation. */ secondsFromStart: number; /** The metadata for the tool call result. */ metadata?: object; } export interface TransportCost { /** This is the type of cost, always 'transport' for this class. */ type: "transport"; provider?: | "daily" | "vapi.websocket" | "twilio" | "vonage" | "telnyx" | "vapi.sip"; /** This is the minutes of `transport` usage. This should match `call.endedAt` - `call.startedAt`. */ minutes: number; /** This is the cost of the component in USD. */ cost: number; } export interface TranscriberCost { /** This is the type of cost, always 'transcriber' for this class. 
*/ type: "transcriber"; /** * This is the transcriber that was used during the call. * * This matches one of the below: * - `call.assistant.transcriber`, * - `call.assistantId->transcriber`, * - `call.squad[n].assistant.transcriber`, * - `call.squad[n].assistantId->transcriber`, * - `call.squadId->[n].assistant.transcriber`, * - `call.squadId->[n].assistantId->transcriber`. */ transcriber: object; /** This is the minutes of `transcriber` usage. This should match `call.endedAt` - `call.startedAt` for single assistant calls, while squad calls will have multiple transcriber costs one for each assistant that was used. */ minutes: number; /** This is the cost of the component in USD. */ cost: number; } export interface ModelCost { /** This is the type of cost, always 'model' for this class. */ type: "model"; /** * This is the model that was used during the call. * * This matches one of the following: * - `call.assistant.model`, * - `call.assistantId->model`, * - `call.squad[n].assistant.model`, * - `call.squad[n].assistantId->model`, * - `call.squadId->[n].assistant.model`, * - `call.squadId->[n].assistantId->model`. */ model: object; /** This is the number of prompt tokens used in the call. These should be total prompt tokens used in the call for single assistant calls, while squad calls will have multiple model costs one for each assistant that was used. */ promptTokens: number; /** This is the number of completion tokens generated in the call. These should be total completion tokens used in the call for single assistant calls, while squad calls will have multiple model costs one for each assistant that was used. */ completionTokens: number; /** This is the cost of the component in USD. */ cost: number; } export interface VoiceCost { /** This is the type of cost, always 'voice' for this class. */ type: "voice"; /** * This is the voice that was used during the call. 
* * This matches one of the following: * - `call.assistant.voice`, * - `call.assistantId->voice`, * - `call.squad[n].assistant.voice`, * - `call.squad[n].assistantId->voice`, * - `call.squadId->[n].assistant.voice`, * - `call.squadId->[n].assistantId->voice`. */ voice: object; /** This is the number of characters that were generated during the call. These should be total characters used in the call for single assistant calls, while squad calls will have multiple voice costs one for each assistant that was used. */ characters: number; /** This is the cost of the component in USD. */ cost: number; } export interface VapiCost { /** This is the type of cost, always 'vapi' for this class. */ type: "vapi"; /** This is the sub type of the cost. */ subType: "normal" | "overage"; /** This is the minutes of Vapi usage. This should match `call.endedAt` - `call.startedAt`. */ minutes: number; /** This is the cost of the component in USD. */ cost: number; } export interface AnalysisCost { /** This is the type of cost, always 'analysis' for this class. */ type: "analysis"; /** This is the type of analysis performed. */ analysisType: | "summary" | "structuredData" | "successEvaluation" | "structuredOutput"; /** This is the model that was used to perform the analysis. */ model: object; /** This is the number of prompt tokens used in the analysis. */ promptTokens: number; /** This is the number of completion tokens generated in the analysis. */ completionTokens: number; /** This is the cost of the component in USD. */ cost: number; } export interface VoicemailDetectionCost { /** This is the type of cost, always 'voicemail-detection' for this class. */ type: "voicemail-detection"; /** This is the model that was used to perform the analysis. */ model: object; /** This is the provider that was used to detect the voicemail. */ provider: "twilio" | "google" | "openai" | "vapi"; /** This is the number of prompt text tokens used in the voicemail detection. 
*/ promptTextTokens: number; /** This is the number of prompt audio tokens used in the voicemail detection. */ promptAudioTokens: number; /** This is the number of completion text tokens used in the voicemail detection. */ completionTextTokens: number; /** This is the number of completion audio tokens used in the voicemail detection. */ completionAudioTokens: number; /** This is the cost of the component in USD. */ cost: number; } export interface KnowledgeBaseCost { /** This is the type of cost, always 'knowledge-base' for this class. */ type: "knowledge-base"; /** This is the model that was used for processing the knowledge base. */ model: object; /** This is the number of prompt tokens used in the knowledge base query. */ promptTokens: number; /** This is the number of completion tokens generated in the knowledge base query. */ completionTokens: number; /** This is the cost of the component in USD. */ cost: number; } export interface ChatCost { /** This is the type of cost, always 'chat' for this class. */ type: "chat"; /** This is the cost of the component in USD. */ cost: number; } export interface SessionCost { /** This is the type of cost, always 'session' for this class. */ type: "session"; /** This is the cost of the component in USD. */ cost: number; } export interface FunctionToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "function" for Function tool. */ type: "function"; /** * This determines if the tool is async. * * If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server. 
* * If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server. * * Defaults to synchronous (`false`). * @example false */ async?: boolean; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; toolCall: ToolCall; /** This is the function definition of the tool. */ function?: OpenAIFunction; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot 
is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GhlToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "ghl" for GHL tool. */ type: "ghl"; toolCall: ToolCall; metadata: GhlToolMetadata; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface MakeToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "make" for Make tool. */ type: "make"; toolCall: ToolCall; metadata: MakeToolMetadata; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign 
userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface BashToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "bash" for Bash tool. */ type: "bash"; /** The sub type of tool. */ subType: "bash_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. 
*/ server?: Server; toolCall: ToolCall; /** * The name of the tool, fixed to 'bash' * @default "bash" */ name: "bash"; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool 
thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface ComputerToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "computer" for Computer tool. */ type: "computer"; /** The sub type of tool. */ subType: "computer_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; toolCall: ToolCall; /** * The name of the tool, fixed to 'computer' * @default "computer" */ name: "computer"; /** The display width in pixels */ displayWidthPx: number; /** The display height in pixels */ displayHeightPx: number; /** Optional display number */ displayNumber?: number; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface TextEditorToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "textEditor" for Text Editor tool. */ type: "textEditor"; /** The sub type of tool. */ subType: "text_editor_20241022"; /** * * This is the server where a `tool-calls` webhook will be sent. * * Notes: * - Webhook is sent to this server when a tool call is made. * - Webhook contains the call, assistant, and phone number objects. * - Webhook contains the variables set on the assistant. * - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}. * - Webhook expects a response with tool call result. */ server?: Server; toolCall: ToolCall; /** * The name of the tool, fixed to 'str_replace_editor' * @default "str_replace_editor" */ name: "str_replace_editor"; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoogleCalendarCreateEventToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.calendar.event.create" for Google Calendar Create Event tool. */ type: "google.calendar.event.create"; toolCall: ToolCall; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% 
assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoogleSheetsRowAppendToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "google.sheets.row.append" for Google Sheets Row Append tool. */ type: "google.sheets.row.append"; toolCall: ToolCall; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoHighLevelCalendarAvailabilityToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.calendar.availability.check" for GoHighLevel Calendar Availability Check tool. */ type: "gohighlevel.calendar.availability.check"; toolCall: ToolCall; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * 
conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoHighLevelCalendarEventCreateToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.calendar.event.create" for GoHighLevel Calendar Event Create tool. */ type: "gohighlevel.calendar.event.create"; toolCall: ToolCall; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoHighLevelContactCreateToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.contact.create" for GoHighLevel Contact Create tool. */ type: "gohighlevel.contact.create"; toolCall: ToolCall; /** * This is the plan to reject a tool call based on the conversation state. * * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign 
recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export interface GoHighLevelContactGetToolWithToolCall { /** * These are the messages that will be spoken to the user as the tool is running. * * For some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured. */ messages?: ( | ToolMessageStart | ToolMessageComplete | ToolMessageFailed | ToolMessageDelayed )[]; /** The type of tool. "gohighlevel.contact.get" for GoHighLevel Contact Get tool. */ type: "gohighlevel.contact.get"; toolCall: ToolCall; /** * This is the plan to reject a tool call based on the conversation state. 
* * // Example 1: Reject endCall if user didn't say goodbye * ```json * { * conditions: [{ * type: 'regex', * regex: '(?i)\\b(bye|goodbye|farewell|see you later|take care)\\b', * target: { position: -1, role: 'user' }, * negate: true // Reject if pattern does NOT match * }] * } * ``` * * // Example 2: Reject transfer if user is actually asking a question * ```json * { * conditions: [{ * type: 'regex', * regex: '\\?', * target: { position: -1, role: 'user' } * }] * } * ``` * * // Example 3: Reject transfer if user didn't mention transfer recently * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 5 %} * {% assign userMessages = recentMessages | where: 'role', 'user' %} * {% assign mentioned = false %} * {% for msg in userMessages %} * {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %} * {% assign mentioned = true %} * {% break %} * {% endif %} * {% endfor %} * {% if mentioned %} * false * {% else %} * true * {% endif %}` * }] * } * ``` * * // Example 4: Reject endCall if the bot is looping and trying to exit * ```json * { * conditions: [{ * type: 'liquid', * liquid: `{% assign recentMessages = messages | last: 6 %} * {% assign userMessages = recentMessages | where: 'role', 'user' | reverse %} * {% if userMessages.size < 3 %} * false * {% else %} * {% assign msg1 = userMessages[0].content | downcase %} * {% assign msg2 = userMessages[1].content | downcase %} * {% assign msg3 = userMessages[2].content | downcase %} * {% comment %} Check for repetitive messages {% endcomment %} * {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %} * true * {% comment %} Check for common loop phrases {% endcomment %} * {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %} * true * {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 contains 'okay thanks' %} * true * {% elsif msg1 contains 'got it' or 
msg2 contains 'got it' or msg3 contains 'got it' %} * true * {% else %} * false * {% endif %} * {% endif %}` * }] * } * ``` */ rejectionPlan?: ToolRejectionPlan; } export type QueryParamsType = Record; export type ResponseFormat = keyof Omit; export interface FullRequestParams extends Omit { /** set parameter to `true` for call `securityWorker` for this request */ secure?: boolean; /** request path */ path: string; /** content type of request body */ type?: ContentType; /** query params */ query?: QueryParamsType; /** format of response (i.e. response.json() -> format: "json") */ format?: ResponseFormat; /** request body */ body?: unknown; /** base url */ baseUrl?: string; /** request cancellation token */ cancelToken?: CancelToken; } export type RequestParams = Omit< FullRequestParams, "body" | "method" | "query" | "path" >; export interface ApiConfig { baseUrl?: string; baseApiParams?: Omit; securityWorker?: ( securityData: SecurityDataType | null, ) => Promise | RequestParams | void; customFetch?: typeof fetch; } export interface HttpResponse extends Response { data: D; error: E; } type CancelToken = Symbol | string | number; export enum ContentType { Json = "application/json", JsonApi = "application/vnd.api+json", FormData = "multipart/form-data", UrlEncoded = "application/x-www-form-urlencoded", Text = "text/plain", } export class HttpClient { public baseUrl: string = "https://api.vapi.ai"; private securityData: SecurityDataType | null = null; private securityWorker?: ApiConfig["securityWorker"]; private abortControllers = new Map(); private customFetch = (...fetchParams: Parameters) => fetch(...fetchParams); private baseApiParams: RequestParams = { credentials: "same-origin", headers: {}, redirect: "follow", referrerPolicy: "no-referrer", }; constructor(apiConfig: ApiConfig = {}) { Object.assign(this, apiConfig); } public setSecurityData = (data: SecurityDataType | null) => { this.securityData = data; }; protected encodeQueryParam(key: string, value: any) { 
const encodedKey = encodeURIComponent(key); return `${encodedKey}=${encodeURIComponent(typeof value === "number" ? value : `${value}`)}`; } protected addQueryParam(query: QueryParamsType, key: string) { return this.encodeQueryParam(key, query[key]); } protected addArrayQueryParam(query: QueryParamsType, key: string) { const value = query[key]; return value.map((v: any) => this.encodeQueryParam(key, v)).join("&"); } protected toQueryString(rawQuery?: QueryParamsType): string { const query = rawQuery || {}; const keys = Object.keys(query).filter( (key) => "undefined" !== typeof query[key], ); return keys .map((key) => Array.isArray(query[key]) ? this.addArrayQueryParam(query, key) : this.addQueryParam(query, key), ) .join("&"); } protected addQueryParams(rawQuery?: QueryParamsType): string { const queryString = this.toQueryString(rawQuery); return queryString ? `?${queryString}` : ""; } private contentFormatters: Record any> = { [ContentType.Json]: (input: any) => input !== null && (typeof input === "object" || typeof input === "string") ? JSON.stringify(input) : input, [ContentType.JsonApi]: (input: any) => input !== null && (typeof input === "object" || typeof input === "string") ? JSON.stringify(input) : input, [ContentType.Text]: (input: any) => input !== null && typeof input !== "string" ? JSON.stringify(input) : input, [ContentType.FormData]: (input: any) => { if (input instanceof FormData) { return input; } return Object.keys(input || {}).reduce((formData, key) => { const property = input[key]; formData.append( key, property instanceof Blob ? property : typeof property === "object" && property !== null ? 
JSON.stringify(property) : `${property}`, ); return formData; }, new FormData()); }, [ContentType.UrlEncoded]: (input: any) => this.toQueryString(input), }; protected mergeRequestParams( params1: RequestParams, params2?: RequestParams, ): RequestParams { return { ...this.baseApiParams, ...params1, ...(params2 || {}), headers: { ...(this.baseApiParams.headers || {}), ...(params1.headers || {}), ...((params2 && params2.headers) || {}), }, }; } protected createAbortSignal = ( cancelToken: CancelToken, ): AbortSignal | undefined => { if (this.abortControllers.has(cancelToken)) { const abortController = this.abortControllers.get(cancelToken); if (abortController) { return abortController.signal; } return void 0; } const abortController = new AbortController(); this.abortControllers.set(cancelToken, abortController); return abortController.signal; }; public abortRequest = (cancelToken: CancelToken) => { const abortController = this.abortControllers.get(cancelToken); if (abortController) { abortController.abort(); this.abortControllers.delete(cancelToken); } }; public request = async ({ body, secure, path, type, query, format, baseUrl, cancelToken, ...params }: FullRequestParams): Promise> => { const secureParams = ((typeof secure === "boolean" ? secure : this.baseApiParams.secure) && this.securityWorker && (await this.securityWorker(this.securityData))) || {}; const requestParams = this.mergeRequestParams(params, secureParams); const queryString = query && this.toQueryString(query); const payloadFormatter = this.contentFormatters[type || ContentType.Json]; const responseFormat = format || requestParams.format; return this.customFetch( `${baseUrl || this.baseUrl || ""}${path}${queryString ? `?${queryString}` : ""}`, { ...requestParams, headers: { ...(requestParams.headers || {}), ...(type && type !== ContentType.FormData ? { "Content-Type": type } : {}), }, signal: (cancelToken ? 
this.createAbortSignal(cancelToken) : requestParams.signal) || null, body: typeof body === "undefined" || body === null ? null : payloadFormatter(body), }, ).then(async (response) => { const r = response as HttpResponse; r.data = null as unknown as T; r.error = null as unknown as E; const responseToParse = responseFormat ? response.clone() : response; const data = !responseFormat ? r : await responseToParse[responseFormat]() .then((data) => { if (r.ok) { r.data = data; } else { r.error = data; } return r; }) .catch((e) => { r.error = e; return r; }); if (cancelToken) { this.abortControllers.delete(cancelToken); } if (!response.ok) throw data; return data; }); }; } /** * @title Vapi API * @version 1.0 * @baseUrl https://api.vapi.ai * @contact * * Voice AI for developers. */ export class Api< SecurityDataType extends unknown, > extends HttpClient { assistant = { /** * No description * * @tags Assistants * @name AssistantControllerCreate * @summary Create Assistant * @request POST:/assistant * @secure */ assistantControllerCreate: ( data: CreateAssistantDTO, params: RequestParams = {}, ) => this.request({ path: `/assistant`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Assistants * @name AssistantControllerFindAll * @summary List Assistants * @request GET:/assistant * @secure */ assistantControllerFindAll: ( query?: { /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. 
* @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/assistant`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Assistants * @name AssistantControllerFindOne * @summary Get Assistant * @request GET:/assistant/{id} * @secure */ assistantControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/assistant/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Assistants * @name AssistantControllerUpdate * @summary Update Assistant * @request PATCH:/assistant/{id} * @secure */ assistantControllerUpdate: ( id: string, data: UpdateAssistantDTO, params: RequestParams = {}, ) => this.request({ path: `/assistant/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Assistants * @name AssistantControllerReplace * @summary Replace Assistant * @request PUT:/assistant/{id} * @secure */ assistantControllerReplace: ( id: string, data: UpdateAssistantDTO, params: RequestParams = {}, ) => this.request({ path: `/assistant/${id}`, method: "PUT", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags 
Assistants * @name AssistantControllerRemove * @summary Delete Assistant * @request DELETE:/assistant/{id} * @secure */ assistantControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/assistant/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Assistants * @name AssistantControllerFindVersions * @summary List Assistant Versions * @request GET:/assistant/{id}/version * @secure */ assistantControllerFindVersions: ( id: string, query?: { page?: number; limit?: number; pageState?: string; }, params: RequestParams = {}, ) => this.request({ path: `/assistant/${id}/version`, method: "GET", query: query, secure: true, format: "json", ...params, }), }; v2 = { /** * No description * * @tags Assistants * @name AssistantControllerFindAllPaginated * @summary List Assistants with pagination * @request GET:/v2/assistant * @secure */ assistantControllerFindAllPaginated: ( query?: { /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. 
* @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/v2/assistant`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerCallsExport * @summary Export Calls to CSV * @request GET:/v2/call/export * @secure */ callControllerCallsExport: ( query?: { /** * Filter by assistant overrides. Use variableValues to filter by template variables. * @example {"variableValues":{"name":"John","age":"25"}} */ assistantOverrides?: object; /** * Filter by customer properties. Supports filtering by number, name, externalId, and extension. * @example {"number":"+1234567890","name":"John Doe"} */ customer?: object; /** * Columns to include in the CSV export * @default ["id","assistantId","squadId","customerId","customerName","customerNumber","customerSipUri","customerExtension","phoneNumberId","endedReason","type","duration","startedAt","endedAt","transcript","summary","successEvaluation","recordingUrl","cost","phoneCallProvider","phoneCallProviderId","createdAt","updatedAt"] */ columns?: | "id" | "assistantId" | "squadId" | "customerId" | "customerName" | "customerNumber" | "customerSipUri" | "customerExtension" | "phoneNumberId" | "endedReason" | "type" | "duration" | "startedAt" | "endedAt" | "transcript" | "summary" | "successEvaluation" | "recordingUrl" | "cost" | "phoneCallProvider" | "phoneCallProviderId" | "createdAt" | "updatedAt"; /** * This determines if the CSV export is async. 
* * If async, the API will enqueue a background job to export the calls and return a job ID in the response. * * If sync, the API will export the calls and return the CSV file in the response. * * Defaults to async (`true`). * @default true * @example true */ async?: boolean; /** * This determines the export format. * * If 'csv', exports calls as CSV with selected columns. * If 'json', exports the full Call objects as a JSON array. * * Defaults to 'csv'. * @default "csv" * @example "csv" */ format?: "csv" | "json"; /** * This is the email to send the export to, if async is true and API Private Key is used to make the request. * If JWT is used, or request made from the dashboard, the email will be the user's email. * @maxLength 1000 */ email?: string; /** This will return calls with the specified assistantId. */ assistantId?: string; /** * This will return calls where the transient assistant name exactly matches the specified value (case-insensitive). * @maxLength 40 */ assistantName?: string; /** This will return calls with the specified squadId. */ squadId?: string; /** This will return calls where the transient squad name exactly matches the specified value (case-insensitive). */ squadName?: string; /** This will return calls with the specified callId. */ id?: string; /** This will return calls with the specified callIds. */ idAny?: string[]; /** This will return calls where the cost is less than or equal to the specified value. */ costLe?: number; /** This will return calls where the cost is greater than or equal to the specified value. */ costGe?: number; /** This will return calls with the exact specified cost. */ cost?: number; /** * This will return calls with the specified successEvaluation. * @maxLength 1000 */ successEvaluation?: string; /** * This will return calls with the specified endedReason. * @maxLength 1000 */ endedReason?: string; /** This will return calls with the specified phoneNumberId. 
*/ phoneNumberId?: string; /** * Filter calls by structured output values. Use structured output ID as key and filter operators as values. * @example {"c9dddda4-d70a-4dad-aa5c-aaf117f85cea":{"eq":"2","gt":"1"}} */ structuredOutputs?: Record; /** * Filter calls by the first scorecard's normalized score. * @example {"gte":80,"lt":100} */ score?: StructuredOutputFilterDTO; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/v2/call/export`, method: "GET", query: query, secure: true, ...params, }), /** * No description * * @tags Calls * @name CallControllerFindAllPaginated * @summary List Calls * @request GET:/v2/call * @secure */ callControllerFindAllPaginated: ( query?: { /** * Filter by assistant overrides. Use variableValues to filter by template variables. * @example {"variableValues":{"name":"John","age":"25"}} */ assistantOverrides?: object; /** * Filter by customer properties. Supports filtering by number, name, externalId, and extension. * @example {"number":"+1234567890","name":"John Doe"} */ customer?: object; /** This will return calls with the specified assistantId. */ assistantId?: string; /** * This will return calls where the transient assistant name exactly matches the specified value (case-insensitive). * @maxLength 40 */ assistantName?: string; /** This will return calls with the specified squadId. */ squadId?: string; /** This will return calls where the transient squad name exactly matches the specified value (case-insensitive). */ squadName?: string; /** This will return calls with the specified callId. */ id?: string; /** This will return calls with the specified callIds. */ idAny?: string[]; /** This will return calls where the cost is less than or equal to the specified value. */ costLe?: number; /** This will return calls where the cost is greater than or equal to the specified value. */ costGe?: number; /** This will return calls with the exact specified cost. */ cost?: number; /** * This will return calls with the specified successEvaluation. * @maxLength 1000 */ successEvaluation?: string; /** * This will return calls with the specified endedReason. * @maxLength 1000 */ endedReason?: string; /** This will return calls with the specified phoneNumberId. */ phoneNumberId?: string; /** * Filter calls by structured output values. 
Use structured output ID as key and filter operators as values. * @example {"c9dddda4-d70a-4dad-aa5c-aaf117f85cea":{"eq":"2","gt":"1"}} */ structuredOutputs?: Record; /** * Filter calls by the first scorecard's normalized score. * @example {"gte":80,"lt":100} */ score?: StructuredOutputFilterDTO; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/v2/call`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerFindAllMetadataPaginated * @summary List Call Metadata * @request GET:/v2/call/metadata * @secure */ callControllerFindAllMetadataPaginated: ( query?: { /** * Filter by assistant overrides. Use variableValues to filter by template variables. * @example {"variableValues":{"name":"John","age":"25"}} */ assistantOverrides?: object; /** * Filter by customer properties. Supports filtering by number, name, externalId, and extension. * @example {"number":"+1234567890","name":"John Doe"} */ customer?: object; /** This will return calls with the specified assistantId. */ assistantId?: string; /** * This will return calls where the transient assistant name exactly matches the specified value (case-insensitive). * @maxLength 40 */ assistantName?: string; /** This will return calls with the specified squadId. */ squadId?: string; /** This will return calls where the transient squad name exactly matches the specified value (case-insensitive). */ squadName?: string; /** This will return calls with the specified callId. */ id?: string; /** This will return calls with the specified callIds. */ idAny?: string[]; /** This will return calls where the cost is less than or equal to the specified value. */ costLe?: number; /** This will return calls where the cost is greater than or equal to the specified value. */ costGe?: number; /** This will return calls with the exact specified cost. */ cost?: number; /** * This will return calls with the specified successEvaluation. * @maxLength 1000 */ successEvaluation?: string; /** * This will return calls with the specified endedReason. * @maxLength 1000 */ endedReason?: string; /** This will return calls with the specified phoneNumberId. 
*/ phoneNumberId?: string; /** * Filter calls by structured output values. Use structured output ID as key and filter operators as values. * @example {"c9dddda4-d70a-4dad-aa5c-aaf117f85cea":{"eq":"2","gt":"1"}} */ structuredOutputs?: Record; /** * Filter calls by the first scorecard's normalized score. * @example {"gte":80,"lt":100} */ score?: StructuredOutputFilterDTO; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/v2/call/metadata`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Phone Numbers * @name PhoneNumberControllerFindAllPaginated * @summary List Phone Numbers * @request GET:/v2/phone-number * @secure */ phoneNumberControllerFindAllPaginated: ( query?: { /** * This will search phone numbers by name, number, or SIP URI (partial match, case-insensitive). * @maxLength 100 */ search?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/v2/phone-number`, method: "GET", query: query, secure: true, format: "json", ...params, }), }; squad = { /** * No description * * @tags Squads * @name SquadControllerCreate * @summary Create Squad * @request POST:/squad * @secure */ squadControllerCreate: (data: CreateSquadDTO, params: RequestParams = {}) => this.request({ path: `/squad`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Squads * @name SquadControllerFindAll * @summary List Squads * @request GET:/squad * @secure */ squadControllerFindAll: ( query?: { /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/squad`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Squads * @name SquadControllerFindOne * @summary Get Squad * @request GET:/squad/{id} * @secure */ squadControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/squad/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Squads * @name SquadControllerUpdate * @summary Update Squad * @request PATCH:/squad/{id} * @secure */ squadControllerUpdate: ( id: string, data: UpdateSquadDTO, params: RequestParams = {}, ) => this.request({ path: `/squad/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Squads * @name SquadControllerRemove * @summary Delete Squad * @request DELETE:/squad/{id} * @secure */ squadControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/squad/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; workflow = { /** * No description * * @tags Workflow * @name WorkflowControllerFindAll * @summary Get Workflows * @request GET:/workflow * @secure */ workflowControllerFindAll: (params: RequestParams = {}) => this.request({ path: `/workflow`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Workflow * @name WorkflowControllerCreate * @summary Create Workflow * @request POST:/workflow * @secure */ workflowControllerCreate: ( data: CreateWorkflowDTO, params: RequestParams = {}, ) => this.request({ path: `/workflow`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Workflow * @name WorkflowControllerFindOne * @summary Get Workflow * @request GET:/workflow/{id} * @secure */ workflowControllerFindOne: (id: string, params: 
RequestParams = {}) => this.request({ path: `/workflow/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Workflow * @name WorkflowControllerDelete * @summary Delete Workflow * @request DELETE:/workflow/{id} * @secure */ workflowControllerDelete: (id: string, params: RequestParams = {}) => this.request({ path: `/workflow/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Workflow * @name WorkflowControllerUpdate * @summary Update Workflow * @request PATCH:/workflow/{id} * @secure */ workflowControllerUpdate: ( id: string, data: UpdateWorkflowDTO, params: RequestParams = {}, ) => this.request({ path: `/workflow/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), }; call = { /** * No description * * @tags Calls * @name CallControllerCreate * @summary Create Call * @request POST:/call * @secure */ callControllerCreate: (data: CreateCallDTO, params: RequestParams = {}) => this.request({ path: `/call`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerFindAll * @summary List Calls * @request GET:/call * @secure */ callControllerFindAll: ( query?: { /** This is the unique identifier for the call. */ id?: string; /** This will return calls with the specified assistantId. */ assistantId?: string; /** * This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead. * * Only relevant for `outboundPhoneCall` and `inboundPhoneCall` type. */ phoneNumberId?: string; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. 
* @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/call`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerFindOne * @summary Get Call * @request GET:/call/{id} * @secure */ callControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/call/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerUpdate * @summary Update Call * @request PATCH:/call/{id} * @secure */ callControllerUpdate: ( id: string, data: UpdateCallDTO, params: RequestParams = {}, ) => this.request({ path: `/call/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerDeleteCallData * @summary Delete Call Data * @request DELETE:/call/{id} * @secure */ callControllerDeleteCallData: ( id: string, data: DeleteCallDTO, params: 
RequestParams = {}, ) => this.request({ path: `/call/${id}`, method: "DELETE", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerCreatePhoneCall * @summary Create Phone Call * @request POST:/call/phone * @deprecated * @secure */ callControllerCreatePhoneCall: ( data: CreateOutboundCallDTO, params: RequestParams = {}, ) => this.request({ path: `/call/phone`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Calls * @name CallControllerCreateWebCall * @summary Create Web Call * @request POST:/call/web * @secure */ callControllerCreateWebCall: ( data: CreateWebCallDTO, params: RequestParams = {}, ) => this.request({ path: `/call/web`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), }; chat = { /** * No description * * @tags Chats * @name ChatControllerListChats * @summary List Chats * @request GET:/chat * @secure */ chatControllerListChats: ( query?: { /** This is the unique identifier for the assistant that will be used for the chat. */ assistantId?: string; /** This is the unique identifier for the squad that will be used for the chat. */ squadId?: string; /** This is the unique identifier for the session that will be used for the chat. */ sessionId?: string; /** This is the unique identifier for the previous chat to filter by. */ previousChatId?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. 
* @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/chat`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * @description Creates a new chat with optional SMS delivery via transport field. Requires at least one of: assistantId/assistant, sessionId, or previousChatId. Note: sessionId and previousChatId are mutually exclusive. Transport field enables SMS delivery with two modes: (1) New conversation - provide transport.phoneNumberId and transport.customer to create a new session, (2) Existing conversation - provide sessionId to use existing session data. Cannot specify both sessionId and transport fields together. The transport.useLLMGeneratedMessageForOutbound flag controls whether input is processed by LLM (true, default) or forwarded directly as SMS (false). 
* * @tags Chats * @name ChatControllerCreateChat * @summary Create Chat * @request POST:/chat * @secure */ chatControllerCreateChat: ( data: CreateChatDTO, params: RequestParams = {}, ) => this.request({ path: `/chat`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Chats * @name ChatControllerGetChat * @summary Get Chat * @request GET:/chat/{id} * @secure */ chatControllerGetChat: (id: string, params: RequestParams = {}) => this.request({ path: `/chat/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Chats * @name ChatControllerDeleteChat * @summary Delete Chat * @request DELETE:/chat/{id} * @secure */ chatControllerDeleteChat: (id: string, params: RequestParams = {}) => this.request({ path: `/chat/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Chats * @name ChatControllerCreateOpenAiChat * @summary Create Chat (OpenAI Compatible) * @request POST:/chat/responses * @secure */ chatControllerCreateOpenAiChat: ( data: OpenAIResponsesRequest, params: RequestParams = {}, ) => this.request< | ResponseObject | ResponseTextDeltaEvent | ResponseTextDoneEvent | ResponseCompletedEvent | ResponseErrorEvent, any >({ path: `/chat/responses`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Chats * @name ChatControllerCreateWebChat * @summary Create WebChat * @request POST:/chat/web * @secure */ chatControllerCreateWebChat: ( data: CreateWebChatDTO, params: RequestParams = {}, ) => this.request({ path: `/chat/web`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Chats * @name ChatControllerCreateOpenAiWebChat * @summary Create WebChat (OpenAI Compatible) * @request POST:/chat/web/responses * @secure */ chatControllerCreateOpenAiWebChat: ( 
data: OpenAIWebChatRequest, params: RequestParams = {}, ) => this.request< | ResponseObject | ResponseTextDeltaEvent | ResponseTextDoneEvent | ResponseCompletedEvent | ResponseErrorEvent, any >({ path: `/chat/web/responses`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), }; campaign = { /** * No description * * @tags Campaigns * @name CampaignControllerCreate * @summary Create Campaign * @request POST:/campaign * @secure */ campaignControllerCreate: ( data: CreateCampaignDTO, params: RequestParams = {}, ) => this.request({ path: `/campaign`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Campaigns * @name CampaignControllerFindAll * @summary List Campaigns * @request GET:/campaign * @secure */ campaignControllerFindAll: ( query?: { id?: string; status?: "scheduled" | "in-progress" | "ended"; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. 
* @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/campaign`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Campaigns * @name CampaignControllerFindOne * @summary Get Campaign * @request GET:/campaign/{id} * @secure */ campaignControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/campaign/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Campaigns * @name CampaignControllerUpdate * @summary Update Campaign * @request PATCH:/campaign/{id} * @secure */ campaignControllerUpdate: ( id: string, data: UpdateCampaignDTO, params: RequestParams = {}, ) => this.request({ path: `/campaign/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Campaigns * @name CampaignControllerRemove * @summary Delete Campaign * @request DELETE:/campaign/{id} * @secure */ campaignControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/campaign/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; session = { /** * No description * * @tags Sessions * @name SessionControllerCreate * @summary Create Session * @request POST:/session * @secure */ sessionControllerCreate: ( data: CreateSessionDTO, params: RequestParams = {}, ) => this.request({ path: `/session`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Sessions * @name SessionControllerFindAllPaginated * @summary List Sessions * @request GET:/session * @secure */ 
sessionControllerFindAllPaginated: ( query?: { /** * This is the name of the customer. This is just for your own reference. * * For SIP inbound calls, this is extracted from the `From` SIP header with format `"Display Name" `. * @maxLength 40 */ name?: string; /** This is the ID of the assistant to filter sessions by. */ assistantId?: string; /** This is the ID of the squad to filter sessions by. */ squadId?: string; /** This is the ID of the workflow to filter sessions by. */ workflowId?: string; /** * This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it. * * Use cases: * - `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks. * - `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls. * * If `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\+?[a-zA-Z0-9]+$/`). * * @default true (E164 check is enabled) * @default true */ numberE164CheckEnabled?: boolean; /** * This is the extension that will be dialed after the call is answered. * @maxLength 10 * @example null */ extension?: string; /** * These are the overrides for the assistant's settings and template variables specific to this customer. * This allows customization of the assistant's behavior for individual customers in batch calls. */ assistantOverrides?: AssistantOverrides; /** * This is the number of the customer. * @minLength 3 * @maxLength 40 */ number?: string; /** This is the SIP URI of the customer. */ sipUri?: string; /** * This is the email of the customer. * @maxLength 40 */ email?: string; /** * This is the external ID of the customer. * @maxLength 40 */ externalId?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. 
*/ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/session`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Sessions * @name SessionControllerFindOne * @summary Get Session * @request GET:/session/{id} * @secure */ sessionControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/session/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Sessions * @name SessionControllerUpdate * @summary Update Session * @request PATCH:/session/{id} * @secure */ sessionControllerUpdate: ( id: string, data: UpdateSessionDTO, params: RequestParams = {}, ) => this.request({ path: `/session/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Sessions * @name SessionControllerRemove * @summary Delete Session * @request DELETE:/session/{id} * @secure */ sessionControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/session/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; phoneNumber = { /** * @description Use POST /phone-number instead. * * @tags Phone Numbers * @name PhoneNumberControllerImportTwilio * @summary Import Twilio Number * @request POST:/phone-number/import/twilio * @deprecated * @secure */ phoneNumberControllerImportTwilio: ( data: ImportTwilioPhoneNumberDTO, params: RequestParams = {}, ) => this.request< | ({ provider: "byo-phone-number"; } & ByoPhoneNumber) | ({ provider: "twilio"; } & TwilioPhoneNumber) | ({ provider: "vonage"; } & VonagePhoneNumber) | ({ provider: "vapi"; } & VapiPhoneNumber) | ({ provider: "telnyx"; } & TelnyxPhoneNumber), any >({ path: `/phone-number/import/twilio`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * @description Use POST /phone-number instead. 
* * @tags Phone Numbers * @name PhoneNumberControllerImportVonage * @summary Import Vonage Number * @request POST:/phone-number/import/vonage * @deprecated * @secure */ phoneNumberControllerImportVonage: ( data: ImportVonagePhoneNumberDTO, params: RequestParams = {}, ) => this.request< | ({ provider: "byo-phone-number"; } & ByoPhoneNumber) | ({ provider: "twilio"; } & TwilioPhoneNumber) | ({ provider: "vonage"; } & VonagePhoneNumber) | ({ provider: "vapi"; } & VapiPhoneNumber) | ({ provider: "telnyx"; } & TelnyxPhoneNumber), any >({ path: `/phone-number/import/vonage`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Phone Numbers * @name PhoneNumberControllerCreate * @summary Create Phone Number * @request POST:/phone-number * @secure */ phoneNumberControllerCreate: ( data: | ({ provider: "byo-phone-number"; } & CreateByoPhoneNumberDTO) | ({ provider: "twilio"; } & CreateTwilioPhoneNumberDTO) | ({ provider: "vonage"; } & CreateVonagePhoneNumberDTO) | ({ provider: "vapi"; } & CreateVapiPhoneNumberDTO) | ({ provider: "telnyx"; } & CreateTelnyxPhoneNumberDTO), params: RequestParams = {}, ) => this.request< | ({ provider: "byo-phone-number"; } & ByoPhoneNumber) | ({ provider: "twilio"; } & TwilioPhoneNumber) | ({ provider: "vonage"; } & VonagePhoneNumber) | ({ provider: "vapi"; } & VapiPhoneNumber) | ({ provider: "telnyx"; } & TelnyxPhoneNumber), any >({ path: `/phone-number`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Phone Numbers * @name PhoneNumberControllerFindAll * @summary List Phone Numbers * @request GET:/phone-number * @secure */ phoneNumberControllerFindAll: ( query?: { /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. 
* @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request< ( | ({ provider: "byo-phone-number"; } & ByoPhoneNumber) | ({ provider: "twilio"; } & TwilioPhoneNumber) | ({ provider: "vonage"; } & VonagePhoneNumber) | ({ provider: "vapi"; } & VapiPhoneNumber) | ({ provider: "telnyx"; } & TelnyxPhoneNumber) )[], any >({ path: `/phone-number`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Phone Numbers * @name PhoneNumberControllerFindOne * @summary Get Phone Number * @request GET:/phone-number/{id} * @secure */ phoneNumberControllerFindOne: (id: string, params: RequestParams = {}) => this.request< | ({ provider: "byo-phone-number"; } & ByoPhoneNumber) | ({ provider: "twilio"; } & TwilioPhoneNumber) | ({ provider: "vonage"; } & VonagePhoneNumber) | ({ provider: "vapi"; } & VapiPhoneNumber) | ({ provider: "telnyx"; } & TelnyxPhoneNumber), any >({ path: `/phone-number/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No 
description * * @tags Phone Numbers * @name PhoneNumberControllerUpdate * @summary Update Phone Number * @request PATCH:/phone-number/{id} * @secure */ phoneNumberControllerUpdate: ( id: string, data: | ({ provider: "byo-phone-number"; } & UpdateByoPhoneNumberDTO) | ({ provider: "twilio"; } & UpdateTwilioPhoneNumberDTO) | ({ provider: "vonage"; } & UpdateVonagePhoneNumberDTO) | ({ provider: "vapi"; } & UpdateVapiPhoneNumberDTO) | ({ provider: "telnyx"; } & UpdateTelnyxPhoneNumberDTO), params: RequestParams = {}, ) => this.request< | ({ provider: "byo-phone-number"; } & ByoPhoneNumber) | ({ provider: "twilio"; } & TwilioPhoneNumber) | ({ provider: "vonage"; } & VonagePhoneNumber) | ({ provider: "vapi"; } & VapiPhoneNumber) | ({ provider: "telnyx"; } & TelnyxPhoneNumber), any >({ path: `/phone-number/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Phone Numbers * @name PhoneNumberControllerRemove * @summary Delete Phone Number * @request DELETE:/phone-number/{id} * @secure */ phoneNumberControllerRemove: (id: string, params: RequestParams = {}) => this.request< | ({ provider: "byo-phone-number"; } & ByoPhoneNumber) | ({ provider: "twilio"; } & TwilioPhoneNumber) | ({ provider: "vonage"; } & VonagePhoneNumber) | ({ provider: "vapi"; } & VapiPhoneNumber) | ({ provider: "telnyx"; } & TelnyxPhoneNumber), any >({ path: `/phone-number/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; tool = { /** * No description * * @tags Tools * @name ToolControllerCreate * @summary Create Tool * @request POST:/tool * @secure */ toolControllerCreate: ( data: | ({ type: "apiRequest"; } & CreateApiRequestToolDTO) | ({ type: "dtmf"; } & CreateDtmfToolDTO) | ({ type: "endCall"; } & CreateEndCallToolDTO) | ({ type: "function"; } & CreateFunctionToolDTO) | ({ type: "transferCall"; } & CreateTransferCallToolDTO) | ({ type: "handoff"; } & CreateHandoffToolDTO) | ({ type: "bash"; } & 
CreateBashToolDTO) | ({ type: "computer"; } & CreateComputerToolDTO) | ({ type: "textEditor"; } & CreateTextEditorToolDTO) | ({ type: "query"; } & CreateQueryToolDTO) | ({ type: "google.calendar.event.create"; } & CreateGoogleCalendarCreateEventToolDTO) | ({ type: "google.sheets.row.append"; } & CreateGoogleSheetsRowAppendToolDTO) | ({ type: "google.calendar.availability.check"; } & CreateGoogleCalendarCheckAvailabilityToolDTO) | ({ type: "slack.message.send"; } & CreateSlackSendMessageToolDTO) | ({ type: "sms"; } & CreateSmsToolDTO) | ({ type: "mcp"; } & CreateMcpToolDTO) | ({ type: "gohighlevel.calendar.availability.check"; } & CreateGoHighLevelCalendarAvailabilityToolDTO) | ({ type: "gohighlevel.calendar.event.create"; } & CreateGoHighLevelCalendarEventCreateToolDTO) | ({ type: "gohighlevel.contact.create"; } & CreateGoHighLevelContactCreateToolDTO) | ({ type: "gohighlevel.contact.get"; } & CreateGoHighLevelContactGetToolDTO), params: RequestParams = {}, ) => this.request< | ({ type: "apiRequest"; } & ApiRequestTool) | ({ type: "dtmf"; } & DtmfTool) | ({ type: "endCall"; } & EndCallTool) | ({ type: "function"; } & FunctionTool) | ({ type: "transferCall"; } & TransferCallTool) | ({ type: "handoff"; } & HandoffTool) | ({ type: "bash"; } & BashTool) | ({ type: "computer"; } & ComputerTool) | ({ type: "textEditor"; } & TextEditorTool) | ({ type: "query"; } & QueryTool) | ({ type: "google.calendar.event.create"; } & GoogleCalendarCreateEventTool) | ({ type: "google.sheets.row.append"; } & GoogleSheetsRowAppendTool) | ({ type: "google.calendar.availability.check"; } & GoogleCalendarCheckAvailabilityTool) | ({ type: "slack.message.send"; } & SlackSendMessageTool) | ({ type: "sms"; } & SmsTool) | ({ type: "mcp"; } & McpTool) | ({ type: "gohighlevel.calendar.availability.check"; } & GoHighLevelCalendarAvailabilityTool) | ({ type: "gohighlevel.calendar.event.create"; } & GoHighLevelCalendarEventCreateTool) | ({ type: "gohighlevel.contact.create"; } & 
GoHighLevelContactCreateTool) | ({ type: "gohighlevel.contact.get"; } & GoHighLevelContactGetTool), any >({ path: `/tool`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Tools * @name ToolControllerFindAll * @summary List Tools * @request GET:/tool * @secure */ toolControllerFindAll: ( query?: { /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request< ( | ({ type: "apiRequest"; } & ApiRequestTool) | ({ type: "dtmf"; } & DtmfTool) | ({ type: "endCall"; } & EndCallTool) | ({ type: "function"; } & FunctionTool) | ({ type: "transferCall"; } & TransferCallTool) | ({ type: "handoff"; } & HandoffTool) | ({ type: "bash"; } & BashTool) | ({ type: "computer"; } & ComputerTool) | ({ type: "textEditor"; } & TextEditorTool) | ({ type: "query"; } & QueryTool) | ({ type: "google.calendar.event.create"; } & GoogleCalendarCreateEventTool) | ({ type: "google.sheets.row.append"; } & GoogleSheetsRowAppendTool) | ({ type: "google.calendar.availability.check"; } & GoogleCalendarCheckAvailabilityTool) | ({ type: "slack.message.send"; } & SlackSendMessageTool) | ({ type: "sms"; } & SmsTool) | ({ type: "mcp"; } & McpTool) | ({ type: "gohighlevel.calendar.availability.check"; } & GoHighLevelCalendarAvailabilityTool) | ({ type: "gohighlevel.calendar.event.create"; } & GoHighLevelCalendarEventCreateTool) | ({ type: "gohighlevel.contact.create"; } & GoHighLevelContactCreateTool) | ({ type: "gohighlevel.contact.get"; } & GoHighLevelContactGetTool) )[], any >({ path: `/tool`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Tools * @name ToolControllerFindOne * @summary Get Tool * @request GET:/tool/{id} * @secure */ toolControllerFindOne: (id: string, params: RequestParams = {}) => this.request< | ({ type: "apiRequest"; } & ApiRequestTool) | ({ type: "dtmf"; } & DtmfTool) | ({ type: "endCall"; } & EndCallTool) | ({ type: "function"; } & FunctionTool) | ({ type: "transferCall"; } & TransferCallTool) | ({ type: "handoff"; } & HandoffTool) | ({ type: "bash"; } & BashTool) | ({ type: "computer"; } & ComputerTool) | ({ type: "textEditor"; } & TextEditorTool) | ({ type: "query"; } & QueryTool) | ({ type: "google.calendar.event.create"; } & GoogleCalendarCreateEventTool) | ({ type: 
"google.sheets.row.append"; } & GoogleSheetsRowAppendTool) | ({ type: "google.calendar.availability.check"; } & GoogleCalendarCheckAvailabilityTool) | ({ type: "slack.message.send"; } & SlackSendMessageTool) | ({ type: "sms"; } & SmsTool) | ({ type: "mcp"; } & McpTool) | ({ type: "gohighlevel.calendar.availability.check"; } & GoHighLevelCalendarAvailabilityTool) | ({ type: "gohighlevel.calendar.event.create"; } & GoHighLevelCalendarEventCreateTool) | ({ type: "gohighlevel.contact.create"; } & GoHighLevelContactCreateTool) | ({ type: "gohighlevel.contact.get"; } & GoHighLevelContactGetTool), any >({ path: `/tool/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Tools * @name ToolControllerUpdate * @summary Update Tool * @request PATCH:/tool/{id} * @secure */ toolControllerUpdate: ( id: string, data: | ({ type: "apiRequest"; } & UpdateApiRequestToolDTO) | ({ type: "dtmf"; } & UpdateDtmfToolDTO) | ({ type: "endCall"; } & UpdateEndCallToolDTO) | ({ type: "function"; } & UpdateFunctionToolDTO) | ({ type: "transferCall"; } & UpdateTransferCallToolDTO) | ({ type: "handoff"; } & UpdateHandoffToolDTO) | ({ type: "bash"; } & UpdateBashToolDTO) | ({ type: "computer"; } & UpdateComputerToolDTO) | ({ type: "textEditor"; } & UpdateTextEditorToolDTO) | ({ type: "query"; } & UpdateQueryToolDTO) | ({ type: "google.calendar.event.create"; } & UpdateGoogleCalendarCreateEventToolDTO) | ({ type: "google.sheets.row.append"; } & UpdateGoogleSheetsRowAppendToolDTO) | ({ type: "google.calendar.availability.check"; } & UpdateGoogleCalendarCheckAvailabilityToolDTO) | ({ type: "slack.message.send"; } & UpdateSlackSendMessageToolDTO) | ({ type: "sms"; } & UpdateSmsToolDTO) | ({ type: "mcp"; } & UpdateMcpToolDTO) | ({ type: "gohighlevel.calendar.availability.check"; } & UpdateGoHighLevelCalendarAvailabilityToolDTO) | ({ type: "gohighlevel.calendar.event.create"; } & UpdateGoHighLevelCalendarEventCreateToolDTO) | ({ type: 
"gohighlevel.contact.create"; } & UpdateGoHighLevelContactCreateToolDTO) | ({ type: "gohighlevel.contact.get"; } & UpdateGoHighLevelContactGetToolDTO), params: RequestParams = {}, ) => this.request< | ({ type: "apiRequest"; } & ApiRequestTool) | ({ type: "dtmf"; } & DtmfTool) | ({ type: "endCall"; } & EndCallTool) | ({ type: "function"; } & FunctionTool) | ({ type: "transferCall"; } & TransferCallTool) | ({ type: "handoff"; } & HandoffTool) | ({ type: "bash"; } & BashTool) | ({ type: "computer"; } & ComputerTool) | ({ type: "textEditor"; } & TextEditorTool) | ({ type: "query"; } & QueryTool) | ({ type: "google.calendar.event.create"; } & GoogleCalendarCreateEventTool) | ({ type: "google.sheets.row.append"; } & GoogleSheetsRowAppendTool) | ({ type: "google.calendar.availability.check"; } & GoogleCalendarCheckAvailabilityTool) | ({ type: "slack.message.send"; } & SlackSendMessageTool) | ({ type: "sms"; } & SmsTool) | ({ type: "mcp"; } & McpTool) | ({ type: "gohighlevel.calendar.availability.check"; } & GoHighLevelCalendarAvailabilityTool) | ({ type: "gohighlevel.calendar.event.create"; } & GoHighLevelCalendarEventCreateTool) | ({ type: "gohighlevel.contact.create"; } & GoHighLevelContactCreateTool) | ({ type: "gohighlevel.contact.get"; } & GoHighLevelContactGetTool), any >({ path: `/tool/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Tools * @name ToolControllerRemove * @summary Delete Tool * @request DELETE:/tool/{id} * @secure */ toolControllerRemove: (id: string, params: RequestParams = {}) => this.request< | ({ type: "apiRequest"; } & ApiRequestTool) | ({ type: "dtmf"; } & DtmfTool) | ({ type: "endCall"; } & EndCallTool) | ({ type: "function"; } & FunctionTool) | ({ type: "transferCall"; } & TransferCallTool) | ({ type: "handoff"; } & HandoffTool) | ({ type: "bash"; } & BashTool) | ({ type: "computer"; } & ComputerTool) | ({ type: "textEditor"; } & TextEditorTool) | ({ type: 
"query"; } & QueryTool) | ({ type: "google.calendar.event.create"; } & GoogleCalendarCreateEventTool) | ({ type: "google.sheets.row.append"; } & GoogleSheetsRowAppendTool) | ({ type: "google.calendar.availability.check"; } & GoogleCalendarCheckAvailabilityTool) | ({ type: "slack.message.send"; } & SlackSendMessageTool) | ({ type: "sms"; } & SmsTool) | ({ type: "mcp"; } & McpTool) | ({ type: "gohighlevel.calendar.availability.check"; } & GoHighLevelCalendarAvailabilityTool) | ({ type: "gohighlevel.calendar.event.create"; } & GoHighLevelCalendarEventCreateTool) | ({ type: "gohighlevel.contact.create"; } & GoHighLevelContactCreateTool) | ({ type: "gohighlevel.contact.get"; } & GoHighLevelContactGetTool), any >({ path: `/tool/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; file = { /** * @description Use POST /file instead. * * @tags Files * @name FileControllerCreateDeprecated * @summary Upload File * @request POST:/file/upload * @deprecated * @secure */ fileControllerCreateDeprecated: ( data: CreateFileDTO, params: RequestParams = {}, ) => this.request({ path: `/file/upload`, method: "POST", body: data, secure: true, type: ContentType.FormData, format: "json", ...params, }), /** * No description * * @tags Files * @name FileControllerCreate * @summary Upload File * @request POST:/file * @secure */ fileControllerCreate: (data: CreateFileDTO, params: RequestParams = {}) => this.request({ path: `/file`, method: "POST", body: data, secure: true, type: ContentType.FormData, format: "json", ...params, }), /** * No description * * @tags Files * @name FileControllerFindAll * @summary List Files * @request GET:/file * @secure */ fileControllerFindAll: (params: RequestParams = {}) => this.request({ path: `/file`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Files * @name FileControllerFindOne * @summary Get File * @request GET:/file/{id} * @secure */ fileControllerFindOne: (id: string, params: RequestParams 
= {}) => this.request({ path: `/file/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Files * @name FileControllerUpdate * @summary Update File * @request PATCH:/file/{id} * @secure */ fileControllerUpdate: ( id: string, data: UpdateFileDTO, params: RequestParams = {}, ) => this.request({ path: `/file/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Files * @name FileControllerRemove * @summary Delete File * @request DELETE:/file/{id} * @secure */ fileControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/file/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; knowledgeBase = { /** * No description * * @tags Knowledge Base * @name KnowledgeBaseControllerCreate * @summary Create Knowledge Base * @request POST:/knowledge-base * @secure */ knowledgeBaseControllerCreate: ( data: | ({ provider: "trieve"; } & CreateTrieveKnowledgeBaseDTO) | ({ provider: "custom-knowledge-base"; } & CreateCustomKnowledgeBaseDTO), params: RequestParams = {}, ) => this.request< | ({ provider: "trieve"; } & TrieveKnowledgeBase) | ({ provider: "custom-knowledge-base"; } & CustomKnowledgeBase), any >({ path: `/knowledge-base`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Knowledge Base * @name KnowledgeBaseControllerFindAll * @summary List Knowledge Bases * @request GET:/knowledge-base * @secure */ knowledgeBaseControllerFindAll: ( query?: { /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. 
* @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request< ( | ({ provider: "trieve"; } & TrieveKnowledgeBase) | ({ provider: "custom-knowledge-base"; } & CustomKnowledgeBase) )[], any >({ path: `/knowledge-base`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Knowledge Base * @name KnowledgeBaseControllerFindOne * @summary Get Knowledge Base * @request GET:/knowledge-base/{id} * @secure */ knowledgeBaseControllerFindOne: (id: string, params: RequestParams = {}) => this.request< | ({ provider: "trieve"; } & TrieveKnowledgeBase) | ({ provider: "custom-knowledge-base"; } & CustomKnowledgeBase), any >({ path: `/knowledge-base/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Knowledge Base * @name KnowledgeBaseControllerUpdate * @summary Update Knowledge Base * @request PATCH:/knowledge-base/{id} * @secure */ knowledgeBaseControllerUpdate: ( id: string, data: | ({ provider: "trieve"; } & UpdateTrieveKnowledgeBaseDTO) | ({ provider: "custom-knowledge-base"; } & UpdateCustomKnowledgeBaseDTO), params: RequestParams = {}, 
) => this.request< | ({ provider: "trieve"; } & TrieveKnowledgeBase) | ({ provider: "custom-knowledge-base"; } & CustomKnowledgeBase), any >({ path: `/knowledge-base/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Knowledge Base * @name KnowledgeBaseControllerRemove * @summary Delete Knowledge Base * @request DELETE:/knowledge-base/{id} * @secure */ knowledgeBaseControllerRemove: (id: string, params: RequestParams = {}) => this.request< | ({ provider: "trieve"; } & TrieveKnowledgeBase) | ({ provider: "custom-knowledge-base"; } & CustomKnowledgeBase), any >({ path: `/knowledge-base/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; structuredOutput = { /** * No description * * @tags Structured Outputs * @name StructuredOutputControllerFindAll * @summary List Structured Outputs * @request GET:/structured-output * @secure */ structuredOutputControllerFindAll: ( query?: { /** This will return structured outputs where the id matches the specified value. */ id?: string; /** This will return structured outputs where the name matches the specified value. */ name?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. 
* @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/structured-output`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Structured Outputs * @name StructuredOutputControllerCreate * @summary Create Structured Output * @request POST:/structured-output * @secure */ structuredOutputControllerCreate: ( data: CreateStructuredOutputDTO, params: RequestParams = {}, ) => this.request({ path: `/structured-output`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Structured Outputs * @name StructuredOutputControllerFindOne * @summary Get Structured Output * @request GET:/structured-output/{id} * @secure */ structuredOutputControllerFindOne: ( id: string, params: RequestParams = {}, ) => this.request({ path: `/structured-output/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Structured Outputs * @name StructuredOutputControllerUpdate * @summary Update Structured Output * @request PATCH:/structured-output/{id} * @secure */ structuredOutputControllerUpdate: ( id: string, query: { schemaOverride: string; }, data: UpdateStructuredOutputDTO, params: RequestParams = {}, ) => this.request({ path: `/structured-output/${id}`, method: "PATCH", query: query, body: data, secure: true, type: ContentType.Json, format: 
"json", ...params, }), /** * No description * * @tags Structured Outputs * @name StructuredOutputControllerRemove * @summary Delete Structured Output * @request DELETE:/structured-output/{id} * @secure */ structuredOutputControllerRemove: ( id: string, params: RequestParams = {}, ) => this.request({ path: `/structured-output/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Structured Outputs * @name StructuredOutputControllerRun * @summary Run Structured Output * @request POST:/structured-output/run * @secure */ structuredOutputControllerRun: ( data: StructuredOutputRunDTO, params: RequestParams = {}, ) => this.request({ path: `/structured-output/run`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), }; testSuite = { /** * No description * * @tags Test Suites * @name TestSuiteControllerFindAllPaginated * @summary List Test Suites * @request GET:/test-suite * @secure */ testSuiteControllerFindAllPaginated: ( query?: { /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. 
* @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/test-suite`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Test Suites * @name TestSuiteControllerCreate * @summary Create Test Suite * @request POST:/test-suite * @secure */ testSuiteControllerCreate: ( data: CreateTestSuiteDto, params: RequestParams = {}, ) => this.request({ path: `/test-suite`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Test Suites * @name TestSuiteControllerFindOne * @summary Get Test Suite * @request GET:/test-suite/{id} * @secure */ testSuiteControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/test-suite/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Test Suites * @name TestSuiteControllerUpdate * @summary Update Test Suite * @request PATCH:/test-suite/{id} * @secure */ testSuiteControllerUpdate: ( id: string, data: UpdateTestSuiteDto, params: RequestParams = {}, ) => this.request({ path: `/test-suite/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Test Suites * @name TestSuiteControllerRemove * @summary Delete Test Suite * @request DELETE:/test-suite/{id} * @secure */ testSuiteControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/test-suite/${id}`, method: "DELETE", secure: true, format: 
"json", ...params, }), /** * No description * * @tags Test Suite Tests * @name TestSuiteTestControllerFindAllPaginated * @summary List Tests * @request GET:/test-suite/{testSuiteId}/test * @secure */ testSuiteTestControllerFindAllPaginated: ( testSuiteId: string, query?: { /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/test-suite/${testSuiteId}/test`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Test Suite Tests * @name TestSuiteTestControllerCreate * @summary Create Test * @request POST:/test-suite/{testSuiteId}/test * @secure */ testSuiteTestControllerCreate: ( testSuiteId: string, data: | ({ type: "voice"; } & CreateTestSuiteTestVoiceDto) | ({ type: "chat"; } & CreateTestSuiteTestChatDto), params: RequestParams = {}, ) => this.request< | ({ type: "voice"; } & TestSuiteTestVoice) | ({ type: "chat"; } & TestSuiteTestChat), any >({ path: `/test-suite/${testSuiteId}/test`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Test Suite Tests * @name TestSuiteTestControllerFindOne * @summary Get Test * @request GET:/test-suite/{testSuiteId}/test/{id} * @secure */ testSuiteTestControllerFindOne: ( testSuiteId: string, id: string, params: RequestParams = {}, ) => this.request< | ({ type: "voice"; } & TestSuiteTestVoice) | ({ type: "chat"; } & TestSuiteTestChat), any >({ path: `/test-suite/${testSuiteId}/test/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Test Suite Tests * @name TestSuiteTestControllerUpdate * @summary Update Test * @request PATCH:/test-suite/{testSuiteId}/test/{id} * @secure */ testSuiteTestControllerUpdate: ( testSuiteId: string, id: string, data: | ({ type: "voice"; } & UpdateTestSuiteTestVoiceDto) | ({ type: "chat"; } & UpdateTestSuiteTestChatDto), params: RequestParams = {}, ) => this.request< | ({ type: "voice"; } & TestSuiteTestVoice) | ({ type: "chat"; } & TestSuiteTestChat), any >({ path: `/test-suite/${testSuiteId}/test/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Test Suite Tests 
* @name TestSuiteTestControllerRemove * @summary Delete Test * @request DELETE:/test-suite/{testSuiteId}/test/{id} * @secure */ testSuiteTestControllerRemove: ( testSuiteId: string, id: string, params: RequestParams = {}, ) => this.request< | ({ type: "voice"; } & TestSuiteTestVoice) | ({ type: "chat"; } & TestSuiteTestChat), any >({ path: `/test-suite/${testSuiteId}/test/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Test Suite Runs * @name TestSuiteRunControllerFindAllPaginated * @summary List Test Suite Runs * @request GET:/test-suite/{testSuiteId}/run * @secure */ testSuiteRunControllerFindAllPaginated: ( testSuiteId: string, query?: { /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. 
* @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/test-suite/${testSuiteId}/run`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Test Suite Runs * @name TestSuiteRunControllerCreate * @summary Create Test Suite Run * @request POST:/test-suite/{testSuiteId}/run * @secure */ testSuiteRunControllerCreate: ( testSuiteId: string, data: CreateTestSuiteRunDto, params: RequestParams = {}, ) => this.request({ path: `/test-suite/${testSuiteId}/run`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Test Suite Runs * @name TestSuiteRunControllerFindOne * @summary Get Test Suite Run * @request GET:/test-suite/{testSuiteId}/run/{id} * @secure */ testSuiteRunControllerFindOne: ( testSuiteId: string, id: string, params: RequestParams = {}, ) => this.request({ path: `/test-suite/${testSuiteId}/run/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Test Suite Runs * @name TestSuiteRunControllerUpdate * @summary Update Test Suite Run * @request PATCH:/test-suite/{testSuiteId}/run/{id} * @secure */ testSuiteRunControllerUpdate: ( testSuiteId: string, id: string, data: UpdateTestSuiteRunDto, params: RequestParams = {}, ) => this.request({ path: `/test-suite/${testSuiteId}/run/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Test Suite Runs * @name TestSuiteRunControllerRemove * @summary Delete Test Suite Run * @request DELETE:/test-suite/{testSuiteId}/run/{id} * @secure */ testSuiteRunControllerRemove: ( testSuiteId: string, id: string, params: RequestParams = {}, ) => this.request({ path: 
`/test-suite/${testSuiteId}/run/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), }; reporting = { /** * No description * * @tags Insight * @name InsightControllerCreate * @summary Create Insight * @request POST:/reporting/insight * @secure */ insightControllerCreate: ( data: | ({ type: "bar"; } & CreateBarInsightFromCallTableDTO) | ({ type: "pie"; } & CreatePieInsightFromCallTableDTO) | ({ type: "line"; } & CreateLineInsightFromCallTableDTO) | ({ type: "text"; } & CreateTextInsightFromCallTableDTO), params: RequestParams = {}, ) => this.request< | ({ type: "bar"; } & BarInsight) | ({ type: "pie"; } & PieInsight) | ({ type: "line"; } & LineInsight) | ({ type: "text"; } & TextInsight), any >({ path: `/reporting/insight`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Insight * @name InsightControllerFindAll * @summary Get Insights * @request GET:/reporting/insight * @secure */ insightControllerFindAll: ( query?: { id?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. 
* @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/reporting/insight`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Insight * @name InsightControllerUpdate * @summary Update Insight * @request PATCH:/reporting/insight/{id} * @secure */ insightControllerUpdate: ( id: string, data: | ({ type: "bar"; } & UpdateBarInsightFromCallTableDTO) | ({ type: "pie"; } & UpdatePieInsightFromCallTableDTO) | ({ type: "line"; } & UpdateLineInsightFromCallTableDTO) | ({ type: "text"; } & UpdateTextInsightFromCallTableDTO), params: RequestParams = {}, ) => this.request< | ({ type: "bar"; } & BarInsight) | ({ type: "pie"; } & PieInsight) | ({ type: "line"; } & LineInsight) | ({ type: "text"; } & TextInsight), any >({ path: `/reporting/insight/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Insight * @name InsightControllerFindOne * @summary Get Insight * @request GET:/reporting/insight/{id} * @secure */ insightControllerFindOne: (id: string, params: RequestParams = {}) => this.request< | ({ type: "bar"; } & BarInsight) | ({ type: "pie"; } & PieInsight) | ({ type: "line"; } & LineInsight) | ({ type: "text"; } & TextInsight), any >({ path: `/reporting/insight/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Insight * @name InsightControllerRemove * @summary Delete Insight * @request DELETE:/reporting/insight/{id} * @secure */ 
insightControllerRemove: (id: string, params: RequestParams = {}) => this.request< | ({ type: "bar"; } & BarInsight) | ({ type: "pie"; } & PieInsight) | ({ type: "line"; } & LineInsight) | ({ type: "text"; } & TextInsight), any >({ path: `/reporting/insight/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Insight * @name InsightControllerRun * @summary Run Insight * @request POST:/reporting/insight/{id}/run * @secure */ insightControllerRun: ( id: string, data: InsightRunDTO, params: RequestParams = {}, ) => this.request({ path: `/reporting/insight/${id}/run`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Insight * @name InsightControllerPreview * @summary Preview Insight * @request POST:/reporting/insight/preview * @secure */ insightControllerPreview: ( data: | ({ type: "bar"; } & CreateBarInsightFromCallTableDTO) | ({ type: "pie"; } & CreatePieInsightFromCallTableDTO) | ({ type: "line"; } & CreateLineInsightFromCallTableDTO) | ({ type: "text"; } & CreateTextInsightFromCallTableDTO), params: RequestParams = {}, ) => this.request({ path: `/reporting/insight/preview`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), }; eval = { /** * No description * * @tags Eval * @name EvalControllerCreate * @summary Create Eval * @request POST:/eval * @secure */ evalControllerCreate: (data: CreateEvalDTO, params: RequestParams = {}) => this.request({ path: `/eval`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Eval * @name EvalControllerGetPaginated * @summary List Evals * @request GET:/eval * @secure */ evalControllerGetPaginated: ( query?: { id?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. 
*/ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/eval`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Eval * @name EvalControllerUpdate * @summary Update Eval * @request PATCH:/eval/{id} * @secure */ evalControllerUpdate: ( id: string, data: UpdateEvalDTO, params: RequestParams = {}, ) => this.request({ path: `/eval/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Eval * @name EvalControllerRemove * @summary Delete Eval * @request DELETE:/eval/{id} * @secure */ evalControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/eval/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Eval * @name EvalControllerGet * @summary Get Eval * @request GET:/eval/{id} * @secure */ evalControllerGet: (id: string, params: RequestParams = {}) => this.request({ path: `/eval/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Eval * @name EvalControllerRemoveRun * @summary Delete Eval Run * @request DELETE:/eval/run/{id} * @secure */ evalControllerRemoveRun: (id: string, params: RequestParams = {}) => this.request({ path: `/eval/run/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Eval * @name EvalControllerGetRun * @summary Get Eval Run * @request GET:/eval/run/{id} * @secure */ evalControllerGetRun: (id: string, params: RequestParams = {}) => this.request({ path: `/eval/run/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Eval * @name EvalControllerRun * @summary Create Eval Run * @request POST:/eval/run * @secure */ evalControllerRun: (data: CreateEvalRunDTO, params: RequestParams = {}) => this.request({ path: `/eval/run`, method: "POST", body: data, secure: 
true, type: ContentType.Json, ...params, }), /** * No description * * @tags Eval * @name EvalControllerGetRunsPaginated * @summary List Eval Runs * @request GET:/eval/run * @secure */ evalControllerGetRunsPaginated: ( query?: { id?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/eval/run`, method: "GET", query: query, secure: true, format: "json", ...params, }), }; observability = { /** * No description * * @tags Observability/Scorecard * @name ScorecardControllerGet * @summary Get Scorecard * @request GET:/observability/scorecard/{id} * @secure */ scorecardControllerGet: (id: string, params: RequestParams = {}) => this.request({ path: `/observability/scorecard/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Observability/Scorecard * @name ScorecardControllerUpdate * @summary Update Scorecard * @request PATCH:/observability/scorecard/{id} * @secure */ scorecardControllerUpdate: ( id: string, data: UpdateScorecardDTO, params: RequestParams = {}, ) => this.request({ path: `/observability/scorecard/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Observability/Scorecard * @name ScorecardControllerRemove * @summary Delete Scorecard * @request DELETE:/observability/scorecard/{id} * @secure */ scorecardControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/observability/scorecard/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Observability/Scorecard * @name ScorecardControllerGetPaginated * @summary List Scorecards * @request GET:/observability/scorecard * @secure */ scorecardControllerGetPaginated: ( query?: { id?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. 
* @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/observability/scorecard`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Observability/Scorecard * @name ScorecardControllerCreate * @summary Create Scorecard * @request POST:/observability/scorecard * @secure */ scorecardControllerCreate: ( data: CreateScorecardDTO, params: RequestParams = {}, ) => this.request({ path: `/observability/scorecard`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), };
// ---------------------------------------------------------------
// Org API (`/org`): organization CRUD, membership management
// (list users, leave, remove member, invite, role update) and
// per-org JWT generation. Every endpoint here is authenticated
// (`secure: true`); mutating calls send JSON bodies.
// ---------------------------------------------------------------
org = { /** * No description * * @tags Orgs * @name OrgControllerCreate * @summary Create Org * @request POST:/org * @secure */ orgControllerCreate: (data: CreateOrgDTO, params: RequestParams = {}) => this.request({ path: `/org`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Orgs * @name OrgControllerFindAll * @summary List Orgs * @request GET:/org * 
@secure */ orgControllerFindAll: (params: RequestParams = {}) => this.request({ path: `/org`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Orgs * @name OrgControllerFeatureFlagEnabled * @summary Check if Feature Flag is enabled * @request GET:/org/feature-flag * @secure */ orgControllerFeatureFlagEnabled: ( query: { key: string; orgId: string; }, params: RequestParams = {}, ) => this.request({ path: `/org/feature-flag`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Orgs * @name OrgControllerFindOne * @summary Get Org * @request GET:/org/{id} * @secure */ orgControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/org/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Orgs * @name OrgControllerUpdate * @summary Update Org * @request PATCH:/org/{id} * @secure */ orgControllerUpdate: ( id: string, data: UpdateOrgDTO, params: RequestParams = {}, ) => this.request({ path: `/org/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Orgs * @name OrgControllerDeleteOrg * @summary Delete Org * @request DELETE:/org/{id} * @secure */ orgControllerDeleteOrg: (id: string, params: RequestParams = {}) => this.request({ path: `/org/${id}`, method: "DELETE", secure: true, ...params, }), /** * No description * * @tags Orgs * @name OrgControllerFindAllUsers * @summary List Users * @request GET:/org/{id}/user * @secure */ orgControllerFindAllUsers: (id: string, params: RequestParams = {}) => this.request({ path: `/org/${id}/user`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Orgs * @name OrgControllerOrgLeave * @summary Leave Org * @request DELETE:/org/{id}/leave * @secure */ orgControllerOrgLeave: (id: string, params: RequestParams = {}) => this.request({ path: 
`/org/${id}/leave`, method: "DELETE", secure: true, ...params, }), /** * No description * * @tags Orgs * @name OrgControllerOrgRemoveUser * @summary Remove Org Member * @request DELETE:/org/{id}/member/{memberId}/leave * @secure */ orgControllerOrgRemoveUser: ( id: string, memberId: string, params: RequestParams = {}, ) => this.request({ path: `/org/${id}/member/${memberId}/leave`, method: "DELETE", secure: true, ...params, }), /** * No description * * @tags Orgs * @name OrgControllerUsersInvite * @summary Invite User * @request POST:/org/{id}/invite * @secure */ orgControllerUsersInvite: ( id: string, data: InviteUserDTO, params: RequestParams = {}, ) => this.request({ path: `/org/${id}/invite`, method: "POST", body: data, secure: true, type: ContentType.Json, ...params, }), /** * No description * * @tags Orgs * @name OrgControllerUserUpdate * @summary Update User Role * @request PATCH:/org/{id}/role * @secure */ orgControllerUserUpdate: ( id: string, data: UpdateUserRoleDTO, params: RequestParams = {}, ) => this.request({ path: `/org/${id}/role`, method: "PATCH", body: data, secure: true, type: ContentType.Json, ...params, }), /** * No description * * @tags Orgs * @name OrgControllerOrgToken * @summary Generate User Org JWT * @request GET:/org/{id}/auth * @secure */ orgControllerOrgToken: (id: string, params: RequestParams = {}) => this.request({ path: `/org/${id}/auth`, method: "GET", secure: true, format: "json", ...params, }), };
// ---------------------------------------------------------------
// Token API (`/token`): create/list/get/update/delete API tokens.
// The list endpoint takes optional createdAt/updatedAt range
// filters (date-time strings) and a `limit` (0-1000, default 100).
// ---------------------------------------------------------------
token = { /** * No description * * @tags Tokens * @name TokenControllerCreate * @summary Create Token * @request POST:/token * @secure */ tokenControllerCreate: (data: CreateTokenDTO, params: RequestParams = {}) => this.request({ path: `/token`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Tokens * @name TokenControllerFindAll * @summary List Tokens * @request GET:/token * @secure */ tokenControllerFindAll: ( query?: { /** * This is the maximum number 
of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/token`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Tokens * @name TokenControllerFindOne * @summary Get Token * @request GET:/token/{id} * @secure */ tokenControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/token/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Tokens * @name TokenControllerUpdate * @summary Update Token * @request PATCH:/token/{id} * @secure */ tokenControllerUpdate: ( id: string, data: UpdateTokenDTO, params: RequestParams = {}, ) => this.request({ path: `/token/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Tokens * @name TokenControllerRemove * @summary Delete Token * @request DELETE:/token/{id} * @secure */ tokenControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/token/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), };
// ---------------------------------------------------------------
// Credential API (`/credential`): provider credentials are modelled
// as discriminated unions on the `provider` string literal. Request
// bodies use the Create*/Update*CredentialDTO variants; responses
// use the matching *Credential variants.
// NOTE(review): some response variants ("telnyx", "aws", "minimax")
// are typed `& any` by the generator, which collapses those members
// to `any` — presumably the OpenAPI spec lacks schemas for them.
// NOTE(review): the update-DTO union omits several providers present
// in create (e.g. mistral, speechmatics, trieve, minimax) — looks
// spec-driven; confirm against the upstream OpenAPI document.
// ---------------------------------------------------------------
credential = { /** * No description * * @tags Credentials * @name CredentialControllerCreate * @summary Create Credential * @request POST:/credential * @secure */ credentialControllerCreate: ( data: | ({ provider: "11labs"; } & CreateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & CreateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & CreateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & CreateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & CreateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & CreateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & CreateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & CreateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & CreateCerebrasCredentialDTO) | ({ provider: "cloudflare"; } & CreateCloudflareCredentialDTO) | ({ provider: "custom-llm"; } & 
CreateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & CreateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & CreateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & CreateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & CreateGcpCredentialDTO) | ({ provider: "gladia"; } & CreateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & CreateGoHighLevelCredentialDTO) | ({ provider: "google"; } & CreateGoogleCredentialDTO) | ({ provider: "groq"; } & CreateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & CreateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & CreateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & CreateLmntCredentialDTO) | ({ provider: "make"; } & CreateMakeCredentialDTO) | ({ provider: "openai"; } & CreateOpenAICredentialDTO) | ({ provider: "openrouter"; } & CreateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & CreatePerplexityAICredentialDTO) | ({ provider: "playht"; } & CreatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & CreateRimeAICredentialDTO) | ({ provider: "runpod"; } & CreateRunpodCredentialDTO) | ({ provider: "s3"; } & CreateS3CredentialDTO) | ({ provider: "supabase"; } & CreateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & CreateSmallestAICredentialDTO) | ({ provider: "tavus"; } & CreateTavusCredentialDTO) | ({ provider: "together-ai"; } & CreateTogetherAICredentialDTO) | ({ provider: "twilio"; } & CreateTwilioCredentialDTO) | ({ provider: "vonage"; } & CreateVonageCredentialDTO) | ({ provider: "webhook"; } & CreateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & CreateCustomCredentialDTO) | ({ provider: "xai"; } & CreateXAiCredentialDTO) | ({ provider: "neuphonic"; } & CreateNeuphonicCredentialDTO) | ({ provider: "hume"; } & CreateHumeCredentialDTO) | ({ provider: "mistral"; } & CreateMistralCredentialDTO) | ({ provider: "speechmatics"; } & CreateSpeechmaticsCredentialDTO) | ({ provider: "trieve"; } & CreateTrieveCredentialDTO) | ({ provider: 
"google.calendar.oauth2-client"; } & CreateGoogleCalendarOAuth2ClientCredentialDTO) | ({ provider: "google.calendar.oauth2-authorization"; } & CreateGoogleCalendarOAuth2AuthorizationCredentialDTO) | ({ provider: "google.sheets.oauth2-authorization"; } & CreateGoogleSheetsOAuth2AuthorizationCredentialDTO) | ({ provider: "slack.oauth2-authorization"; } & CreateSlackOAuth2AuthorizationCredentialDTO) | ({ provider: "ghl.oauth2-authorization"; } & CreateGoHighLevelMCPCredentialDTO) | ({ provider: "inworld"; } & CreateInworldCredentialDTO) | ({ provider: "minimax"; } & CreateMinimaxCredentialDTO), params: RequestParams = {}, ) => this.request< | ({ provider: "11labs"; } & ElevenLabsCredential) | ({ provider: "anthropic"; } & AnthropicCredential) | ({ provider: "anyscale"; } & AnyscaleCredential) | ({ provider: "assembly-ai"; } & AssemblyAICredential) | ({ provider: "azure"; } & AzureCredential) | ({ provider: "azure-openai"; } & AzureOpenAICredential) | ({ provider: "byo-sip-trunk"; } & ByoSipTrunkCredential) | ({ provider: "cartesia"; } & CartesiaCredential) | ({ provider: "cerebras"; } & CerebrasCredential) | ({ provider: "custom-llm"; } & CustomLLMCredential) | ({ provider: "deepgram"; } & DeepgramCredential) | ({ provider: "deepinfra"; } & DeepInfraCredential) | ({ provider: "deep-seek"; } & DeepSeekCredential) | ({ provider: "gcp"; } & GcpCredential) | ({ provider: "gladia"; } & GladiaCredential) | ({ provider: "gohighlevel"; } & GoHighLevelCredential) | ({ provider: "google"; } & GoogleCredential) | ({ provider: "groq"; } & GroqCredential) | ({ provider: "inflection-ai"; } & InflectionAICredential) | ({ provider: "langfuse"; } & LangfuseCredential) | ({ provider: "lmnt"; } & LmntCredential) | ({ provider: "make"; } & MakeCredential) | ({ provider: "openai"; } & OpenAICredential) | ({ provider: "openrouter"; } & OpenRouterCredential) | ({ provider: "perplexity-ai"; } & PerplexityAICredential) | ({ provider: "playht"; } & PlayHTCredential) | ({ provider: "rime-ai"; } 
& RimeAICredential) | ({ provider: "runpod"; } & RunpodCredential) | ({ provider: "s3"; } & S3Credential) | ({ provider: "supabase"; } & SupabaseCredential) | ({ provider: "smallest-ai"; } & SmallestAICredential) | ({ provider: "neuphonic"; } & NeuphonicCredential) | ({ provider: "hume"; } & HumeCredential) | ({ provider: "tavus"; } & TavusCredential) | ({ provider: "together-ai"; } & TogetherAICredential) | ({ provider: "twilio"; } & TwilioCredential) | ({ provider: "vonage"; } & VonageCredential) | ({ provider: "webhook"; } & WebhookCredential) | ({ provider: "custom-credential"; } & CustomCredential) | ({ provider: "xai"; } & XAiCredential) | ({ provider: "mistral"; } & MistralCredential) | ({ provider: "speechmatics"; } & SpeechmaticsCredential) | ({ provider: "trieve"; } & TrieveCredential) | ({ provider: "telnyx"; } & any) | ({ provider: "cloudflare"; } & CloudflareCredential) | ({ provider: "google.calendar.oauth2-client"; } & GoogleCalendarOAuth2ClientCredential) | ({ provider: "google.calendar.oauth2-authorization"; } & GoogleCalendarOAuth2AuthorizationCredential) | ({ provider: "google.sheets.oauth2-authorization"; } & GoogleSheetsOAuth2AuthorizationCredential) | ({ provider: "slack.oauth2-authorization"; } & SlackOAuth2AuthorizationCredential) | ({ provider: "aws"; } & any) | ({ provider: "ghl.oauth2-authorization"; } & GoHighLevelMCPCredential) | ({ provider: "inworld"; } & InworldCredential) | ({ provider: "minimax"; } & any), any >({ path: `/credential`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Credentials * @name CredentialControllerFindAll * @summary List Credentials * @request GET:/credential * @secure */ credentialControllerFindAll: ( query?: { /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. 
* @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request< ( | ({ provider: "11labs"; } & ElevenLabsCredential) | ({ provider: "anthropic"; } & AnthropicCredential) | ({ provider: "anyscale"; } & AnyscaleCredential) | ({ provider: "assembly-ai"; } & AssemblyAICredential) | ({ provider: "azure"; } & AzureCredential) | ({ provider: "azure-openai"; } & AzureOpenAICredential) | ({ provider: "byo-sip-trunk"; } & ByoSipTrunkCredential) | ({ provider: "cartesia"; } & CartesiaCredential) | ({ provider: "cerebras"; } & CerebrasCredential) | ({ provider: "custom-llm"; } & CustomLLMCredential) | ({ provider: "deepgram"; } & DeepgramCredential) | ({ provider: "deepinfra"; } & DeepInfraCredential) | ({ provider: "deep-seek"; } & DeepSeekCredential) | ({ provider: "gcp"; } & GcpCredential) | ({ provider: "gladia"; } & GladiaCredential) | ({ provider: "gohighlevel"; } & GoHighLevelCredential) | ({ provider: "google"; } & GoogleCredential) | ({ provider: "groq"; } & GroqCredential) | ({ provider: 
"inflection-ai"; } & InflectionAICredential) | ({ provider: "langfuse"; } & LangfuseCredential) | ({ provider: "lmnt"; } & LmntCredential) | ({ provider: "make"; } & MakeCredential) | ({ provider: "openai"; } & OpenAICredential) | ({ provider: "openrouter"; } & OpenRouterCredential) | ({ provider: "perplexity-ai"; } & PerplexityAICredential) | ({ provider: "playht"; } & PlayHTCredential) | ({ provider: "rime-ai"; } & RimeAICredential) | ({ provider: "runpod"; } & RunpodCredential) | ({ provider: "s3"; } & S3Credential) | ({ provider: "supabase"; } & SupabaseCredential) | ({ provider: "smallest-ai"; } & SmallestAICredential) | ({ provider: "neuphonic"; } & NeuphonicCredential) | ({ provider: "hume"; } & HumeCredential) | ({ provider: "tavus"; } & TavusCredential) | ({ provider: "together-ai"; } & TogetherAICredential) | ({ provider: "twilio"; } & TwilioCredential) | ({ provider: "vonage"; } & VonageCredential) | ({ provider: "webhook"; } & WebhookCredential) | ({ provider: "custom-credential"; } & CustomCredential) | ({ provider: "xai"; } & XAiCredential) | ({ provider: "mistral"; } & MistralCredential) | ({ provider: "speechmatics"; } & SpeechmaticsCredential) | ({ provider: "trieve"; } & TrieveCredential) | ({ provider: "telnyx"; } & any) | ({ provider: "cloudflare"; } & CloudflareCredential) | ({ provider: "google.calendar.oauth2-client"; } & GoogleCalendarOAuth2ClientCredential) | ({ provider: "google.calendar.oauth2-authorization"; } & GoogleCalendarOAuth2AuthorizationCredential) | ({ provider: "google.sheets.oauth2-authorization"; } & GoogleSheetsOAuth2AuthorizationCredential) | ({ provider: "slack.oauth2-authorization"; } & SlackOAuth2AuthorizationCredential) | ({ provider: "aws"; } & any) | ({ provider: "ghl.oauth2-authorization"; } & GoHighLevelMCPCredential) | ({ provider: "inworld"; } & InworldCredential) | ({ provider: "minimax"; } & any) )[], any >({ path: `/credential`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No 
description * * @tags Credentials * @name CredentialControllerFindOne * @summary Get Credential * @request GET:/credential/{id} * @secure */ credentialControllerFindOne: (id: string, params: RequestParams = {}) => this.request< | ({ provider: "11labs"; } & ElevenLabsCredential) | ({ provider: "anthropic"; } & AnthropicCredential) | ({ provider: "anyscale"; } & AnyscaleCredential) | ({ provider: "assembly-ai"; } & AssemblyAICredential) | ({ provider: "azure"; } & AzureCredential) | ({ provider: "azure-openai"; } & AzureOpenAICredential) | ({ provider: "byo-sip-trunk"; } & ByoSipTrunkCredential) | ({ provider: "cartesia"; } & CartesiaCredential) | ({ provider: "cerebras"; } & CerebrasCredential) | ({ provider: "custom-llm"; } & CustomLLMCredential) | ({ provider: "deepgram"; } & DeepgramCredential) | ({ provider: "deepinfra"; } & DeepInfraCredential) | ({ provider: "deep-seek"; } & DeepSeekCredential) | ({ provider: "gcp"; } & GcpCredential) | ({ provider: "gladia"; } & GladiaCredential) | ({ provider: "gohighlevel"; } & GoHighLevelCredential) | ({ provider: "google"; } & GoogleCredential) | ({ provider: "groq"; } & GroqCredential) | ({ provider: "inflection-ai"; } & InflectionAICredential) | ({ provider: "langfuse"; } & LangfuseCredential) | ({ provider: "lmnt"; } & LmntCredential) | ({ provider: "make"; } & MakeCredential) | ({ provider: "openai"; } & OpenAICredential) | ({ provider: "openrouter"; } & OpenRouterCredential) | ({ provider: "perplexity-ai"; } & PerplexityAICredential) | ({ provider: "playht"; } & PlayHTCredential) | ({ provider: "rime-ai"; } & RimeAICredential) | ({ provider: "runpod"; } & RunpodCredential) | ({ provider: "s3"; } & S3Credential) | ({ provider: "supabase"; } & SupabaseCredential) | ({ provider: "smallest-ai"; } & SmallestAICredential) | ({ provider: "neuphonic"; } & NeuphonicCredential) | ({ provider: "hume"; } & HumeCredential) | ({ provider: "tavus"; } & TavusCredential) | ({ provider: "together-ai"; } & TogetherAICredential) | ({ 
provider: "twilio"; } & TwilioCredential) | ({ provider: "vonage"; } & VonageCredential) | ({ provider: "webhook"; } & WebhookCredential) | ({ provider: "custom-credential"; } & CustomCredential) | ({ provider: "xai"; } & XAiCredential) | ({ provider: "mistral"; } & MistralCredential) | ({ provider: "speechmatics"; } & SpeechmaticsCredential) | ({ provider: "trieve"; } & TrieveCredential) | ({ provider: "telnyx"; } & any) | ({ provider: "cloudflare"; } & CloudflareCredential) | ({ provider: "google.calendar.oauth2-client"; } & GoogleCalendarOAuth2ClientCredential) | ({ provider: "google.calendar.oauth2-authorization"; } & GoogleCalendarOAuth2AuthorizationCredential) | ({ provider: "google.sheets.oauth2-authorization"; } & GoogleSheetsOAuth2AuthorizationCredential) | ({ provider: "slack.oauth2-authorization"; } & SlackOAuth2AuthorizationCredential) | ({ provider: "aws"; } & any) | ({ provider: "ghl.oauth2-authorization"; } & GoHighLevelMCPCredential) | ({ provider: "inworld"; } & InworldCredential) | ({ provider: "minimax"; } & any), any >({ path: `/credential/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Credentials * @name CredentialControllerUpdate * @summary Update Credential * @request PATCH:/credential/{id} * @secure */ credentialControllerUpdate: ( id: string, data: | ({ provider: "11labs"; } & UpdateElevenLabsCredentialDTO) | ({ provider: "anthropic"; } & UpdateAnthropicCredentialDTO) | ({ provider: "anyscale"; } & UpdateAnyscaleCredentialDTO) | ({ provider: "assembly-ai"; } & UpdateAssemblyAICredentialDTO) | ({ provider: "azure-openai"; } & UpdateAzureOpenAICredentialDTO) | ({ provider: "azure"; } & UpdateAzureCredentialDTO) | ({ provider: "byo-sip-trunk"; } & UpdateByoSipTrunkCredentialDTO) | ({ provider: "cartesia"; } & UpdateCartesiaCredentialDTO) | ({ provider: "cerebras"; } & UpdateCerebrasCredentialDTO) | ({ provider: "custom-llm"; } & UpdateCustomLLMCredentialDTO) | ({ provider: "deepgram"; } & 
UpdateDeepgramCredentialDTO) | ({ provider: "deepinfra"; } & UpdateDeepInfraCredentialDTO) | ({ provider: "deep-seek"; } & UpdateDeepSeekCredentialDTO) | ({ provider: "gcp"; } & UpdateGcpCredentialDTO) | ({ provider: "gladia"; } & UpdateGladiaCredentialDTO) | ({ provider: "gohighlevel"; } & UpdateGoHighLevelCredentialDTO) | ({ provider: "google"; } & UpdateGoogleCredentialDTO) | ({ provider: "groq"; } & UpdateGroqCredentialDTO) | ({ provider: "inflection-ai"; } & UpdateInflectionAICredentialDTO) | ({ provider: "langfuse"; } & UpdateLangfuseCredentialDTO) | ({ provider: "lmnt"; } & UpdateLmntCredentialDTO) | ({ provider: "make"; } & UpdateMakeCredentialDTO) | ({ provider: "openai"; } & UpdateOpenAICredentialDTO) | ({ provider: "openrouter"; } & UpdateOpenRouterCredentialDTO) | ({ provider: "perplexity-ai"; } & UpdatePerplexityAICredentialDTO) | ({ provider: "playht"; } & UpdatePlayHTCredentialDTO) | ({ provider: "rime-ai"; } & UpdateRimeAICredentialDTO) | ({ provider: "runpod"; } & UpdateRunpodCredentialDTO) | ({ provider: "s3"; } & UpdateS3CredentialDTO) | ({ provider: "supabase"; } & UpdateSupabaseCredentialDTO) | ({ provider: "smallest-ai"; } & UpdateSmallestAICredentialDTO) | ({ provider: "neuphonic"; } & UpdateNeuphonicCredentialDTO) | ({ provider: "hume"; } & UpdateHumeCredentialDTO) | ({ provider: "tavus"; } & UpdateTavusCredentialDTO) | ({ provider: "together-ai"; } & UpdateTogetherAICredentialDTO) | ({ provider: "twilio"; } & UpdateTwilioCredentialDTO) | ({ provider: "vonage"; } & UpdateVonageCredentialDTO) | ({ provider: "webhook"; } & UpdateWebhookCredentialDTO) | ({ provider: "custom-credential"; } & UpdateCustomCredentialDTO) | ({ provider: "xai"; } & UpdateXAiCredentialDTO) | ({ provider: "inworld"; } & UpdateInworldCredentialDTO), params: RequestParams = {}, ) => this.request< | ({ provider: "11labs"; } & ElevenLabsCredential) | ({ provider: "anthropic"; } & AnthropicCredential) | ({ provider: "anyscale"; } & AnyscaleCredential) | ({ provider: 
"assembly-ai"; } & AssemblyAICredential) | ({ provider: "azure"; } & AzureCredential) | ({ provider: "azure-openai"; } & AzureOpenAICredential) | ({ provider: "byo-sip-trunk"; } & ByoSipTrunkCredential) | ({ provider: "cartesia"; } & CartesiaCredential) | ({ provider: "cerebras"; } & CerebrasCredential) | ({ provider: "custom-llm"; } & CustomLLMCredential) | ({ provider: "deepgram"; } & DeepgramCredential) | ({ provider: "deepinfra"; } & DeepInfraCredential) | ({ provider: "deep-seek"; } & DeepSeekCredential) | ({ provider: "gcp"; } & GcpCredential) | ({ provider: "gladia"; } & GladiaCredential) | ({ provider: "gohighlevel"; } & GoHighLevelCredential) | ({ provider: "google"; } & GoogleCredential) | ({ provider: "groq"; } & GroqCredential) | ({ provider: "inflection-ai"; } & InflectionAICredential) | ({ provider: "langfuse"; } & LangfuseCredential) | ({ provider: "lmnt"; } & LmntCredential) | ({ provider: "make"; } & MakeCredential) | ({ provider: "openai"; } & OpenAICredential) | ({ provider: "openrouter"; } & OpenRouterCredential) | ({ provider: "perplexity-ai"; } & PerplexityAICredential) | ({ provider: "playht"; } & PlayHTCredential) | ({ provider: "rime-ai"; } & RimeAICredential) | ({ provider: "runpod"; } & RunpodCredential) | ({ provider: "s3"; } & S3Credential) | ({ provider: "supabase"; } & SupabaseCredential) | ({ provider: "smallest-ai"; } & SmallestAICredential) | ({ provider: "neuphonic"; } & NeuphonicCredential) | ({ provider: "hume"; } & HumeCredential) | ({ provider: "tavus"; } & TavusCredential) | ({ provider: "together-ai"; } & TogetherAICredential) | ({ provider: "twilio"; } & TwilioCredential) | ({ provider: "vonage"; } & VonageCredential) | ({ provider: "webhook"; } & WebhookCredential) | ({ provider: "custom-credential"; } & CustomCredential) | ({ provider: "xai"; } & XAiCredential) | ({ provider: "mistral"; } & MistralCredential) | ({ provider: "speechmatics"; } & SpeechmaticsCredential) | ({ provider: "trieve"; } & TrieveCredential) | ({ 
provider: "telnyx"; } & any) | ({ provider: "cloudflare"; } & CloudflareCredential) | ({ provider: "google.calendar.oauth2-client"; } & GoogleCalendarOAuth2ClientCredential) | ({ provider: "google.calendar.oauth2-authorization"; } & GoogleCalendarOAuth2AuthorizationCredential) | ({ provider: "google.sheets.oauth2-authorization"; } & GoogleSheetsOAuth2AuthorizationCredential) | ({ provider: "slack.oauth2-authorization"; } & SlackOAuth2AuthorizationCredential) | ({ provider: "aws"; } & any) | ({ provider: "ghl.oauth2-authorization"; } & GoHighLevelMCPCredential) | ({ provider: "inworld"; } & InworldCredential) | ({ provider: "minimax"; } & any), any >({ path: `/credential/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Credentials * @name CredentialControllerRemove * @summary Delete Credential * @request DELETE:/credential/{id} * @secure */ credentialControllerRemove: (id: string, params: RequestParams = {}) => this.request< | ({ provider: "11labs"; } & ElevenLabsCredential) | ({ provider: "anthropic"; } & AnthropicCredential) | ({ provider: "anyscale"; } & AnyscaleCredential) | ({ provider: "assembly-ai"; } & AssemblyAICredential) | ({ provider: "azure"; } & AzureCredential) | ({ provider: "azure-openai"; } & AzureOpenAICredential) | ({ provider: "byo-sip-trunk"; } & ByoSipTrunkCredential) | ({ provider: "cartesia"; } & CartesiaCredential) | ({ provider: "cerebras"; } & CerebrasCredential) | ({ provider: "custom-llm"; } & CustomLLMCredential) | ({ provider: "deepgram"; } & DeepgramCredential) | ({ provider: "deepinfra"; } & DeepInfraCredential) | ({ provider: "deep-seek"; } & DeepSeekCredential) | ({ provider: "gcp"; } & GcpCredential) | ({ provider: "gladia"; } & GladiaCredential) | ({ provider: "gohighlevel"; } & GoHighLevelCredential) | ({ provider: "google"; } & GoogleCredential) | ({ provider: "groq"; } & GroqCredential) | ({ provider: "inflection-ai"; } & 
InflectionAICredential) | ({ provider: "langfuse"; } & LangfuseCredential) | ({ provider: "lmnt"; } & LmntCredential) | ({ provider: "make"; } & MakeCredential) | ({ provider: "openai"; } & OpenAICredential) | ({ provider: "openrouter"; } & OpenRouterCredential) | ({ provider: "perplexity-ai"; } & PerplexityAICredential) | ({ provider: "playht"; } & PlayHTCredential) | ({ provider: "rime-ai"; } & RimeAICredential) | ({ provider: "runpod"; } & RunpodCredential) | ({ provider: "s3"; } & S3Credential) | ({ provider: "supabase"; } & SupabaseCredential) | ({ provider: "smallest-ai"; } & SmallestAICredential) | ({ provider: "neuphonic"; } & NeuphonicCredential) | ({ provider: "hume"; } & HumeCredential) | ({ provider: "tavus"; } & TavusCredential) | ({ provider: "together-ai"; } & TogetherAICredential) | ({ provider: "twilio"; } & TwilioCredential) | ({ provider: "vonage"; } & VonageCredential) | ({ provider: "webhook"; } & WebhookCredential) | ({ provider: "custom-credential"; } & CustomCredential) | ({ provider: "xai"; } & XAiCredential) | ({ provider: "mistral"; } & MistralCredential) | ({ provider: "speechmatics"; } & SpeechmaticsCredential) | ({ provider: "trieve"; } & TrieveCredential) | ({ provider: "telnyx"; } & any) | ({ provider: "cloudflare"; } & CloudflareCredential) | ({ provider: "google.calendar.oauth2-client"; } & GoogleCalendarOAuth2ClientCredential) | ({ provider: "google.calendar.oauth2-authorization"; } & GoogleCalendarOAuth2AuthorizationCredential) | ({ provider: "google.sheets.oauth2-authorization"; } & GoogleSheetsOAuth2AuthorizationCredential) | ({ provider: "slack.oauth2-authorization"; } & SlackOAuth2AuthorizationCredential) | ({ provider: "aws"; } & any) | ({ provider: "ghl.oauth2-authorization"; } & GoHighLevelMCPCredential) | ({ provider: "inworld"; } & InworldCredential) | ({ provider: "minimax"; } & any), any >({ path: `/credential/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), };
// ---------------------------------------------------------------
// Template API (`/template`): tool-template CRUD plus a pinned
// list; the group's methods continue on the following lines.
// ---------------------------------------------------------------
template = { /** * No description * 
* @tags Templates * @name TemplateControllerCreate * @summary Create Template * @request POST:/template * @secure */ templateControllerCreate: ( data: CreateToolTemplateDTO[], params: RequestParams = {}, ) => this.request({ path: `/template`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Templates * @name TemplateControllerFindAll * @summary List Templates * @request GET:/template * @secure */ templateControllerFindAll: ( query?: { collectionId?: string; visibility?: "public" | "private"; provider?: "make" | "gohighlevel" | "function"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/template`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Templates * @name TemplateControllerFindAllPinned * @summary List Templates * @request GET:/template/pinned * @secure */ templateControllerFindAllPinned: (params: RequestParams = {}) => this.request({ path: `/template/pinned`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Templates * @name TemplateControllerFindOne * @summary Get Template * @request GET:/template/{id} * @secure */ templateControllerFindOne: (id: string, params: RequestParams = {}) => this.request({ path: `/template/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Templates * @name TemplateControllerUpdate * @summary Update Template * @request PATCH:/template/{id} * @secure */ templateControllerUpdate: ( id: string, data: UpdateToolTemplateDTO, params: RequestParams = {}, ) => this.request({ path: `/template/${id}`, method: "PATCH", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Templates * @name TemplateControllerRemove * @summary Delete Template * @request DELETE:/template/{id} * @secure */ templateControllerRemove: (id: string, params: RequestParams = {}) => this.request({ path: `/template/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), };
// ---------------------------------------------------------------
// Voice Library API (`/voice-library`): browse and sync voices by
// provider; the provider path parameter is a closed literal union.
// ---------------------------------------------------------------
voiceLibrary = { /** * No description * * @tags Voice Library * @name VoiceLibraryControllerVoiceGetByProvider * @summary Get voices in Voice Library by Provider * @request GET:/voice-library/{provider} * @secure */ voiceLibraryControllerVoiceGetByProvider: ( provider: | "vapi" | "11labs" | "azure" | "cartesia" | "custom-voice" | "deepgram" | "hume" | "lmnt" | "neuphonic" | "openai" | "playht" | "rime-ai" | "smallest-ai" | "tavus" | "sesame" | "inworld" | "minimax" 
| "orpheus", query?: { page?: number; keyword?: string; language?: string; accent?: string; gender?: string; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. * @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. 
* @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/voice-library/${provider}`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Voice Library * @name VoiceLibraryControllerVoiceGetAccentsByProvider * @summary Get accents in Voice Library by Provider * @request GET:/voice-library/{provider}/accents * @secure */ voiceLibraryControllerVoiceGetAccentsByProvider: ( provider: | "vapi" | "11labs" | "azure" | "cartesia" | "custom-voice" | "deepgram" | "hume" | "lmnt" | "neuphonic" | "openai" | "playht" | "rime-ai" | "smallest-ai" | "tavus" | "sesame" | "inworld" | "minimax" | "orpheus", params: RequestParams = {}, ) => this.request({ path: `/voice-library/${provider}/accents`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Voice Library * @name VoiceLibraryControllerVoiceLibrarySyncByProvider * @summary Sync Private voices in Voice Library by Provider * @request POST:/voice-library/sync/{provider} * @secure */ voiceLibraryControllerVoiceLibrarySyncByProvider: ( provider: | "vapi" | "11labs" | "azure" | "cartesia" | "custom-voice" | "deepgram" | "hume" | "lmnt" | "neuphonic" | "openai" | "playht" | "rime-ai" | "smallest-ai" | "tavus" | "sesame" | "inworld" | "minimax" | "orpheus", params: RequestParams = {}, ) => this.request({ path: `/voice-library/sync/${provider}`, method: "POST", secure: true, format: "json", ...params, }), /** * No description * * @tags Voice Library * @name VoiceLibraryControllerVoiceLibrarySyncDefaultVoices * @summary Sync Default voices in Voice Library by Providers * @request POST:/voice-library/sync * @secure */ voiceLibraryControllerVoiceLibrarySyncDefaultVoices: ( data: SyncVoiceLibraryDTO, params: RequestParams = {}, ) => this.request({ path: `/voice-library/sync`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags 
Voice Library * @name VoiceLibraryControllerVoiceLibraryCreateSesameVoice * @summary Create a new voice in the Voice Library using Sesame * @request POST:/voice-library/create-sesame-voice * @secure */ voiceLibraryControllerVoiceLibraryCreateSesameVoice: ( data: CreateSesameVoiceDTO, params: RequestParams = {}, ) => this.request({ path: `/voice-library/create-sesame-voice`, method: "POST", body: data, secure: true, type: ContentType.Json, ...params, }), }; provider = { /** * No description * * @tags Provider Resources * @name ProviderResourceControllerCreateProviderResource * @summary Create Provider Resource * @request POST:/provider/{provider}/{resourceName} * @secure */ providerResourceControllerCreateProviderResource: ( provider: "11labs", resourceName: "pronunciation-dictionary", params: RequestParams = {}, ) => this.request({ path: `/provider/${provider}/${resourceName}`, method: "POST", secure: true, format: "json", ...params, }), /** * No description * * @tags Provider Resources * @name ProviderResourceControllerGetProviderResourcesPaginated * @summary List Provider Resources * @request GET:/provider/{provider}/{resourceName} * @secure */ providerResourceControllerGetProviderResourcesPaginated: ( provider: "11labs", resourceName: "pronunciation-dictionary", query?: { id?: string; resourceId?: string; /** * This is the page number to return. Defaults to 1. * @min 1 */ page?: number; /** This is the sort order for pagination. Defaults to 'DESC'. */ sortOrder?: "ASC" | "DESC"; /** * This is the maximum number of items to return. Defaults to 100. * @min 0 * @max 1000 */ limit?: number; /** * This will return items where the createdAt is greater than the specified value. * @format date-time */ createdAtGt?: string; /** * This will return items where the createdAt is less than the specified value. * @format date-time */ createdAtLt?: string; /** * This will return items where the createdAt is greater than or equal to the specified value. 
* @format date-time */ createdAtGe?: string; /** * This will return items where the createdAt is less than or equal to the specified value. * @format date-time */ createdAtLe?: string; /** * This will return items where the updatedAt is greater than the specified value. * @format date-time */ updatedAtGt?: string; /** * This will return items where the updatedAt is less than the specified value. * @format date-time */ updatedAtLt?: string; /** * This will return items where the updatedAt is greater than or equal to the specified value. * @format date-time */ updatedAtGe?: string; /** * This will return items where the updatedAt is less than or equal to the specified value. * @format date-time */ updatedAtLe?: string; }, params: RequestParams = {}, ) => this.request({ path: `/provider/${provider}/${resourceName}`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Provider Resources * @name ProviderResourceControllerGetProviderResource * @summary Get Provider Resource * @request GET:/provider/{provider}/{resourceName}/{id} * @secure */ providerResourceControllerGetProviderResource: ( provider: "11labs", resourceName: "pronunciation-dictionary", id: string, params: RequestParams = {}, ) => this.request({ path: `/provider/${provider}/${resourceName}/${id}`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Provider Resources * @name ProviderResourceControllerDeleteProviderResource * @summary Delete Provider Resource * @request DELETE:/provider/{provider}/{resourceName}/{id} * @secure */ providerResourceControllerDeleteProviderResource: ( provider: "11labs", resourceName: "pronunciation-dictionary", id: string, params: RequestParams = {}, ) => this.request({ path: `/provider/${provider}/${resourceName}/${id}`, method: "DELETE", secure: true, format: "json", ...params, }), /** * No description * * @tags Provider Resources * @name 
ProviderResourceControllerUpdateProviderResource * @summary Update Provider Resource * @request PATCH:/provider/{provider}/{resourceName}/{id} * @secure */ providerResourceControllerUpdateProviderResource: ( provider: "11labs", resourceName: "pronunciation-dictionary", id: string, params: RequestParams = {}, ) => this.request({ path: `/provider/${provider}/${resourceName}/${id}`, method: "PATCH", secure: true, format: "json", ...params, }), /** * No description * * @tags Providers * @name ProviderControllerGetWorkflows * @request GET:/{provider}/workflows * @secure */ providerControllerGetWorkflows: ( provider: "make" | "ghl", query?: { locationId?: string; }, params: RequestParams = {}, ) => this.request({ path: `/${provider}/workflows`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Providers * @name ProviderControllerGetWorkflowTriggerHook * @request GET:/{provider}/workflows/{workflowId}/hooks * @secure */ providerControllerGetWorkflowTriggerHook: ( provider: "make" | "ghl", workflowId: string, params: RequestParams = {}, ) => this.request({ path: `/${provider}/workflows/${workflowId}/hooks`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Providers * @name ProviderControllerGetLocations * @request GET:/{provider}/locations * @secure */ providerControllerGetLocations: ( provider: "make" | "ghl", params: RequestParams = {}, ) => this.request({ path: `/${provider}/locations`, method: "GET", secure: true, format: "json", ...params, }), /** * No description * * @tags Providers * @name VoiceProviderControllerSearchVoices * @summary Search Voice from Provider Voice Library. * @request GET:/{provider}/voices/search * @deprecated * @secure */ voiceProviderControllerSearchVoices: ( provider: string, query: { /** The name of the voice from the provider you want to search. 
*/ name: string; }, params: RequestParams = {}, ) => this.request({ path: `/${provider}/voices/search`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Providers * @name VoiceProviderControllerSearchVoice * @summary Search Voice from Provider Voice Library. * @request GET:/{provider}/voice/search * @secure */ voiceProviderControllerSearchVoice: ( provider: string, query: { /** The name of the voice from the provider you want to search. */ name: string; }, params: RequestParams = {}, ) => this.request({ path: `/${provider}/voice/search`, method: "GET", query: query, secure: true, format: "json", ...params, }), /** * No description * * @tags Providers * @name VoiceProviderControllerAddVoices * @summary Add Shared Voice to your Provider Account. * @request POST:/{provider}/voices/add * @deprecated * @secure */ voiceProviderControllerAddVoices: ( provider: string, data: AddVoiceToProviderDTO, params: RequestParams = {}, ) => this.request({ path: `/${provider}/voices/add`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), /** * No description * * @tags Providers * @name VoiceProviderControllerAddVoice * @summary Add Shared Voice to your Provider Account. * @request POST:/{provider}/voice/add * @secure */ voiceProviderControllerAddVoice: ( provider: string, data: AddVoiceToProviderDTO, params: RequestParams = {}, ) => this.request({ path: `/${provider}/voice/add`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), }; v11Labs = { /** * No description * * @tags Providers * @name VoiceProviderControllerCloneVoices * @summary Clone a voice to the provider account and add to Vapi Voice Library. 
* @request POST:/11labs/voice/clone * @secure */ voiceProviderControllerCloneVoices: ( data: CloneVoiceDTO, params: RequestParams = {}, ) => this.request({ path: `/11labs/voice/clone`, method: "POST", body: data, secure: true, type: ContentType.FormData, ...params, }), }; analytics = { /** * No description * * @tags Analytics * @name AnalyticsControllerQuery * @summary Create Analytics Queries * @request POST:/analytics * @secure */ analyticsControllerQuery: ( data: AnalyticsQueryDTO, params: RequestParams = {}, ) => this.request({ path: `/analytics`, method: "POST", body: data, secure: true, type: ContentType.Json, format: "json", ...params, }), }; }