import type * as ElevenLabs from "../index";

/**
 * Configuration input for an agent's conversational text-to-speech pipeline.
 *
 * Every field is optional; anything left unset falls back to the service's
 * defaults for the agent.
 */
export interface TtsConversationalConfigInput {
    /** The model to use for TTS. */
    modelId?: ElevenLabs.TtsConversationalModel;
    /** The voice ID to use for TTS. */
    voiceId?: string;
    /** Additional supported voices for the agent. */
    supportedVoices?: ElevenLabs.SupportedVoice[];
    /**
     * When enabled, applies an expressive audio tags prompt.
     * Automatically disabled for non-v3 models.
     */
    expressiveMode?: boolean;
    /**
     * Suggested audio tags to boost expressive speech (for eleven_v3 and
     * eleven_v3_conversational models). The agent can still use other tags
     * not listed here.
     */
    suggestedAudioTags?: ElevenLabs.SuggestedAudioTag[];
    /** The audio format to use for TTS. */
    agentOutputAudioFormat?: ElevenLabs.TtsOutputFormat;
    /** The optimization for streaming latency. */
    optimizeStreamingLatency?: ElevenLabs.TtsOptimizeStreamingLatency;
    /** The stability of generated speech. */
    stability?: number;
    /** The speed of generated speech. */
    speed?: number;
    /** The similarity boost for generated speech. */
    similarityBoost?: number;
    /**
     * Method for converting numbers to words before converting text to
     * speech. If set to SYSTEM_PROMPT, the system prompt will be updated to
     * include normalization instructions. If set to ELEVENLABS, the text will
     * be normalized after generation, incurring slight additional latency.
     */
    textNormalisationType?: ElevenLabs.TextNormalisationType;
    /** The pronunciation dictionary locators. */
    pronunciationDictionaryLocators?: ElevenLabs.PydanticPronunciationDictionaryVersionLocator[];
}