/**
 * Voice.ai Web SDK
 *
 * A single, unified SDK for Voice.ai services.
 *
 * @example
 * ```typescript
 * import VoiceAI from '@voice-ai-labs/web-sdk';
 *
 * const voiceai = new VoiceAI({ apiKey: 'vk_...' });
 *
 * // Real-time voice connection
 * await voiceai.connect({ agentId: 'agent-123' });
 * voiceai.onTranscription((seg) => console.log(`${seg.role}: ${seg.text}`));
 * await voiceai.disconnect();
 *
 * // REST API operations
 * const agents = await voiceai.agents.list();
 * await voiceai.agents.create({ name: 'Support', config: {...} });
 * const history = await voiceai.analytics.getCallHistory();
 * ```
 */
import { AgentClient } from './client/agents';
import { AnalyticsClient } from './client/analytics';
import { KnowledgeBaseClient } from './client/knowledge-base';
import { ManagedToolsClient } from './client/managed-tools';
import { PhoneNumberClient } from './client/phone-numbers';
import { TTSClient } from './client/tts';
import type { ConnectionOptions, ConnectionDetails, ConnectionStatus, TranscriptionHandler, ConnectionStatusHandler, ErrorHandler, AudioCaptureOptions, AgentStateInfo, MicrophoneState, AgentStateHandler, AudioLevelHandler, MicrophoneStateHandler, VoiceAIConfig } from './types';
/**
 * VoiceAI - The unified Voice.ai SDK
 *
 * Provides both real-time voice agent connections and REST API access
 * through a single, easy-to-use interface.
 *
 * @example
 * ```typescript
 * import VoiceAI from '@voice-ai-labs/web-sdk';
 *
 * const voiceai = new VoiceAI({ apiKey: 'vk_your_api_key' });
 *
 * // Connect to a voice agent
 * await voiceai.connect({ agentId: 'agent-123' });
 *
 * // Listen for transcriptions
 * voiceai.onTranscription((segment) => {
 *   console.log(`${segment.role}: ${segment.text}`);
 * });
 *
 * // Disconnect when done
 * await voiceai.disconnect();
 * ```
 */
export declare class VoiceAI {
    /** Agent management - create, update, deploy, pause, delete agents. */
    get agents(): AgentClient;
    private _agents?;
    /** Analytics - call history, transcripts, stats. */
    get analytics(): AnalyticsClient;
    private _analytics?;
    /** Knowledge Base - manage RAG documents. */
    get knowledgeBase(): KnowledgeBaseClient;
    private _knowledgeBase?;
    /** Phone Numbers - search, select, release phone numbers. */
    get phoneNumbers(): PhoneNumberClient;
    private _phoneNumbers?;
    /** Text-to-Speech - generate speech, manage voices, clone voices. */
    get tts(): TTSClient;
    private _tts?;
    /** Managed tools - provider-specific OAuth/status/disconnect helpers. */
    get managedTools(): ManagedToolsClient;
    private _managedTools?;
    private room;
    private connectionStatus;
    private transcriptionHandlers;
    private statusHandlers;
    private errorHandlers;
    private agentStateHandlers;
    private audioLevelHandlers;
    private microphoneStateHandlers;
    private apiUrl;
    private apiKey;
    private authToken?;
    private getAuthToken?;
    private effectiveApiKey;
    private cachedConnectionDetails;
    private currentAgentState;
    private agentParticipantId;
    private audioLevelInterval;
    /**
     * Create a new VoiceAI client
     *
     * @param config - Configuration options (optional for frontend-only usage)
     * @param config.apiKey - Your Voice.ai API key (required for API operations and `getConnectionDetails`)
     * @param config.apiUrl - Custom API URL (optional, defaults to production)
     */
    constructor(config?: VoiceAIConfig);
    /**
     * Get connection details for a voice agent.
     *
     * Requires an API key. Call this from your backend, then pass the result
     * to `connectRoom()` on the frontend.
     *
     * @param options - Connection options (agentId, testMode, etc.)
     * @returns Connection details (serverUrl, participantToken, callId)
     *
     * @example
     * ```typescript
     * // Server-side: get connection details
     * const details = await voiceai.getConnectionDetails({ agentId: 'agent-123' });
     * // Return details to frontend...
     *
     * // With test mode
     * const details = await voiceai.getConnectionDetails({
     *   agentId: 'agent-123',
     *   testMode: true
     * });
     * ```
     */
    getConnectionDetails(options: ConnectionOptions): Promise<ConnectionDetails>;
    /**
     * Connect to a LiveKit room using pre-fetched connection details.
     *
     * This is the browser-safe method -- it only needs a room token,
     * no API key required. Get the connection details from your backend
     * using `getConnectionDetails()`.
     *
     * @param connectionDetails - Server URL, participant token, and call ID from your backend
     * @param options - Audio/microphone options
     *
     * @example
     * ```typescript
     * // Frontend: connect using token from your backend
     * const voiceai = new VoiceAI();
     * await voiceai.connectRoom(
     *   { serverUrl, participantToken, callId },
     *   { autoPublishMic: true }
     * );
     * ```
     */
    // NOTE(review): the Pick key list was stripped during extraction; reconstructed
    // from the `{ autoPublishMic: true }` example above — confirm against the
    // original index.d.ts / source map.
    connectRoom(connectionDetails: ConnectionDetails, options?: Pick<AudioCaptureOptions, 'autoPublishMic'>): Promise<void>;
    /**
     * Connect to a voice agent for real-time conversation.
     *
     * Convenience method that combines `getConnectionDetails()` + `connectRoom()`.
     *
     * @param options - Connection options
     *
     * @example
     * ```typescript
     * await voiceai.connect({ agentId: 'agent-123' });
     * ```
     */
    connect(options: ConnectionOptions): Promise<void>;
    /**
     * Disconnect from the room and end the call.
     *
     * Signals the server to free the concurrency slot using endToken from
     * connection details. Connection details always include end_token when
     * fetched from the API; backends must pass it when using pre-fetched details.
     * If no endToken, the server detects room disconnect as fallback.
     */
    disconnect(): Promise<void>;
    /**
     * Check if currently connected to a voice agent
     */
    isConnected(): boolean;
    /**
     * Get current connection status
     */
    getStatus(): ConnectionStatus;
    /**
     * Get current agent state (listening, speaking, thinking, etc.)
     */
    getAgentState(): AgentStateInfo;
    /**
     * Get current microphone state
     */
    getMicrophoneState(): MicrophoneState;
    /**
     * Send a text message to the agent
     */
    sendMessage(text: string): Promise<void>;
    /**
     * Enable or disable the microphone
     */
    setMicrophoneEnabled(enabled: boolean): Promise<void>;
    /**
     * Subscribe to transcription events (user and agent speech)
     * @returns Unsubscribe function
     */
    onTranscription(handler: TranscriptionHandler): () => void;
    /**
     * Subscribe to connection status changes
     * @returns Unsubscribe function
     */
    onStatusChange(handler: ConnectionStatusHandler): () => void;
    /**
     * Subscribe to error events
     * @returns Unsubscribe function
     */
    onError(handler: ErrorHandler): () => void;
    /**
     * Subscribe to agent state changes (listening, speaking, thinking)
     * @returns Unsubscribe function
     */
    onAgentStateChange(handler: AgentStateHandler): () => void;
    /**
     * Subscribe to audio level updates (for visualizations)
     * @returns Unsubscribe function
     */
    onAudioLevel(handler: AudioLevelHandler): () => void;
    /**
     * Subscribe to microphone state changes
     * @returns Unsubscribe function
     */
    onMicrophoneStateChange(handler: MicrophoneStateHandler): () => void;
    private isTokenExpired;
    private getOrRefreshConnectionDetails;
    /**
     * Fetch connection details from the developer's backend endpoint.
     * The backend holds the API key and calls the Voice.AI API server-side.
     */
    /**
     * Fetch connection details using the API key directly.
     * Used by the public getConnectionDetails() method.
     */
    private fetchConnectionDetails;
    private resolveAuthToken;
    private fetchConnectionDetailsFromApi;
    private setupAudio;
    private setupRoomListeners;
    private detectAgentParticipant;
    private startAudioLevelMonitoring;
    private stopAudioLevelMonitoring;
    private updateAgentState;
    private updateStatus;
    private emitTranscription;
    private emitError;
    private emitAgentState;
    private emitAudioLevel;
    private emitMicrophoneState;
}
/** Default export - the VoiceAI class */
export default VoiceAI;
/** Error class for API errors */
export { VoiceAIError } from './client/base';
export type { AuthTokenProvider, VoiceAIConfig, ConnectionOptions, ConnectionDetails, ConnectionStatus, TranscriptionSegment, AgentState, AgentStateInfo, AudioLevelInfo, MicrophoneState, Agent, ManagedToolsConfig, GoogleCalendarOperation, GoogleSheetsOperation, GoogleGmailOperation, GoogleManagedToolOperation, GoogleManagedToolOperationOption, GoogleOAuthStartOptions, GoogleOAuthStartResponse, GoogleConnectionStatus, VoiceResponse, VoiceStatus, RecordingStatus, } from './types';
/**
 * Generate optimized audio capture options for voice agents
 *
 * @param options - Optional overrides
 * @returns AudioCaptureOptions optimized for voice
 */
export declare function generateOptimalAudioOptions(options?: Partial<AudioCaptureOptions>): AudioCaptureOptions;
export { GOOGLE_IDENTITY_SCOPES, GOOGLE_CALENDAR_SCOPE, GOOGLE_SHEETS_SCOPE, GOOGLE_GMAIL_READ_SCOPE, GOOGLE_GMAIL_SEND_SCOPE, GOOGLE_CALENDAR_OPERATION_OPTIONS, GOOGLE_SHEETS_OPERATION_OPTIONS, GOOGLE_GMAIL_OPERATION_OPTIONS, GOOGLE_MANAGED_OPERATION_OPTIONS, IANA_TIMEZONE_OPTIONS, getManagedToolSelectedOperations, toggleManagedToolOperation, getRequiredGoogleScopes, getMissingGoogleScopes, getMissingGoogleScopesForManagedTools, isGoogleReconnectRequired, getGoogleReconnectState, hasEnabledGoogleManagedTools, } from './managed-tools/google';
export { VoiceAgentWidget } from './components/VoiceAgentWidget';
export type { VoiceAgentWidgetOptions, VoiceAgentWidgetTheme } from './components/VoiceAgentWidget';
//# sourceMappingURL=index.d.ts.map