import { VercelAiClientConfigureProps } from '../llm/vercelai-client';

/**
 * Contract implemented by every provider-specific assistant model class
 * returned by {@link GetAssistantModelByProvider}.
 */
export interface AssistantClass {
  /**
   * Configure the assistant model (API key, model name, and optional
   * server-side endpoints).
   */
  configure(
    config: VercelAiClientConfigureProps & {
      chatEndpoint?: string;
      voiceEndpoint?: string;
    }
  ): void;

  /**
   * Get (or lazily create) the assistant instance.
   *
   * NOTE(review): the original declaration used a bare `Promise`, which is
   * invalid TypeScript (the generic requires a type argument). The concrete
   * instance type is declared elsewhere in the package, so `unknown` is used
   * here — tighten to the real instance type if it is exported.
   */
  getInstance(): Promise<unknown>;

  /** Register a tool (and an optional UI component) with the assistant. */
  registerTool(props: {
    name: string;
    tool: unknown;
    component?: unknown;
  }): void;

  /** Attach additional result data to a previously issued tool call. */
  addToolResult?(toolCallId: string, additionalData: unknown): void;

  /**
   * Verify that the given API key and model can reach the provider.
   *
   * NOTE(review): the resolved value's shape is not visible from this file
   * (presumably a boolean or a status object); typed `unknown` rather than
   * asserting an ungrounded contract — confirm against the implementation.
   */
  testConnection?(apiKey: string, model: string): Promise<unknown>;

  /** Base URL of the provider endpoint, when one is available. */
  getBaseURL?(): string | void;
}

/**
 * Returns the appropriate Assistant model based on the provider. (Internal use)
 *
 * @example
 * ```tsx
 * import { GetAssistantModelByProvider } from '@openassistant/core';
 *
 * const AssistantModel = await GetAssistantModelByProvider({
 *   provider: 'openai',
 * });
 *
 * // configure the assistant model
 * AssistantModel.configure({
 *   apiKey: 'your-api-key',
 *   model: 'gpt-4o',
 * });
 *
 * // initialize the assistant model
 * const assistant = await AssistantModel.getInstance();
 *
 * // send a message to the assistant
 * const result = await assistant.processTextMessage({
 *   text: 'Hello, world!',
 * });
 * ```
 *
 * @param options - The options object
 * @param options.provider - The name of the AI provider. The supported
 *   providers are: 'openai', 'anthropic', 'google', 'deepseek', 'xai',
 *   'ollama', 'bedrock'
 * @param options.chatEndpoint - The chat endpoint that handles the chat
 *   requests, e.g. '/api/chat'. This is required for server-side support.
 * @returns Promise that resolves to the assistant model class.
 */
export declare function GetAssistantModelByProvider({
  provider,
  chatEndpoint,
}: {
  provider?: string;
  chatEndpoint?: string;
}): Promise<AssistantClass>;