import { AbstractAssistant } from './assistant';
import {
  AudioToTextProps,
  ProcessImageMessageProps,
  ProcessMessageProps,
  RegisterToolProps,
  StreamMessage,
  StreamMessageCallback,
  ToolCallComponents,
} from '../types';
import { CoreMessage, ToolChoice, ToolSet } from 'ai';
import { ToolInvocation } from '@ai-sdk/ui-utils';

/**
 * Returns the highest step number found across the given tool invocations,
 * or `undefined` when the array is empty or not provided.
 */
export declare function extractMaxToolInvocationStep(toolInvocations: ToolInvocation[] | undefined): number | undefined;

/**
 * Check if the message is an assistant message with completed tool calls.
 * The message must have at least one tool invocation and all tool invocations
 * must have a result.
 */
export declare function isAssistantMessageWithCompletedToolCalls(message: CoreMessage): boolean;

/**
 * Checks if another request should be triggered based on the current message state
 * @param messages Current message array
 * @param messageCount Previous message count before last request
 * @param maxSteps Maximum number of allowed steps
 * @param currentStep Current maximum tool invocation step
 * @returns boolean indicating if another request should be triggered
 */
export declare function shouldTriggerNextRequest(messages: CoreMessage[], messageCount: number, maxSteps: number, currentStep: number): boolean;

/** Configuration options accepted by {@link VercelAi.configure}. */
type VercelAiConfigureProps = {
  apiKey?: string;
  model?: string;
  instructions?: string;
  temperature?: number;
  topP?: number;
  description?: string;
  version?: string;
  maxTokens?: number;
  chatEndpoint?: string;
  voiceEndpoint?: string;
  // FIX: `ToolChoice` is generic in the 'ai' package; the bare reference did not compile.
  toolChoice?: ToolChoice<ToolSet>;
  toolCallStreaming?: boolean;
  // FIX: `Record` requires two type arguments; HTTP headers map string names to string values.
  headers?: Record<string, string>;
  baseURL?: string;
  /** Custom model context windows to extend or override the default mapping */
  // NOTE(review): assumed model name -> context-window size in tokens — confirm against implementation.
  modelContextWindows?: Record<string, number>;
};

/**
 * Vercel AI Assistant for Server only.
*/ export declare class VercelAi extends AbstractAssistant { protected static chatEndpoint: string; protected static voiceEndpoint: string; protected static instructions: string; protected static toolChoice: ToolChoice; protected static toolCallStreaming: boolean; protected static maxSteps: number; protected static additionalContext: string; protected static temperature: number; protected static topP: number | undefined; protected static description: string; protected static maxTokens: number; protected static hasInitializedServer: boolean; protected static headers: Record; protected static baseURL: string; protected static modelContextWindows: Record; protected toolSteps: number; protected streamMessage: StreamMessage; /** * The messages array, which is used to send to the LLM. * * To persist the messages, you can call the {@link setMessages} method, and the {@link getMessages} method. */ protected messages: CoreMessage[]; protected abortController: AbortController | null; protected static tools: ToolSet; protected static toolComponent: Record; protected static toolResults: Record; protected static instance: VercelAi | null; protected constructor(); static getInstance(): Promise; static configure(config: VercelAiConfigureProps): void; static registerTool({ name, tool, component, }: RegisterToolProps): void; static addToolResult(toolCallId: string, additionalData: unknown): void; static getToolResult(toolCallId: string): unknown; getMessages(): CoreMessage[]; addMessage(message: CoreMessage): void; setMessages(messages: CoreMessage[]): void; getComponents(): ToolCallComponents; setAbortController(abortController: AbortController): void; stop(): void; restart(): void; static getBaseURL(): void; processImageMessage({ imageMessage, textMessage, streamMessageCallback, }: ProcessImageMessageProps): Promise; processTextMessage({ textMessage, streamMessageCallback, imageMessage, onToolFinished, }: ProcessMessageProps): Promise<{ streamMessage: StreamMessage; messages: 
CoreMessage[]; }>; protected triggerRequest({ streamMessageCallback, imageMessage, onToolFinished, }: { streamMessageCallback: StreamMessageCallback; imageMessage?: string; onToolFinished?: (toolCallId: string, additionalData: unknown) => void; }): Promise<{}>; /** * audioToText method to use API endpoint for audio transcription * @param audioBlob - The audio blob to transcribe * @returns The transcribed text */ audioToText({ audioBlob, }: AudioToTextProps): Promise; } export {};