import type { ToolSet } from '../../../_types/@internal_ai-sdk-v5/dist/index.js';
import type { OuterLLMRun } from '../../types.js';

// NOTE(review): the generated declaration used bare `Record` (no type
// arguments), which is invalid TypeScript — `Record<K, V>` requires two.
// Widened here to `Record<string, unknown>`; confirm against the emitting
// source, which may use a narrower value type.

/** Token-usage counters for a generation (all optional; provider-dependent). */
type LLMExecutionUsage = {
    inputTokens?: number | undefined;
    outputTokens?: number | undefined;
    totalTokens?: number | undefined;
    reasoningTokens?: number | undefined;
    cachedInputTokens?: number | undefined;
};

/**
 * State object flowing through the "llm-execution" workflow step.
 * The step's input and output states share this exact shape, so it is
 * factored into one (non-exported) alias rather than duplicated inline.
 */
type LLMExecutionState = {
    messageId: string;
    messages: {
        all: any[];
        user: any[];
        nonUser: any[];
    };
    output: {
        usage: LLMExecutionUsage;
        steps: any[];
        text?: string | undefined;
        reasoning?: any[] | undefined;
        reasoningText?: string | undefined;
        files?: any[] | undefined;
        toolCalls?: any[] | undefined;
        toolResults?: any[] | undefined;
        sources?: any[] | undefined;
        staticToolCalls?: any[] | undefined;
        dynamicToolCalls?: any[] | undefined;
        staticToolResults?: any[] | undefined;
        dynamicToolResults?: any[] | undefined;
    };
    metadata: {
        id?: string | undefined;
        model?: string | undefined;
        modelId?: string | undefined;
        modelMetadata?: {
            modelId: string;
            modelVersion: string;
            modelProvider: string;
        } | undefined;
        timestamp?: Date | undefined;
        providerMetadata?: Record<string, unknown> | undefined;
        headers?: Record<string, unknown> | undefined;
        request?: Record<string, unknown> | undefined;
    };
    stepResult: {
        reason: string;
        warnings: any[];
        isContinued: boolean;
        logprobs?: any;
        totalUsage?: LLMExecutionUsage | undefined;
        headers?: Record<string, unknown> | undefined;
        messageId?: string | undefined;
        request?: Record<string, unknown> | undefined;
    };
    processorRetryCount?: number | undefined;
    fallbackModelIndex?: number | undefined;
    processorRetryFeedback?: string | undefined;
    isTaskCompleteCheckFailed?: boolean | undefined;
};

/**
 * Creates the "llm-execution" workflow step from a destructured
 * {@link OuterLLMRun} configuration (models, tool wiring, processors,
 * streaming controller, tracing, retry/fallback knobs, etc.).
 *
 * @param run - The outer LLM run configuration; `messageId` is locally
 *   rebound to `messageIdPassed` in the implementation.
 * @returns A workflow `Step` tagged `"llm-execution"` whose input and
 *   output states are both {@link LLMExecutionState}.
 */
export declare function createLLMExecutionStep({ models, _internal, messageId: messageIdPassed, runId, tools, toolChoice, activeTools, messageList, includeRawChunks, modelSettings, providerOptions, options, toolCallStreaming, controller, structuredOutput, outputProcessors, inputProcessors, errorProcessors, logger, agentId, downloadRetries, downloadConcurrency, processorStates, requestContext, methodType, modelSpanTracker, autoResumeSuspendedTools, maxProcessorRetries, workspace, outputWriter, }: OuterLLMRun): import("../../../workflows").Step<"llm-execution", unknown, LLMExecutionState, LLMExecutionState, unknown, unknown, import("../../../workflows").DefaultEngineType, unknown>;
//# sourceMappingURL=llm-execution-step.d.ts.map