// =============================================================================
// Generated TypeScript declaration file (output.d.ts).
//
// This file is compiler output — see the `//# sourceMappingURL=output.d.ts.map`
// marker at the end. It declares the public typing surface of MastraModelOutput:
//   - createDestructurableOutput(): wraps a MastraModelOutput so its properties
//     keep their context when destructured (per its own doc comment below);
//   - PromiseResults / FullOutput: the shape returned by getFullOutput();
//   - MastraModelOutput: the stream-result class exposing promise-based getters
//     (text, reasoning, toolCalls, usage, …), raw streams (fullStream,
//     objectStream, elementStream, textStream), and state (de)serialization
//     (serializeState / deserializeState).
//
// NOTE(review): this copy is corrupted — generic type-argument lists appear to
// have been stripped throughout, presumably by an angle-bracket/HTML-stripping
// step in a processing pipeline. Concrete evidence in this file:
//   - `type PromiseResults = Pick, 'text' | …`  (Pick is missing both type args)
//   - `get text(): Promise;` and many other bare `Promise;` / `Record;` uses
//   - `stream: ReadableStream>;` and `getFullOutput(): Promise>;`
//   - the free identifier `OUTPUT` is referenced (e.g. `object: OUTPUT;`) but no
//     `<OUTPUT …>` type-parameter list survives on FullOutput/MastraModelOutput.
// As a result the declarations below are NOT syntactically valid TypeScript.
// Do not hand-repair: the erased generic arguments cannot be recovered from
// this file. Regenerate it from the corresponding source module
// (`tsc --declaration` on output.ts) instead.
// =============================================================================
import { ReadableStream } from 'node:stream/web'; import type { MessageList, MastraDBMessage } from '../../agent/message-list/index.js'; import { MastraBase } from '../../base.js'; import type { ScorerRunInputForAgent, ScorerRunOutputForAgent } from '../../evals/index.js'; import { ProcessorRunner } from '../../processors/runner.js'; import type { WorkflowRunStatus } from '../../workflows/index.js'; import type { ConsumeStreamOptions } from '../aisdk/v5/compat/index.js'; import type { ChunkType, LanguageModelUsage, LLMStepResult, MastraModelOutputOptions, ProviderMetadata, StreamTransport, StepTripwireData, ToolCallChunk } from '../types.js'; /** * Helper function to create a destructurable version of MastraModelOutput. * This wraps the output to ensure properties maintain their context when destructured. */ export declare function createDestructurableOutput(output: MastraModelOutput): MastraModelOutput; type PromiseResults = Pick, 'text' | 'reasoning' | 'sources' | 'files' | 'toolCalls' | 'toolResults' | 'content' | 'usage' | 'warnings' | 'providerMetadata' | 'response' | 'request'> & { suspendPayload: any; resumeSchema: any; object: OUTPUT; reasoningText: string | undefined; totalUsage: LLMStepResult['usage']; steps: LLMStepResult[]; finishReason: LLMStepResult['finishReason']; }; /** * The complete output returned by `getFullOutput()`. 
*/ export type FullOutput = { /** The text output from all steps, excluding rejected responses */ text: string; /** Token usage for the last step */ usage: PromiseResults['usage']; /** All LLM steps executed during the stream */ steps: LLMStepResult[]; /** The reason the stream finished */ finishReason: PromiseResults['finishReason']; /** Any warnings from the model */ warnings: PromiseResults['warnings']; /** Provider-specific metadata */ providerMetadata: PromiseResults['providerMetadata']; /** The request that was sent to model */ request: PromiseResults['request']; /** Reasoning details from the model */ reasoning: PromiseResults['reasoning']; /** Combined reasoning text */ reasoningText: string | undefined; /** Tool calls made during execution */ toolCalls: PromiseResults['toolCalls']; /** Results from tool executions */ toolResults: PromiseResults['toolResults']; /** Sources referenced by model */ sources: PromiseResults['sources']; /** Files generated by the model */ files: PromiseResults['files']; /** Response metadata from the model */ response: PromiseResults['response']; /** Total token usage across all steps */ totalUsage: PromiseResults['totalUsage']; /** The structured object output (when using structured output) */ object: OUTPUT; /** Error if the stream failed */ error: Error | undefined; /** Tripwire data if content was blocked */ tripwire: StepTripwireData | undefined; /** Scoring data for evals (when returnScorerData is enabled) */ scoringData?: { input: Omit; output: ScorerRunOutputForAgent; }; /** Trace ID for this execution. */ traceId: string | undefined; /** Root span ID for this execution, identifying the top-level span in the trace. 
*/ spanId: string | undefined; /** Run ID for this execution */ runId: string | undefined; /** Payload for resuming suspended tool calls */ suspendPayload: any; /** Resume schema of suspended step if available */ resumeSchema?: any; /** All messages from this execution (input + memory history + response) */ messages: MastraDBMessage[]; /** Only messages loaded from memory (conversation history) */ rememberedMessages: MastraDBMessage[]; }; export declare class MastraModelOutput extends MastraBase { #private; /** * Unique identifier for this execution run. */ runId: string; /** * The processor runner for this stream. */ processorRunner?: ProcessorRunner; /** * The message list for this stream. */ messageList: MessageList; /** * Trace ID for this execution. */ traceId?: string; /** * Root span ID for this execution, identifying the top-level span in the trace. */ spanId?: string; messageId: string; constructor({ model: _model, stream, messageList, options, messageId, initialState, }: { model: { modelId: string | undefined; provider: string | undefined; version: 'v2' | 'v3'; }; stream: ReadableStream>; messageList: MessageList; options: MastraModelOutputOptions; messageId: string; initialState?: any; }); private resolvePromise; private resolvePromises; /** * Resolves to the complete text response after streaming completes. */ get text(): Promise; /** * Resolves to reasoning parts array for models that support reasoning. */ get reasoning(): Promise; /** * Resolves to complete reasoning text for models that support reasoning. */ get reasoningText(): Promise; get sources(): Promise; get files(): Promise; get steps(): Promise[]>; get suspendPayload(): Promise; get resumeSchema(): Promise; /** * Stream of all chunks. Provides complete control over stream processing. */ get fullStream(): ReadableStream>; /** * Resolves to the reason generation finished. */ get finishReason(): Promise; /** * Resolves to array of all tool calls made during execution. 
*/ get toolCalls(): Promise; /** * Resolves to array of all tool execution results. */ get toolResults(): Promise; /** * Resolves to token usage statistics including inputTokens, outputTokens, and totalTokens. */ get usage(): Promise; /** * Resolves to array of all warnings generated during execution. */ get warnings(): Promise; /** * Resolves to provider metadata generated during execution. */ get providerMetadata(): Promise; /** * Resolves to the complete response from the model. */ get response(): Promise<{ [key: string]: unknown; headers?: Record; messages?: import("../../_types/@internal_ai-sdk-v5/dist/index.js").StepResult["response"]["messages"]; dbMessages?: MastraDBMessage[]; uiMessages?: import("../../_types/@internal_ai-sdk-v5/dist/index.js").UIMessage<[OUTPUT] extends [undefined] ? undefined : { structuredOutput?: OUTPUT | undefined; } & Record, import("../../_types/@internal_ai-sdk-v5/dist/index.js").UIDataTypes, import("../../_types/@internal_ai-sdk-v5/dist/index.js").UITools>[] | undefined; id?: string; timestamp?: Date; modelId?: string; }>; /** * Resolves to the complete request sent to the model. */ get request(): Promise<{ body?: unknown; }>; /** * Transport handle for the current stream (when available). */ get transport(): StreamTransport | undefined; /** * Resolves to an error if an error occurred during streaming. */ get error(): Error | undefined; updateUsageCount(usage: Partial): void; populateUsageCount(usage: Partial): void; consumeStream(options?: ConsumeStreamOptions): Promise; /** * Returns complete output including text, usage, tool calls, and all metadata. */ getFullOutput(): Promise>; /** * Tripwire data if the stream was aborted due to an output processor blocking the content. * Returns undefined if no tripwire was triggered. */ get tripwire(): StepTripwireData | undefined; /** * The total usage of the stream. */ get totalUsage(): Promise; get content(): Promise; /** * Stream of valid JSON chunks. 
The final JSON result is validated against the output schema when the stream ends. * * @example * ```typescript * const stream = await agent.stream("Extract data", { * structuredOutput: { * schema: z.object({ name: z.string(), age: z.number() }), * model: 'gpt-4o-mini' // optional to use a model for structuring json output * } * }); * // partial json chunks * for await (const data of stream.objectStream) { * console.log(data); // { name: 'John' }, { name: 'John', age: 30 } * } * ``` */ get objectStream(): ReadableStream>; /** * Stream of individual array elements when output schema is an array type. */ get elementStream(): ReadableStream ? T : never>; /** * Stream of only text content, filtering out metadata and other chunk types. */ get textStream(): ReadableStream; /** * Resolves to the complete object response from the model. Validated against the 'output' schema when the stream ends. * * @example * ```typescript * const stream = await agent.stream("Extract data", { * structuredOutput: { * schema: z.object({ name: z.string(), age: z.number() }), * model: 'gpt-4o-mini' // optionally use a model for structuring json output * } * }); * // final validated json * const data = await stream.object // { name: 'John', age: 30 } * ``` */ get object(): Promise; /** @internal */ _getImmediateToolCalls(): ToolCallChunk[]; /** @internal */ _getImmediateToolResults(): import("..").ToolResultChunk[]; /** @internal */ _getImmediateText(): string; /** @internal */ _getImmediateObject(): OUTPUT | undefined; /** @internal */ _getImmediateUsage(): LanguageModelUsage; /** @internal */ _getImmediateWarnings(): import("@ai-sdk/provider-v5").LanguageModelV2CallWarning[]; /** @internal */ _getImmediateFinishReason(): string | undefined; /** @internal */ _getBaseStream(): ReadableStream>; get status(): WorkflowRunStatus; serializeState(): { status: WorkflowRunStatus; bufferedSteps: LLMStepResult[]; bufferedReasoningDetails: Record; bufferedByStep: LLMStepResult; bufferedText: string[]; 
bufferedTextChunks: Record; bufferedSources: import("..").SourceChunk[]; bufferedReasoning: import("..").ReasoningChunk[]; bufferedFiles: import("..").FileChunk[]; toolCallArgsDeltas: Record; toolCallDeltaIdNameMap: Record; toolCallStreamingMeta: Record; toolCalls: ToolCallChunk[]; toolResults: import("..").ToolResultChunk[]; warnings: import("@ai-sdk/provider-v5").LanguageModelV2CallWarning[]; finishReason: string | undefined; request: { body?: unknown; }; usageCount: LanguageModelUsage; tripwire: StepTripwireData | undefined; messageList: import("../../agent/message-list/state").SerializedMessageListState; }; deserializeState(state: any): void; } export {}; //# sourceMappingURL=output.d.ts.map