/**
 * This file was auto-generated by Fern from our API Definition.
 */
import * as serializers from "../../../../index";
import * as Vellum from "../../../../../api/index";
import * as core from "../../../../../core";
import { DeprecatedPromptRequestInput } from "../../../../types/DeprecatedPromptRequestInput";
import { VellumVariable } from "../../../../types/VellumVariable";
import { PromptParameters } from "../../../../types/PromptParameters";
import { PromptSettings } from "../../../../types/PromptSettings";
import { FunctionDefinition } from "../../../../types/FunctionDefinition";
import { AdHocExpandMeta } from "../../../../types/AdHocExpandMeta";

/**
 * Serialization schema for the ad-hoc "execute prompt stream" request.
 * NOTE(review): declared as the untyped `core.serialization.Schema`; Fern
 * normally emits `ObjectSchema<Raw, Parsed>` here — confirm against the
 * generated implementation file before tightening.
 */
export declare const AdHocExecutePromptStream: core.serialization.Schema;

export declare namespace AdHocExecutePromptStream {
    /**
     * Snake_case shape of the request as handled by the serializer
     * (presumably the wire format — verify against the implementation).
     *
     * The original generated output declared several fields as
     * `(X | null) | null`; union members deduplicate, so these are
     * collapsed to the identical type `X | null`.
     */
    interface Raw {
        ml_model: string;
        input_values: DeprecatedPromptRequestInput.Raw[];
        input_variables: VellumVariable.Raw[];
        parameters: PromptParameters.Raw;
        settings?: PromptSettings.Raw | null;
        blocks: serializers.PromptBlock.Raw[];
        functions?: FunctionDefinition.Raw[] | null;
        expand_meta?: AdHocExpandMeta.Raw | null;
    }
}