import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import type { ChatCompletionMessage } from "../types.js";

/**
 * Determines whether a message is internal to the simulation machinery.
 *
 * NOTE(review): implementation is not visible from this declaration file —
 * presumably used to filter simulator bookkeeping messages out of the
 * user-visible trajectory; confirm against the implementing module.
 *
 * @param message - The chat message to inspect
 * @returns True if the message is considered internal
 */
export declare function _isInternalMessage(message: ChatCompletionMessage): boolean;

/**
 * Creates a simulated user powered by a language model for multi-turn conversations.
 *
 * This function generates a simulator that can be used with the runMultiturnSimulation method to create
 * dynamic, LLM-powered user responses in a conversation. The simulator automatically handles message
 * role conversion to maintain proper conversation flow, where user messages become assistant messages
 * and vice versa when passed to the underlying LLM.
 *
 * @param params - The parameters for creating the simulated user
 * @param params.system - System prompt that guides the LLM's behavior as a simulated user
 * @param params.model - Optional name of the language model to use. Must be provided if client is not.
 * @param params.client - Optional LangChain chat model instance. Must be provided if model is not.
 * @param params.fixedResponses - Optional list of fixed responses to use for the simulated user.
 *   If provided, these responses will be used in sequence based on the turn counter before falling back to LLM generation.
 *
 * @returns A callable simulator function that takes a trajectory and turn counter, and returns a Promise resolving to a ChatCompletionMessage
 *
 * @throws {Error} If neither client nor model is provided
 * @throws {Error} If both client and model are provided
 *
 * @example
 * ```typescript
 * import { runMultiturnSimulation, createLLMSimulatedUser } from "openevals";
 *
 * // Create a simulated user with GPT-4.1-mini
 * const simulatedUser = createLLMSimulatedUser({
 *   system: "You are a helpful customer service representative",
 *   model: "openai:gpt-4.1-mini"
 * });
 *
 * // Use with runMultiturnSimulation
 * const simulator = runMultiturnSimulation({
 *   app: myChatApp,
 *   user: simulatedUser,
 *   maxTurns: 5
 * });
 * ```
 *
 * Notes:
 * - The simulator automatically converts message roles to maintain proper conversation flow:
 *   * User messages become assistant messages when sent to the LLM
 *   * Assistant messages (without tool calls) become user messages when sent to the LLM
 *   * Messages with tool calls are skipped to maintain conversation coherence
 * - The system prompt is prepended to each conversation to maintain consistent behavior
 * - If no messages exist in the trajectory, an initial query is generated based on the system prompt
 * - Fixed responses are used in sequence based on the turn counter before falling back to LLM generation
 */
export declare function createLLMSimulatedUser({ system, model, client, fixedResponses, }: {
    system: string;
    model?: string;
    client?: BaseChatModel;
    fixedResponses?: (string | ChatCompletionMessage)[];
}): (params: {
    trajectory: ChatCompletionMessage[];
    turnCounter: number;
    // FIX: original declared a bare `Promise` (missing type argument), which does
    // not compile; the JSDoc contract states the promise resolves to a
    // ChatCompletionMessage.
}) => Promise<ChatCompletionMessage>;