
import { convertAISDKStream } from "./converters/aisdk.cjs";
import { TanStackChatMessage, TanStackInputResult, convertInputToTanStackAI, convertTanStackStream } from "./converters/tanstack.cjs";
import { LanguageModel, ModelMessage, ToolChoice, ToolSet } from "ai";
import { Observable } from "rxjs";
import { InferSchemaOutput, StandardSchemaV1 } from "@copilotkit/shared";
import { AbstractAgent, BaseEvent, Message, RunAgentInput } from "@ag-ui/client";
import { AgentCapabilities } from "@ag-ui/core";
import { z } from "zod";
import { StreamableHTTPClientTransportOptions } from "@modelcontextprotocol/sdk/client/streamableHttp.js";

//#region src/agent/index.d.ts
/**
 * Properties that can be overridden by forwardedProps
 * These match the exact parameter names in streamText
 * (Vercel AI SDK). A property is only actually overridable at runtime
 * when it is listed in `BuiltInAgentClassicConfig.overridableProperties`
 * (checked via `BuiltInAgent.canOverride`).
 */
type OverridableProperty = "model" | "toolChoice" | "maxOutputTokens" | "temperature" | "topP" | "topK" | "presencePenalty" | "frequencyPenalty" | "stopSequences" | "seed" | "maxRetries" | "prompt" | "providerOptions";
/**
 * Supported model identifiers for BuiltInAgent.
 *
 * Identifiers take the form "provider/model" (e.g. "openai/gpt-4o").
 * The trailing `(string & {})` member keeps the union open to arbitrary
 * strings while preserving editor autocomplete for the well-known
 * identifiers listed here.
 */
type BuiltInAgentModel = "openai/gpt-5" | "openai/gpt-5-mini" | "openai/gpt-4.1" | "openai/gpt-4.1-mini" | "openai/gpt-4.1-nano" | "openai/gpt-4o" | "openai/gpt-4o-mini" | "openai/o3" | "openai/o3-mini" | "openai/o4-mini" | "anthropic/claude-sonnet-4.5" | "anthropic/claude-sonnet-4" | "anthropic/claude-3.7-sonnet" | "anthropic/claude-opus-4.1" | "anthropic/claude-opus-4" | "anthropic/claude-3.5-haiku" | "google/gemini-2.5-pro" | "google/gemini-2.5-flash" | "google/gemini-2.5-flash-lite" | (string & {});
/**
 * Model specifier - can be a string like "openai/gpt-4o" or a LanguageModel instance.
 * Strings are resolved to a concrete model via {@link resolveModel}.
 */
type ModelSpecifier = string | LanguageModel;
/**
 * MCP Client configuration for HTTP (streamable HTTP) transport.
 * One arm of the {@link MCPClientConfig} discriminated union.
 */
interface MCPClientConfigHTTP {
  /**
   * Type of MCP client — discriminant for {@link MCPClientConfig}.
   */
  type: "http";
  /**
   * URL of the MCP server
   */
  url: string;
  /**
   * Optional transport options for HTTP client
   * (forwarded to the MCP SDK's StreamableHTTPClientTransport).
   */
  options?: StreamableHTTPClientTransportOptions;
}
/**
 * MCP Client configuration for SSE (server-sent events) transport.
 * One arm of the {@link MCPClientConfig} discriminated union.
 */
interface MCPClientConfigSSE {
  /**
   * Type of MCP client — discriminant for {@link MCPClientConfig}.
   */
  type: "sse";
  /**
   * URL of the MCP server
   */
  url: string;
  /**
   * Optional HTTP headers (e.g., for authentication)
   */
  headers?: Record<string, string>;
}
/**
 * MCP Client configuration, discriminated on the `type` field
 * ("http" | "sse").
 */
type MCPClientConfig = MCPClientConfigHTTP | MCPClientConfigSSE;
/**
 * A user-managed MCP client that provides tools to the agent.
 * The user is responsible for creating, configuring, and closing the client.
 * Compatible with the return type of @ai-sdk/mcp's createMCPClient().
 *
 * Unlike mcpServers, the agent does NOT create or close these clients.
 * This allows persistent connections, custom auth, and tool caching.
 */
interface MCPClientProvider {
  /** Return tools to be merged into the agent's tool set. */
  tools(): Promise<ToolSet>;
}
/**
 * Resolves a model specifier to a LanguageModel instance.
 * If `spec` is already a LanguageModel it is presumably returned as-is;
 * strings ("provider/model") are mapped to the matching provider SDK —
 * confirm exact behavior in the implementation.
 * @param spec - Model string (e.g., "openai/gpt-4o") or LanguageModel instance
 * @param apiKey - Optional API key to use instead of environment variables
 * @returns LanguageModel instance
 */
declare function resolveModel(spec: ModelSpecifier, apiKey?: string): LanguageModel;
/**
 * Tool definition for BuiltInAgent.
 * Parameters may be described with any Standard Schema V1 compatible
 * library (Zod, Valibot, ArkType, etc.); see {@link defineTool}.
 */
interface ToolDefinition<TParameters extends StandardSchemaV1 = StandardSchemaV1> {
  /** Unique tool name surfaced to the model. */
  name: string;
  /** Human/model-readable description of what the tool does. */
  description: string;
  /** Schema for the tool's input parameters. */
  parameters: TParameters;
  /** Server-side executor; receives arguments validated against `parameters`. */
  execute: (args: InferSchemaOutput<TParameters>) => Promise<unknown>;
}
/**
 * Define a tool for use with BuiltInAgent
 * @param name - The name of the tool
 * @param description - Description of what the tool does
 * @param parameters - Schema for the tool's input parameters (any Standard Schema V1 compatible library: Zod, Valibot, ArkType, etc.)
 * @param execute - Function to execute the tool server-side
 * @returns Tool definition
 * @example
 * ```typescript
 * const weather = defineTool({
 *   name: "get_weather",
 *   description: "Get the weather for a city",
 *   parameters: z.object({ city: z.string() }),
 *   execute: async ({ city }) => fetchWeather(city),
 * });
 * ```
 */
declare function defineTool<TParameters extends StandardSchemaV1>(config: {
  name: string;
  description: string;
  parameters: TParameters;
  execute: (args: InferSchemaOutput<TParameters>) => Promise<unknown>;
}): ToolDefinition<TParameters>;
/**
 * Options for converting AG-UI messages to Vercel AI SDK format
 */
interface MessageConversionOptions {
  /** Forward system-role messages to the LLM (presumably defaults to false, matching BuiltInAgentClassicConfig — confirm in implementation). */
  forwardSystemMessages?: boolean;
  /** Forward developer-role messages to the LLM as system messages (presumably defaults to false — confirm in implementation). */
  forwardDeveloperMessages?: boolean;
}
/**
 * Converts AG-UI messages to Vercel AI SDK ModelMessage format
 * @param messages - AG-UI messages from the run input
 * @param options - Controls whether system/developer messages are forwarded
 * @returns Messages in the Vercel AI SDK's ModelMessage shape
 */
declare function convertMessagesToVercelAISDKMessages(messages: Message[], options?: MessageConversionOptions): ModelMessage[];
/**
 * JSON Schema type definition (the subset used for tool parameters).
 */
interface JsonSchema {
  type: "object" | "string" | "number" | "integer" | "boolean" | "array";
  description?: string;
  /** Child schemas when `type` is "object". */
  properties?: Record<string, JsonSchema>;
  /** Names of required properties when `type` is "object". */
  required?: string[];
  /** Element schema when `type` is "array". */
  items?: JsonSchema;
  /** Allowed literal values (string enums only in this subset). */
  enum?: string[];
}
/**
 * Converts JSON Schema to Zod schema
 * @param jsonSchema - The JSON Schema to convert (see {@link JsonSchema})
 * @param required - Whether the resulting schema is required — presumably
 *   the schema is made optional when false; confirm in implementation
 * @returns Equivalent Zod schema
 */
declare function convertJsonSchemaToZodSchema(jsonSchema: JsonSchema, required: boolean): z.ZodSchema;
declare function convertToolsToVercelAITools(tools: RunAgentInput["tools"]): ToolSet;
/**
 * Converts ToolDefinition array to Vercel AI SDK ToolSet.
 *
 * For Zod schemas, passes them directly to the AI SDK (Zod satisfies FlexibleSchema).
 * For non-Zod schemas, converts to JSON Schema via schemaToJsonSchema() and wraps
 * with the AI SDK's jsonSchema() helper.
 *
 * @param tools - Server-side tool definitions (see {@link defineTool})
 * @returns ToolSet keyed by tool name, ready to pass to streamText
 */
declare function convertToolDefinitionsToVercelAITools(tools: ToolDefinition[]): ToolSet;
/**
 * Context passed to the user-supplied factory function in factory mode.
 */
interface AgentFactoryContext {
  /** The full AG-UI run input (messages, tools, state, forwardedProps). */
  input: RunAgentInput;
  /**
   * Prefer `abortSignal` for most use cases (AI SDK, fetch, custom backends).
   * Provided for backends like TanStack AI that require the full AbortController.
   * Do NOT call `.abort()` on this controller — use `abortRun()` on the agent instead.
   */
  abortController: AbortController;
  /** Signal fired when the run is aborted; pass to the LLM call or fetch. */
  abortSignal: AbortSignal;
}
/**
 * Factory config for AI SDK backend.
 * The factory must return an object with a `fullStream` async iterable
 * (compatible with the result of `streamText()` — only `fullStream` is consumed).
 */
interface BuiltInAgentAISDKFactoryConfig {
  /** Discriminant selecting the AI SDK backend. */
  type: "aisdk";
  /** May return the stream holder directly or as a Promise. */
  factory: (ctx: AgentFactoryContext) => {
    fullStream: AsyncIterable<unknown>;
  } | Promise<{
    fullStream: AsyncIterable<unknown>;
  }>;
}
/**
 * Factory config for TanStack AI backend.
 * The factory must return an async iterable of TanStack AI stream chunks.
 */
interface BuiltInAgentTanStackFactoryConfig {
  /** Discriminant selecting the TanStack AI backend. */
  type: "tanstack";
  /** May return the iterable directly or as a Promise. */
  factory: (ctx: AgentFactoryContext) => AsyncIterable<unknown> | Promise<AsyncIterable<unknown>>;
}
/**
 * Factory config for a custom backend that directly yields AG-UI events.
 */
interface BuiltInAgentCustomFactoryConfig {
  /** Discriminant selecting the custom (raw AG-UI events) backend. */
  type: "custom";
  /** May return the event iterable directly or as a Promise. */
  factory: (ctx: AgentFactoryContext) => AsyncIterable<BaseEvent> | Promise<AsyncIterable<BaseEvent>>;
}
/**
 * Union of all factory-mode configurations, discriminated on the `type`
 * field ("aisdk" | "tanstack" | "custom").
 */
type BuiltInAgentFactoryConfig = BuiltInAgentAISDKFactoryConfig | BuiltInAgentTanStackFactoryConfig | BuiltInAgentCustomFactoryConfig;
/**
 * Classic config — BuiltInAgent handles streamText, tools, MCP, state tools, prompt building.
 *
 * Distinguished from {@link BuiltInAgentFactoryConfig} by the absence of a
 * `type` discriminant (factory configs carry `type: "aisdk" | "tanstack" | "custom"`).
 */
interface BuiltInAgentClassicConfig {
  /**
   * The model to use
   */
  model: BuiltInAgentModel | LanguageModel;
  /**
   * API key for the model provider (OpenAI, Anthropic, Google)
   * If not provided, falls back to environment variables:
   * - OPENAI_API_KEY for OpenAI models
   * - ANTHROPIC_API_KEY for Anthropic models
   * - GOOGLE_API_KEY for Google models
   */
  apiKey?: string;
  /**
   * Maximum number of steps/iterations for tool calling (default: 1)
   */
  maxSteps?: number;
  /**
   * Tool choice setting - how tools are selected for execution (default: "auto")
   */
  toolChoice?: ToolChoice<Record<string, unknown>>;
  /**
   * Maximum number of tokens to generate
   */
  maxOutputTokens?: number;
  /**
   * Temperature setting (range depends on provider)
   */
  temperature?: number;
  /**
   * Nucleus sampling (topP)
   */
  topP?: number;
  /**
   * Top K sampling
   */
  topK?: number;
  /**
   * Presence penalty
   */
  presencePenalty?: number;
  /**
   * Frequency penalty
   */
  frequencyPenalty?: number;
  /**
   * Sequences that will stop the generation
   */
  stopSequences?: string[];
  /**
   * Seed for deterministic results
   */
  seed?: number;
  /**
   * Maximum number of retries
   */
  maxRetries?: number;
  /**
   * Prompt for the agent
   */
  prompt?: string;
  /**
   * List of properties that can be overridden by forwardedProps.
   * Properties not listed here cannot be overridden at run time
   * (see BuiltInAgent.canOverride).
   */
  overridableProperties?: OverridableProperty[];
  /**
   * Optional list of MCP server configurations.
   * The agent creates and closes these clients itself; for user-managed
   * lifecycles use `mcpClients` instead.
   */
  mcpServers?: MCPClientConfig[];
  /**
   * Optional list of user-managed MCP clients.
   * Unlike mcpServers, the agent does NOT create or close these clients.
   * The user controls the lifecycle, persistence, auth, and caching.
   *
   * Compatible with @ai-sdk/mcp's createMCPClient() return type:
   * ```typescript
   * const client = await createMCPClient({ transport });
   * const agent = new BuiltInAgent({ model: "...", mcpClients: [client] });
   * ```
   */
  mcpClients?: MCPClientProvider[];
  /**
   * Optional tools available to the agent
   */
  tools?: ToolDefinition[];
  /**
   * Forward system-role messages from input to the LLM.
   * Default: false
   */
  forwardSystemMessages?: boolean;
  /**
   * Forward developer-role messages from input to the LLM (as system messages).
   * Default: false
   */
  forwardDeveloperMessages?: boolean;
  /**
   * Provider-specific options passed to the model (e.g., OpenAI reasoningEffort).
   * Example: `{ openai: { reasoningEffort: "high" } }`
   * NOTE(review): typed `Record<string, any>` — intentionally loose because
   * the nested shape is provider-defined; values are passed through untouched.
   */
  providerOptions?: Record<string, any>;
  /**
   * Explicit agent capabilities. **Shallow-merged** at the category level on
   * top of auto-inferred defaults — providing a category (e.g. `tools`)
   * replaces that entire category, not individual fields within it.
   *
   * For example, `{ tools: { supported: true } }` will drop the inferred
   * `clientProvided` value. Include all fields for any category you override.
   */
  capabilities?: Partial<AgentCapabilities>;
}
/**
 * Configuration for BuiltInAgent.
 *
 * Two modes:
 * - **Classic** (model + params): BuiltInAgent handles everything — streamText, tools, MCP, state tools.
 * - **Factory** (type + factory): You own the LLM call. BuiltInAgent handles lifecycle only.
 *
 * The two modes are told apart by the presence of the `type` field,
 * which only factory configs carry.
 */
type BuiltInAgentConfiguration = BuiltInAgentClassicConfig | BuiltInAgentFactoryConfig;
/**
 * AG-UI agent backed by the Vercel AI SDK (classic mode) or a
 * user-supplied stream factory (factory mode). See
 * {@link BuiltInAgentConfiguration} for the two modes.
 */
declare class BuiltInAgent extends AbstractAgent {
  /** Configuration this agent was constructed with. */
  private config;
  /** Abort controller for the in-flight run, if any — driven by abortRun(). */
  private abortController?;
  constructor(config: BuiltInAgentConfiguration);
  /**
   * Check if a property can be overridden by forwardedProps
   */
  canOverride(property: OverridableProperty): boolean;
  /** Resolves the agent's capabilities (explicit config shallow-merged over inferred defaults). */
  getCapabilities(): Promise<AgentCapabilities>;
  /** Executes a single run, emitting AG-UI events as they are produced. */
  run(input: RunAgentInput): Observable<BaseEvent>;
  /** Factory-mode run path — delegates the LLM call to the user-supplied factory. */
  private runFactory;
  /** Returns a new BuiltInAgent sharing this agent's configuration. */
  clone(): BuiltInAgent;
  /** Aborts the current run, if one is in flight. */
  abortRun(): void;
}
/**
 * Back-compat alias retained for existing callers.
 * @deprecated Use BuiltInAgent instead
 */
declare class BasicAgent extends BuiltInAgent {
  constructor(config: BuiltInAgentConfiguration);
}
/** @deprecated Use BuiltInAgentClassicConfig instead */
type BasicAgentConfiguration = BuiltInAgentClassicConfig;
//#endregion
export { BasicAgent, BasicAgentConfiguration, BuiltInAgent, BuiltInAgentAISDKFactoryConfig, BuiltInAgentClassicConfig, BuiltInAgentConfiguration, BuiltInAgentCustomFactoryConfig, BuiltInAgentFactoryConfig, BuiltInAgentModel, BuiltInAgentTanStackFactoryConfig, MCPClientConfig, MCPClientConfigHTTP, MCPClientConfigSSE, MCPClientProvider, MessageConversionOptions, ModelSpecifier, OverridableProperty, ToolDefinition, convertJsonSchemaToZodSchema, convertMessagesToVercelAISDKMessages, convertToolDefinitionsToVercelAITools, convertToolsToVercelAITools, defineTool, resolveModel };
//# sourceMappingURL=index.d.cts.map