/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type { GenerateContentResponse, Content, Part } from '@google/genai';
import type { ContentGenerator } from './contentGenerator.js';

/**
 * Options for generateJson method
 */
export interface GenerateJsonOptions {
  prompt: string;
  schema?: Record<string, unknown>;
  model: string;
  temperature?: number;
  systemInstruction?: string;
  promptId?: string;
  /**
   * The maximum number of attempts for the request.
   */
  maxAttempts?: number;
}

/**
 * Options for generateEmbedding method
 */
export interface GenerateEmbeddingOptions {
  text: string | string[];
  model: string;
}

/**
 * Options for countTokens method
 */
export interface CountTokensOptions {
  text?: string;
  contents?: Content[];
  model: string;
}

/**
 * Options for the generateContent utility function.
 */
export interface GenerateContentOptions {
  /** The input prompt or history. */
  contents: Content[];
  /** The model to use. */
  model: string;
  /**
   * Task-specific system instructions.
   * If omitted, no system instruction is sent.
   */
  systemInstruction?: string | Part | Part[] | Content;
  /** Signal for cancellation. */
  abortSignal: AbortSignal;
  /**
   * A unique ID for the prompt, used for logging/telemetry correlation.
   */
  promptId: string;
  /**
   * The maximum number of attempts for the request.
   */
  maxAttempts?: number;
}

/**
 * BaseLLMClient extracts stateless utility methods for LLM operations.
 * Unlike the main Client class, it handles utility calls without conversation state.
 *
 * This implements the baseLlmClient pattern from upstream gemini-cli, but adapted
 * for llxprt's multi-provider architecture.
 *
 * Key features:
 * - Multi-provider support (Anthropic, OpenAI, Gemini, Vertex AI)
 * - Stateless operations (no conversation history)
 * - Clean separation from GeminiClient
 * - Dependency injection for testing
 */
export declare class BaseLLMClient {
  private readonly contentGenerator;
  constructor(contentGenerator: ContentGenerator | null);
  /**
   * Generate structured JSON from a prompt with optional schema validation.
   * Supports all providers through the ContentGenerator abstraction.
   *
   * @param options - Generation options including prompt, schema, model, etc.
   * @returns Parsed JSON object
   * @throws Error if generation fails or the response cannot be parsed
   */
  generateJson(options: GenerateJsonOptions): Promise<Record<string, unknown>>;
  /**
   * Generate embeddings for text input.
   * Supports a single text string or an array of strings.
   *
   * @param options - Embedding options including text and model
   * @returns Embedding vector(s) as number array(s)
   * @throws Error if generation fails or the response is invalid
   */
  generateEmbedding(options: GenerateEmbeddingOptions): Promise<number[] | number[][]>;
  /**
   * Count tokens in text or contents without making a generation API call.
   * Useful for checking context limits before generation.
   *
   * @param options - Options including text/contents and model
   * @returns Token count
   * @throws Error if counting fails
   */
  countTokens(options: CountTokensOptions): Promise<number>;
  /**
   * Generate content from a prompt.
   * This is a general-purpose content generation method that does not enforce JSON output.
   *
   * @param options - Generation options
   * @returns Raw GenerateContentResponse
   * @throws Error if generation fails
   */
  generateContent(options: GenerateContentOptions): Promise<GenerateContentResponse>;
  private _generateWithRetry;
}
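
/*
 * Usage sketch (illustrative only). Because this is a declaration file, the
 * example lives in a comment. The `contentGenerator` instance and the model
 * name 'example-model' are placeholder assumptions, not values defined by this
 * module; obtain a real ContentGenerator from llxprt's provider layer.
 *
 *   const client = new BaseLLMClient(contentGenerator);
 *
 *   // Structured JSON generation with an optional schema.
 *   const parsed = await client.generateJson({
 *     prompt: 'List three colors as a JSON object under the key "colors".',
 *     schema: { type: 'object', properties: { colors: { type: 'array' } } },
 *     model: 'example-model',
 *   });
 *
 *   // Check context usage before issuing a larger request.
 *   const tokens = await client.countTokens({
 *     text: 'How large is this prompt?',
 *     model: 'example-model',
 *   });
 */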