import {
	LlamaChat,
	LlamaModel,
	LlamaContext,
	LlamaCompletion,
	LlamaContextSequence,
	LlamaGrammar,
	ChatHistoryItem,
	LlamaChatResponse,
	LlamaEmbeddingContext,
	GgufFileInfo,
	LLamaChatContextShiftOptions,
	LlamaContextOptions,
} from 'node-llama-cpp';
import {
	EngineChatCompletionResult,
	EngineTextCompletionResult,
	EngineTextCompletionArgs,
	EngineChatCompletionArgs,
	EngineContext,
	ToolDefinition,
	EngineEmbeddingArgs,
	EngineEmbeddingResult,
	FileDownloadProgress,
	ModelConfig,
	TextCompletionParams,
	TextCompletionGrammar,
	ChatMessage,
} from '../../types/index.js';
/**
 * Per-model runtime state held by the node-llama-cpp engine: the loaded
 * model, its context/sequence, and optional chat / completion / embedding
 * session objects created on demand.
 *
 * NOTE(review): the original declarations used bare `Record` and `Promise`
 * (missing type arguments — does not compile under strict TS). The type
 * arguments below are restored from the imported-but-otherwise-unused
 * symbols (`LlamaGrammar`, `EngineChatCompletionResult`, etc.); confirm
 * against the implementation file.
 */
export interface NodeLlamaCppInstance {
	model: LlamaModel;
	context: LlamaContext;
	/** Present only after a chat session has been started. */
	chat?: LlamaChat;
	chatHistory: ChatHistoryItem[];
	/** Compiled grammars, keyed by grammar name. */
	grammars: Record<string, LlamaGrammar>;
	/** In-flight tool/function calls keyed by call id — value shape not recoverable from this file; verify against implementation. */
	pendingFunctionCalls: Record<string, unknown>;
	/** Cached evaluation state from the last chat turn, used to resume/shift context. */
	lastEvaluation?: LlamaChatResponse['lastEvaluation'];
	embeddingContext?: LlamaEmbeddingContext;
	completion?: LlamaCompletion;
	contextSequence: LlamaContextSequence;
}
/** Metadata returned by model preparation: the parsed GGUF file header. */
export interface NodeLlamaCppModelMeta {
	gguf: GgufFileInfo;
}
/**
 * Engine-specific model configuration extending the generic {@link ModelConfig}.
 */
export interface NodeLlamaCppModelConfig extends ModelConfig {
	/** Path (or locator) of the GGUF model file. */
	location: string;
	/** Named grammars available to completion requests. */
	grammars?: Record<string, TextCompletionGrammar>;
	/** Expected checksum of the model file, verified after download. */
	sha256?: string;
	/** Default sampling/completion parameters applied to every request. */
	completionDefaults?: TextCompletionParams;
	/** Messages pre-loaded into the chat history when an instance is created. */
	initialMessages?: ChatMessage[];
	/** Text prepended to text-completion prompts. */
	prefix?: string;
	tools?: {
		/** Tool/function definitions exposed to the model, keyed by tool name. */
		definitions: Record<string, ToolDefinition>;
		includeToolDocumentation?: boolean;
		/** Max number of tool calls evaluated in parallel. */
		parallelism?: number;
	};
	contextSize?: number;
	batchSize?: number;
	lora?: LlamaContextOptions['lora'];
	contextShiftStrategy?: LLamaChatContextShiftOptions['strategy'];
	device?: {
		/** `(string & {})` keeps literal autocomplete while accepting any backend name. */
		gpu?: boolean | 'auto' | (string & {});
		gpuLayers?: number;
		cpuThreads?: number;
		memLock?: boolean;
	};
}
/** Whether GPU auto-detection is enabled by default for this engine. */
export declare const autoGpu = true;
/**
 * Downloads/validates the model file and reads its GGUF metadata.
 * @param onProgress - Invoked with download progress updates.
 * @returns GGUF file info, or `undefined` when preparation is skipped.
 */
export declare function prepareModel(
	{ config, log }: EngineContext,
	onProgress?: (progress: FileDownloadProgress) => void,
	signal?: AbortSignal,
): Promise<{
	gguf: GgufFileInfo;
} | undefined>;
/** Loads the model and creates the runtime instance (context, sequence, sessions). */
export declare function createInstance(
	{ config, log }: EngineContext,
	signal?: AbortSignal,
): Promise<NodeLlamaCppInstance>;
/** Releases the instance's model/context resources. */
export declare function disposeInstance(instance: NodeLlamaCppInstance): Promise<void>;
/** Runs one chat-completion request against the instance, streaming chunks via `onChunk`. */
export declare function processChatCompletionTask(
	{ request, config, resetContext, log, onChunk }: EngineChatCompletionArgs,
	instance: NodeLlamaCppInstance,
	signal?: AbortSignal,
): Promise<EngineChatCompletionResult>;
/** Runs one text-completion request against the instance, streaming chunks via `onChunk`. */
export declare function processTextCompletionTask(
	{ request, config, resetContext, log, onChunk }: EngineTextCompletionArgs,
	instance: NodeLlamaCppInstance,
	signal?: AbortSignal,
): Promise<EngineTextCompletionResult>;
/** Computes embeddings for the request's input using the instance's embedding context. */
export declare function processEmbeddingTask(
	{ request, config, log }: EngineEmbeddingArgs,
	instance: NodeLlamaCppInstance,
	signal?: AbortSignal,
): Promise<EngineEmbeddingResult>;