import { InferenceModel, EmbeddingModel } from 'gpt4all';
import {
    EngineTextCompletionArgs,
    EngineChatCompletionArgs,
    EngineChatCompletionResult,
    EngineTextCompletionResult,
    EngineContext,
    EngineEmbeddingArgs,
    EngineEmbeddingResult,
    FileDownloadProgress,
    ModelConfig,
    TextCompletionParams,
    ChatMessage,
} from '../../types/index.js';

export type GPT4AllInstance = InferenceModel | EmbeddingModel;

/** Model metadata as listed in the GPT4All model registry. */
export interface GPT4AllModelMeta {
    url: string;
    md5sum: string;
    filename: string;
    promptTemplate: string;
    systemPrompt: string;
    filesize: number;
    ramrequired: number;
}

export interface GPT4AllModelConfig extends ModelConfig {
    location: string;
    md5?: string;
    url?: string;
    contextSize?: number;
    batchSize?: number;
    task: 'text-completion' | 'embedding';
    initialMessages?: ChatMessage[];
    completionDefaults?: TextCompletionParams;
    device?: {
        gpu?: boolean | 'auto' | (string & {});
        gpuLayers?: number;
        cpuThreads?: number;
    };
}

/** This engine supports automatic GPU selection. */
export declare const autoGpu = true;

/** Ensures the model file exists locally, reporting download progress via onProgress. */
export declare function prepareModel({ config, log }: EngineContext, onProgress?: (progress: FileDownloadProgress) => void, signal?: AbortSignal): Promise<GPT4AllModelMeta>;

/** Loads the model and returns an inference or embedding instance, depending on config.task. */
export declare function createInstance({ config, log }: EngineContext, signal?: AbortSignal): Promise<GPT4AllInstance>;

/** Releases the resources held by a loaded model instance. */
export declare function disposeInstance(instance: GPT4AllInstance): Promise<void>;

/** Runs a text completion request, streaming partial output through onChunk. */
export declare function processTextCompletionTask({ request, config, onChunk }: EngineTextCompletionArgs, instance: GPT4AllInstance, signal?: AbortSignal): Promise<EngineTextCompletionResult>;

/** Runs a chat completion request, streaming partial output through onChunk. */
export declare function processChatCompletionTask({ request, config, resetContext, log, onChunk }: EngineChatCompletionArgs, instance: GPT4AllInstance, signal?: AbortSignal): Promise<EngineChatCompletionResult>;

/** Computes embeddings for the given request input. */
export declare function processEmbeddingTask({ request, config }: EngineEmbeddingArgs, instance: GPT4AllInstance, signal?: AbortSignal): Promise<EngineEmbeddingResult>;
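
// Usage sketch (illustrative only, not part of the declarations): one plausible
// lifecycle for this engine API, following the signatures above. The `ctx` and
// `request` values, the module path, and the shape of the streamed chunks are
// assumptions about the host application, not anything defined in this file.
//
// import { prepareModel, createInstance, processChatCompletionTask, disposeInstance } from './engine.js';
//
// const ctx = { config, log };                 // an EngineContext provided by the host
// await prepareModel(ctx, (p) => log.info(p)); // fetch/verify the model file first
// const instance = await createInstance(ctx);  // then load it into memory
// try {
//     const result = await processChatCompletionTask(
//         { request, config: ctx.config, resetContext: false, log: ctx.log, onChunk: (chunk) => ctx.log.info(chunk) },
//         instance,
//     );
//     console.log(result);
// } finally {
//     await disposeInstance(instance);         // always release the native model handle
// }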