import { BaseCache, BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
import { RunnableConfig } from "../schema/runnable.js";
export type SerializedLLM = {
    _model: string;
    _type: string;
} & Record<string, any>;
export interface BaseLLMParams extends BaseLanguageModelParams {
    /**
     * @deprecated Use `maxConcurrency` instead
     */
    concurrency?: number;
    cache?: BaseCache | boolean;
}
export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
}
/**
 * LLM Wrapper. Provides a {@link call} (and {@link generate}) function that takes in a prompt (or prompts) and returns a string.
 */
export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
    ParsedCallOptions: Omit<CallOptions, keyof RunnableConfig>;
    lc_namespace: string[];
    cache?: BaseCache;
    constructor({ cache, concurrency, ...rest }: BaseLLMParams);
    invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise<string>;
    _streamResponseChunks(_input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
    protected _separateRunnableConfigFromCallOptions(options: CallOptions): [RunnableConfig, this["ParsedCallOptions"]];
    _streamIterator(input: BaseLanguageModelInput, options?: CallOptions): AsyncGenerator<string>;
    generatePrompt(promptValues: BasePromptValue[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
    /**
     * Run the LLM on the given prompts and input.
     */
    abstract _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
    /**
     * Get the parameters used to invoke the model.
     */
    invocationParams(_options?: this["ParsedCallOptions"]): any;
    _flattenLLMResult(llmResult: LLMResult): LLMResult[];
    /** @ignore */
    _generateUncached(prompts: string[], parsedOptions: this["ParsedCallOptions"], handledOptions: BaseCallbackConfig): Promise<LLMResult>;
    /**
     * Run the LLM on the given prompts and input, handling caching.
     */
    generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
    /**
     * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
     */
    call(prompt: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
    predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
    predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
    /**
     * Get the identifying parameters of the LLM.
     */
    _identifyingParams(): Record<string, any>;
    /**
     * Return the string type key uniquely identifying this class of LLM.
     */
    abstract _llmType(): string;
    /**
     * Return a json-like object representing this LLM.
     */
    serialize(): SerializedLLM;
    _modelType(): string;
    /**
     * Load an LLM from a json-like object describing it.
     */
    static deserialize(data: SerializedLLM): Promise<BaseLLM>;
}
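// Usage sketch (illustrative only, not part of the declarations): any
// concrete BaseLLM subclass exposes the surface above. This assumes the
// `OpenAI` subclass and the "langchain/llms/openai" entrypoint shipped with
// this version of the library.
import { OpenAI } from "langchain/llms/openai";

async function exampleUsage(): Promise<void> {
    const model = new OpenAI({ temperature: 0 });

    // `call`: one prompt in, one string out.
    const text = await model.call("Say hello.");

    // `generate`: a batch of prompts in, an LLMResult out, where
    // `result.generations[i]` holds the generations for prompts[i].
    const result = await model.generate(["Say hello.", "Say goodbye."]);
    console.log(text, result.generations.length);
}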
/**
 * LLM class that provides a simpler interface to subclass than {@link BaseLLM}.
 *
 * Requires only implementing a simpler {@link _call} method instead of {@link _generate}.
 *
 * @augments BaseLLM
 */
export declare abstract class LLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLLM<CallOptions> {
    /**
     * Run the LLM on the given prompt and input.
     */
    abstract _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
    _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
}
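// Subclassing sketch (illustrative only): implementing `_call` and
// `_llmType` is all that's required; the inherited `call`, `generate` and
// `invoke` then layer caching, callbacks and batching on top. Assumes the
// public "langchain/llms/base" entrypoint re-exports LLM and BaseLLMParams.
import { LLM, BaseLLMParams } from "langchain/llms/base";

class EchoLLM extends LLM {
    constructor(fields: BaseLLMParams = {}) {
        super(fields);
    }

    _llmType(): string {
        return "echo";
    }

    // `_call` receives a single prompt (plus parsed call options) and must
    // resolve to a single string completion; here it simply echoes it back.
    async _call(prompt: string, _options: this["ParsedCallOptions"]): Promise<string> {
        return prompt;
    }
}

// const echoed = await new EchoLLM().call("hello"); // -> "hello"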