import { type AuthStatus } from '../auth/types';
import { ModelTag } from './tags';
import { type ChatModel, type EditModel, type ModelContextWindow, ModelUsage } from './types';

export type ModelId = string;
export type ApiVersionId = string;
export type ProviderId = string;

/** Fully-qualified model reference string, e.g. "anthropic::2023-06-01::claude-3-sonnet". */
export type ModelRefStr = `${ProviderId}::${ApiVersionId}::${ModelId}`;

/** Structured form of a {@link ModelRefStr}. */
export interface ModelRef {
    providerId: ProviderId;
    apiVersionId: ApiVersionId;
    modelId: ModelId;
}

export type ModelCategory = ModelTag.Accuracy | ModelTag.Balanced | ModelTag.Speed;
/**
 * FIX: the original union listed `ModelTag.Experimental` twice, which is redundant
 * (a duplicated union member collapses to one). The duplicate has been removed.
 * NOTE(review): the duplicate may have been a typo for another tag (e.g. an
 * early-access status) — confirm against the upstream source before relying on this.
 */
export type ModelStatus = ModelTag.Experimental | 'stable' | ModelTag.Deprecated;
export type ModelTier = ModelTag.Free | ModelTag.Pro | ModelTag.Enterprise;
export type ModelCapability = 'chat' | 'autocomplete';

/** Token budget for a model, as reported by the server. */
export interface ContextWindow {
    maxInputTokens: number;
    maxOutputTokens: number;
}

interface ClientSideConfig {
    /**
     * The API key for the model
     */
    apiKey?: string;
    /**
     * The API endpoint for the model
     */
    apiEndpoint?: string;
    /**
     * if this model is compatible with OpenAI API provider
     * allow the site admin to set configuration params
     */
    openAICompatible?: OpenAICompatible;
}

/** Site-admin tunables for models served through an OpenAI-compatible provider. */
interface OpenAICompatible {
    stopSequences?: string[];
    endOfText?: string;
    contextSizeHintTotalCharacters?: number;
    contextSizeHintPrefixCharacters?: number;
    contextSizeHintSuffixCharacters?: number;
    chatPreInstruction?: string;
    editPostInstruction?: string;
    autocompleteSinglelineTimeout?: number;
    autocompleteMultilineTimeout?: number;
    chatTopK?: number;
    chatTopP?: number;
    chatTemperature?: number;
    chatMaxTokens?: number;
    autoCompleteTopK?: number;
    autoCompleteTopP?: number;
    autoCompleteTemperature?: number;
    autoCompleteSinglelineMaxTokens?: number;
    autoCompleteMultilineMaxTokens?: number;
    editTopK?: number;
    editTopP?: number;
    editTemperature?: number;
    editMaxTokens?: number;
}

/** A model as described by the server-sent model configuration. */
export interface ServerModel {
    modelRef: ModelRefStr;
    displayName: string;
    modelName: string;
    capabilities: ModelCapability[];
    category: ModelCategory;
    status: ModelStatus;
    tier: ModelTier;
    contextWindow: ContextWindow;
    clientSideConfig?: ClientSideConfig;
}

interface Provider {
    id: string;
    displayName: string;
}

/** Default model selections per usage, keyed by {@link ModelRefStr}. */
interface DefaultModels {
    chat: ModelRefStr;
    fastChat: ModelRefStr;
    codeCompletion: ModelRefStr;
}

/** Top-level shape of the server-sent model configuration payload. */
export interface ServerModelConfiguration {
    schemaVersion: string;
    revision: string;
    providers: Provider[];
    models: ServerModel[];
    defaultModels: DefaultModels;
}

/**
 * Model describes an LLM model and its capabilities.
 */
export declare class Model {
    /**
     * The model id that includes the provider name & the model name,
     * e.g. "anthropic/claude-3-sonnet-20240229"
     *
     * TODO(PRIME-282): Replace this with a `ModelRefStr` instance and introduce a separate
     * "modelId" that is distinct from the "modelName". (e.g. "claude-3-sonnet" vs. "claude-3-sonnet-20240229")
     */
    readonly model: string;
    /**
     * The usage of the model, e.g. chat or edit.
     */
    readonly usage: ModelUsage[];
    /**
     * The default context window of the model reserved for Chat and Context.
     * {@see TokenCounter on how the token usage is calculated.}
     */
    readonly contextWindow: ModelContextWindow;
    /**
     * The client-specific configuration for the model.
     */
    readonly clientSideConfig?: ClientSideConfig;
    provider: string;
    readonly title: string;
    /**
     * The tags assigned for categorizing the model.
     */
    readonly tags: ModelTag[];
    readonly modelRef?: ModelRef;
    constructor({ model, modelRef, usage, contextWindow, clientSideConfig, tags, provider, title, }: ModelParams);
    /** Builds a {@link Model} from a server-sent {@link ServerModel}. */
    static fromApi({ modelRef, displayName, capabilities, category, tier, clientSideConfig, contextWindow, }: ServerModel): Model;
    static tier(model: Model): ModelTier;
    static isCodyPro(model?: Model): boolean;
    /** Parses a "provider::apiVersion::model" string into its structured parts. */
    static parseModelRef(ref: ModelRefStr): ModelRef;
}

/** Named-argument bag for the {@link Model} constructor. */
interface ModelParams {
    model: string;
    modelRef?: ModelRefStr | ModelRef;
    usage: ModelUsage[];
    contextWindow?: ModelContextWindow;
    clientSideConfig?: ClientSideConfig;
    tags?: ModelTag[];
    provider?: string;
    title?: string;
}

/** Model preferences keyed by Sourcegraph endpoint URL. */
export interface PerSitePreferences {
    [endpoint: string]: SitePreferences;
}

interface SitePreferences {
    defaults: {
        [usage in ModelUsage]?: string;
    };
    selected: {
        [usage in ModelUsage]?: string;
    };
}

/**
 * ModelsService is the component responsible for keeping track of which models
 * are supported on the backend, which ones are available based on the user's
 * preferences, etc.
 *
 * TODO(PRIME-228): Update this type to be able to fetch the models from the
 * Sourcegraph backend instead of being hard-coded.
 * TODO(PRIME-283): Enable Cody Enterprise users to select which LLM model to
 * used in the UI. (By having the relevant code paths just pull the models
 * from this type.)
 */
export declare class ModelsService {
    protected ModelsService(): void;
    static reset(): void;
    private static get models();
    private static primaryModels;
    private static localModels;
    private static STORAGE_KEY;
    private static storage;
    private static authStatus;
    private static _preferences;
    private static get preferences();
    // FIX: the original declared bare `Promise` (invalid — the generic requires a
    // type argument). `Promise<void>` matches the tsc emit for async methods that
    // return no value. NOTE(review): confirm against the implementation that none
    // of these methods resolve with a value.
    static onConfigChange(): Promise<void>;
    static setAuthStatus(authStatus: AuthStatus): Promise<void>;
    static setStorage(storage: Storage): void;
    /**
     * Sets the primary models available to the user.
     */
    static setModels(models: Model[]): void;
    /**
     * Sets the primary and default models from the server sent config
     */
    static setServerSentModels(config: ServerModelConfiguration): Promise<void>;
    private static setServerDefaultModel;
    private static flush;
    /**
     * Add new models for use.
     */
    static addModels(models: Model[]): void;
    private static getModelsByType;
    /**
     * Gets the available models of the specified usage type, with the default model first.
     *
     * @param type - The usage type of the models to retrieve.
     * @param authStatus - The authentication status of the user.
     * @returns An array of models, with the default model first.
     */
    static getModels(type: ModelUsage): Model[];
    static getDefaultModel(type: ModelUsage): Model | undefined;
    static getDefaultEditModel(): EditModel | undefined;
    static getDefaultChatModel(): ChatModel | undefined;
    static setSelectedModel(type: ModelUsage, model: Model | string): Promise<void>;
    static isModelAvailable(model: string | Model): boolean;
    static resolveModel(modelID: Model | string | undefined): Model | undefined;
    /**
     * Finds the model provider with the given model ID and returns its Context Window.
     */
    static getContextWindowByID(modelID: string): ModelContextWindow;
    static getModelByID(modelID: string): Model | undefined;
    static getModelByIDSubstringOrError(modelSubstring: string): Model;
    static hasModelTag(model: Model, modelTag: ModelTag): boolean;
}

/** Minimal persistent key-value store used for saving model preferences. */
interface Storage {
    get(key: string): string | null;
    set(key: string, value: string): Promise<void>;
    delete(key: string): Promise<void>;
}

/** Maps a server-reported capability to the client-side usage(s) it enables. */
export declare function capabilityToUsage(capability: ModelCapability): ModelUsage[];

export {};
//# sourceMappingURL=index.d.ts.map