import type { CancellationToken, LanguageModelToolResult } from 'vscode';
import { Raw } from '.';
import { PromptElement } from './promptElement';
import { BasePromptElementProps, PromptElementProps, PromptPiece, PromptPieceChild, PromptSizing } from './types';

/** Union of the prompt elements that render directly to chat messages. */
export type ChatMessagePromptElement = SystemMessage | UserMessage | AssistantMessage;

/** Type guard that narrows an arbitrary value to a {@link ChatMessagePromptElement}. */
export declare function isChatMessagePromptElement(element: unknown): element is ChatMessagePromptElement;

/** Props shared by every chat-message element. */
export interface ChatMessageProps extends BasePromptElementProps {
    /** Chat role the rendered message is attributed to. */
    role?: Raw.ChatRole;
    /** Optional participant name attached to the message. */
    name?: string;
}

/** Common base class for the chat-message prompt elements below. */
export declare class BaseChatMessage extends PromptElement {
    render(): any;
}

/**
 * A {@link PromptElement} which can be rendered to an OpenAI system chat message.
 *
 * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages}
 */
export declare class SystemMessage extends BaseChatMessage {
    constructor(props: ChatMessageProps);
}

/**
 * A {@link PromptElement} which can be rendered to an OpenAI user chat message.
 *
 * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages}
 */
export declare class UserMessage extends BaseChatMessage {
    constructor(props: ChatMessageProps);
}

/** A single tool (function) call issued by the assistant. */
export interface ToolCall {
    id: string;
    function: ToolFunction;
    type: 'function';
    /**
     * A `<KeepWith />` element, created from {@link useKeepWith}, that wraps
     * the tool result. This will ensure that if the tool result is pruned,
     * the tool call is also pruned to avoid errors.
     */
    keepWith?: KeepWithCtor;
}

/** The function name and serialized arguments of a {@link ToolCall}. */
export interface ToolFunction {
    /** Serialized arguments for the call. */
    arguments: string;
    name: string;
}

export interface AssistantMessageProps extends ChatMessageProps {
    /** Tool calls to attach to the assistant message. */
    toolCalls?: ToolCall[];
}

/**
 * A {@link PromptElement} which can be rendered to an OpenAI assistant chat message.
* * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages} */ export declare class AssistantMessage extends BaseChatMessage { constructor(props: AssistantMessageProps); } export interface ToolMessageProps extends ChatMessageProps { toolCallId: string; } /** * A {@link PromptElement} which can be rendered to an OpenAI tool chat message. * * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages} */ export declare class ToolMessage extends BaseChatMessage { constructor(props: ToolMessageProps); } export interface TextChunkProps extends BasePromptElementProps { /** * If defined, the text chunk will potentially truncate its contents at the * last occurrence of the string or regular expression to ensure its content * fits within in token budget. * * {@see BasePromptElementProps} for options to control how the token budget * is allocated. */ breakOn?: RegExp | string; /** A shortcut for setting {@link breakOn} to `/\s+/g` */ breakOnWhitespace?: boolean; } /** * @property {string} src - The source of the image. This should be a raw base64 string. * @property {'low' | 'high' | 'auto'} [detail] - Optional. The detail level of the image. Can be either 'low', 'high' or 'auto'. If not specified, `auto` is used. * @property {ImageMediaType} [mimeType] - Optional. The MIME type of the image. Only used for non-base64 URLs. */ export interface ImageProps extends BasePromptElementProps { src: string; detail?: 'low' | 'high' | 'auto'; mimeType?: string; } /** * A chunk of single-line or multi-line text that is a direct child of a {@link ChatMessagePromptElement}. * * TextChunks can only have text literals or intrinsic attributes as children. * It supports truncating text to fix the token budget if passed a {@link TextChunkProps.tokenizer} and {@link TextChunkProps.breakOn} behavior. * Like other {@link PromptElement}s, it can specify `priority` to determine how it should be prioritized. 
*/ export declare class TextChunk extends PromptElement { prepare(sizing: PromptSizing, _progress?: unknown, token?: CancellationToken): Promise; render(piece: PromptPiece): PromptPiece; } export declare class Image extends PromptElement { constructor(props: ImageProps); render(): any; } /** * @property {string} data - Base64-encoded document data. * @property {string} mediaType - MIME type of the document (e.g. 'application/pdf'). */ export interface DocumentProps extends BasePromptElementProps { data: string; mediaType: string; } /** * A document content part (e.g. PDF) that is a direct child of a {@link ChatMessagePromptElement}. */ export declare class Document extends PromptElement { constructor(props: DocumentProps); render(): any; } export interface PrioritizedListProps extends BasePromptElementProps { /** * Priority of the list element. * All rendered elements in this list receive a priority that is offset from this value. */ priority?: number; /** * If `true`, assign higher priority to elements declared earlier in this list. */ descending: boolean; } /** * A utility for assigning priorities to a list of prompt elements. */ export declare class PrioritizedList extends PromptElement { render(): any; } export interface IToolResultProps extends BasePromptElementProps { /** * Base priority of the tool data. All tool data will be scoped to this priority. */ priority?: number; /** * Tool result from VS Code. */ data: LanguageModelToolResult; } /** * A utility to include the result of a tool called using the `vscode.lm.invokeTool` API. */ export declare class ToolResult extends PromptElement { render(): Promise | PromptPiece | undefined; } /** * Marker element that uses the legacy global prioritization algorithm (0.2.x * if this library) for pruning child elements. This will be removed in * the future. 
* * @deprecated */ export declare class LegacyPrioritization extends PromptElement { render(): any; } /** * Marker element that ensures all of its children are either included, or * not included. This is similar to the `` element, but it is more * basic and can contain extrinsic children. */ export declare class Chunk extends PromptElement { render(): any; } export interface ExpandableProps extends BasePromptElementProps { value: (sizing: PromptSizing) => string | Promise; } /** * An element that can expand to fill the remaining token budget. Takes * a `value` function that is initially called with the element's token budget, * and may be called multiple times with the new token budget as the prompt * is resized. */ export declare class Expandable extends PromptElement { render(_state: void, sizing: PromptSizing): Promise; } export interface TokenLimitProps extends BasePromptElementProps { max: number; } /** * An element that ensures its children don't exceed a certain number of * `maxTokens`. Its contents are pruned to fit within the budget before * the overall prompt pruning is run. */ export declare class TokenLimit extends PromptElement { render(): PromptPiece; } export declare abstract class AbstractKeepWith extends PromptElement { abstract readonly id: number; } export type KeepWithCtor = { new (props: PromptElementProps): AbstractKeepWith; id: number; }; /** * Returns a PromptElement that ensures each wrapped element is retained only * so long as each other wrapped is not empty. * * This is useful when dealing with tool calls, for example. In that case, * your tool call request should only be rendered if the tool call response * survived prioritization. In that case, you implement a `render` function * like so: * * ``` * render() { * const KeepWith = useKeepWith(); * return <> * ... * ... 
* ; * } * ``` * * Unlike ``, which blocks pruning of any child elements and simply * removes them as a block, `` in this case will allow the * `ToolCallResponse` to be pruned, and if it's fully pruned it will also * remove the `ToolCallRequest`. */ export declare function useKeepWith(): KeepWithCtor; export interface IfEmptyProps extends BasePromptElementProps { alt: PromptPieceChild; } /** * An element that returns its `alt` prop if its children are empty at the * time when it's rendered. This is especially useful when you require * fallback logic for opaque child data, such as tool calls. */ export declare class IfEmpty extends PromptElement { render(): PromptPiece; } export declare class LogicalWrapper extends PromptElement { render(): PromptPiece; }