/**
 * @module teams-ai
 */
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License.
 */
import { TurnContext } from 'botbuilder';
import { DataSource } from '../dataSources';
import { Memory } from '../MemoryFork';
import { Tokenizer } from '../tokenizers';
import { PromptTemplate } from './PromptTemplate';
import { PromptFunctions, PromptFunction } from './PromptFunctions';
/**
 * Options used to configure the prompt manager.
 */
export interface PromptManagerOptions {
    /**
     * Path to the filesystem folder containing all the application's prompts.
     */
    promptsFolder: string;
    /**
     * Optional. Message role to use for loaded prompts.
     * @remarks
     * Defaults to 'system'.
     */
    role?: string;
    /**
     * Optional. Maximum number of tokens of conversation history to include in prompts.
     * @remarks
     * The default is to let conversation history consume the remainder of the prompts
     * `max_input_tokens` budget. Setting this to a value greater than 1 will override that and
     * all prompts will use a fixed token budget.
     */
    max_conversation_history_tokens?: number;
    /**
     * Optional. Maximum number of messages to use when rendering conversation_history.
     * @remarks
     * This controls the automatic pruning of the conversation history that's done by the planners
     * LLMClient instance. This helps keep your memory from getting too big and defaults to a value
     * of `10` (or 5 turns.)
     */
    max_history_messages?: number;
    /**
     * Optional. Maximum number of tokens of user input to include in prompts.
     * @remarks
     * This defaults to unlimited but can be set to a value greater than `1` to limit the length of
     * user input included in prompts. For example, if set to `100` then any user input over
     * 100 tokens in length will be truncated.
     */
    max_input_tokens?: number;
}
/**
 * The configured PromptManager options.
 * @remarks
 * Same shape as {@link PromptManagerOptions} but with every field required, reflecting the
 * defaults the manager fills in at construction time.
 */
export interface ConfiguredPromptManagerOptions {
    /**
     * Path to the filesystem folder containing all the applications prompts.
     */
    promptsFolder: string;
    /**
     * Message role to use for loaded prompts.
     */
    role: string;
    /**
     * Maximum number of tokens of conversation history to include in prompts.
     */
    max_conversation_history_tokens: number;
    /**
     * Maximum number of messages to use when rendering conversation_history.
     */
    max_history_messages: number;
    /**
     * Maximum number of tokens of user input to include in prompts.
     */
    max_input_tokens: number;
}
/**
 * A filesystem based prompt manager.
 * @remarks
 * The default prompt manager uses the file system to define prompts that are compatible with
 * Microsoft's Semantic Kernel SDK (see: https://github.com/microsoft/semantic-kernel)
 *
 * Each prompt is a separate folder under a root prompts folder. The folder should contain the following files:
 *
 * - "config.json": Required. Contains the prompts configuration and is a serialized instance of `PromptTemplateConfig`.
 * - "skprompt.txt": Required. Contains the text of the prompt and supports Semantic Kernels prompt template syntax.
 * - "actions.json": Optional. Contains a list of actions that can be called by the prompt.
 *
 * Prompts can be loaded and used by name and new dynamically defined prompt templates can be
 * registered with the prompt manager.
 */
export declare class PromptManager implements PromptFunctions {
    private readonly _options;
    private readonly _dataSources;
    private readonly _functions;
    private readonly _prompts;
    /**
     * Creates a new 'PromptManager' instance.
     * @param {PromptManagerOptions} options - Options used to configure the prompt manager.
     * @returns {PromptManager} A new prompt manager instance.
     */
    constructor(options: PromptManagerOptions);
    /**
     * Gets the configured prompt manager options.
     * @returns {ConfiguredPromptManagerOptions} The configured prompt manager options.
     */
    get options(): ConfiguredPromptManagerOptions;
    /**
     * Registers a new data source with the prompt manager.
     * @param {DataSource} dataSource - Data source to add.
     * @returns {this} The prompt manager for chaining.
     */
    addDataSource(dataSource: DataSource): this;
    /**
     * Looks up a data source by name.
     * @param {string} name - Name of the data source to lookup.
     * @returns {DataSource} The data source.
     */
    getDataSource(name: string): DataSource;
    /**
     * Checks for the existence of a named data source.
     * @param {string} name - Name of the data source to lookup.
     * @returns {boolean} True if the data source exists.
     */
    hasDataSource(name: string): boolean;
    /**
     * Registers a new prompt template function with the prompt manager.
     * @param {string} name - Name of the function to add.
     * @param {PromptFunction} fn - Function to add.
     * @returns {this} - The prompt manager for chaining.
     */
    addFunction(name: string, fn: PromptFunction): this;
    /**
     * Looks up a prompt template function by name.
     * @param {string} name - Name of the function to lookup.
     * @returns {PromptFunction} The function.
     */
    getFunction(name: string): PromptFunction;
    /**
     * Checks for the existence of a named prompt template function.
     * @param {string} name Name of the function to lookup.
     * @returns {boolean} True if the function exists.
     */
    hasFunction(name: string): boolean;
    /**
     * Invokes a prompt template function by name.
     * @param {string} name - Name of the function to invoke.
     * @param {TurnContext} context - Turn context for the current turn of conversation with the user.
     * @param {Memory} memory - An interface for accessing state values.
     * @param {Tokenizer} tokenizer - Tokenizer to use when rendering the prompt.
     * @param {string[]} args - Arguments to pass to the function.
     * @returns {Promise<any>} Value returned by the function.
     */
    invokeFunction(name: string, context: TurnContext, memory: Memory, tokenizer: Tokenizer, args: string[]): Promise<any>;
    /**
     * Registers a new prompt template with the prompt manager.
     * @param {PromptTemplate} prompt - Prompt template to add.
     * @returns {this} The prompt manager for chaining.
     */
    addPrompt(prompt: PromptTemplate): this;
    /**
     * Loads a named prompt template from the filesystem.
     * @remarks
     * The template will be pre-parsed and cached for use when the template is rendered by name.
     *
     * Any augmentations will also be added to the template.
     * @param {string} name - Name of the prompt to load.
     * @returns {Promise<PromptTemplate>} The loaded and parsed prompt template.
     */
    getPrompt(name: string): Promise<PromptTemplate>;
    /**
     * Checks for the existence of a named prompt.
     * @param {string} name - Name of the prompt to load.
     * @returns {Promise<boolean>} True if the prompt exists.
     */
    hasPrompt(name: string): Promise<boolean>;
    /**
     * @param {PromptTemplate} template - The prompt template to update.
     * @private
     */
    private updateConfig;
    /**
     * @param {PromptTemplate} template - The prompt template to append augmentations to.
     * @param {PromptSection[]} sections - The prompt sections to append augmentations to.
     * @private
     */
    private appendAugmentations;
}
//# sourceMappingURL=PromptManager.d.ts.map