/**
 * @module teams-ai
 */
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License.
 */
import { AxiosRequestConfig } from 'axios';
import { TurnContext } from 'botbuilder';
import { ClientOptions } from 'openai';
import { Memory } from '../MemoryFork';
import { PromptFunctions, PromptTemplate } from '../prompts';
import { Tokenizer } from '../tokenizers';
import { PromptResponse } from '../types';
import { PromptCompletionModel, PromptCompletionModelEmitter } from './PromptCompletionModel';
/**
 * Base model options common to both OpenAI and Azure OpenAI services.
 */
export interface BaseOpenAIModelOptions {
    /**
     * Optional. Whether to log requests to the console.
     * @remarks
     * This is useful for debugging prompts and defaults to `false`.
     */
    logRequests?: boolean;
    /**
     * Optional. Forces the model return a specific response format.
     * @remarks
     * This can be used to force the model to always return a valid JSON object.
     */
    responseFormat?: {
        type: 'json_object';
    };
    /**
     * @deprecated
     * Optional. Retry policy to use when calling the OpenAI API.
     * @remarks
     * Use `maxRetries` instead.
     */
    retryPolicy?: number[];
    /**
     * Optional. Maximum number of retries to use when calling the OpenAI API.
     * @remarks
     * The default is to retry twice.
     */
    maxRetries?: number;
    /**
     * @deprecated
     * Optional. Request options to use when calling the OpenAI API.
     * @remarks
     * Use `clientOptions` instead.
     */
    requestConfig?: AxiosRequestConfig;
    /**
     * Optional. Custom client options to use when calling the OpenAI API.
     */
    clientOptions?: ClientOptions;
    /**
     * Optional. A static seed to use when making model calls.
     * @remarks
     * The default is to use a random seed. Specifying a seed will make the model deterministic.
     */
    seed?: number;
    /**
     * Optional. Whether to use `system` messages when calling the OpenAI API.
     * @remarks
     * The current generation of models tend to follow instructions from `user` messages better
     * than `system` messages so the default is `false`, which causes any `system` message in the
     * prompt to be sent as `user` messages instead.
     */
    useSystemMessages?: boolean;
    /**
     * Optional. Whether the models responses should be streamed back using Server Sent Events (SSE).
     * @remarks
     * Defaults to `false`.
     */
    stream?: boolean;
}
/**
 * Options for configuring an `OpenAIModel` to call an OpenAI hosted model.
 */
export interface OpenAIModelOptions extends BaseOpenAIModelOptions {
    /**
     * API key to use when calling the OpenAI API.
     * @remarks
     * A new API key can be created at https://platform.openai.com/account/api-keys.
     */
    apiKey: string;
    /**
     * Default model to use for completions.
     */
    defaultModel: string;
    /**
     * Optional. Organization to use when calling the OpenAI API.
     */
    organization?: string;
    /**
     * Optional. Endpoint to use when calling the OpenAI API.
     * @remarks
     * For Azure OpenAI this is the deployment endpoint.
     */
    endpoint?: string;
    /**
     * Optional. Project to use when calling the OpenAI API.
     */
    project?: string;
}
/**
 * Options for configuring an `OpenAIModel` to call an Azure OpenAI hosted model.
 */
export interface AzureOpenAIModelOptions extends BaseOpenAIModelOptions {
    /**
     * API key to use when making requests to Azure OpenAI.
     */
    azureApiKey?: string;
    /**
     * Default name of the Azure OpenAI deployment (model) to use.
     */
    azureDefaultDeployment: string;
    /**
     * Deployment endpoint to use.
     */
    azureEndpoint: string;
    /**
     * Optional. Version of the API being called. Defaults to `2023-05-15`.
     */
    azureApiVersion?: string;
    /**
     * Optional. A function that returns an access token for Microsoft Entra (formerly known as Azure Active Directory),
     * which will be invoked on every request.
     */
    azureADTokenProvider?: () => Promise<string>;
}
/**
 * A `PromptCompletionModel` for calling OpenAI and Azure OpenAI hosted models.
* @remarks * The model has been updated to support calling OpenAI's new o1 family of models. That currently * comes with a few constraints. These constraints are mostly handled for you but are worth noting: * - The o1 models introduce a new `max_completion_tokens` parameter and they've deprecated the * `max_tokens` parameter. The model will automatically convert the incoming `max_tokens` parameter * to `max_completion_tokens` for you. But you should be aware that o1 has hidden token usage and costs * that aren't constrained by the `max_completion_tokens` parameter. This means that you may see an * increase in token usage and costs when using the o1 models. * - The o1 models do not currently support the sending of system messages which just means that the * `useSystemMessages` parameter is ignored when calling the o1 models. * - The o1 models do not currently support setting the `temperature`, `top_p`, and `presence_penalty` * parameters so they will be ignored. * - The o1 models do not currently support the use of tools so you will need to use the "monologue" * augmentation to call actions. */ export declare class OpenAIModel implements PromptCompletionModel { private readonly _events; private readonly _client; private readonly _useAzure; /** * Options the client was configured with. */ readonly options: OpenAIModelOptions | AzureOpenAIModelOptions; /** * Creates a new `OpenAIModel` instance. * @param {OpenAIModelOptions} options - Options for configuring the model client. */ constructor(options: OpenAIModelOptions | AzureOpenAIModelOptions); /** * Events emitted by the model. * @returns {PromptCompletionModelEmitter} The events emitted by the model. */ get events(): PromptCompletionModelEmitter; /** * Completes a prompt using OpenAI or Azure OpenAI. * @param {TurnContext} context - Current turn context. * @param {Memory} memory - An interface for accessing state values. * @param {PromptFunctions} functions - Functions to use when rendering the prompt. 
* @param {Tokenizer} tokenizer - Tokenizer to use when rendering the prompt. * @param {PromptTemplate} template - Prompt template to complete. * @returns {Promise>} A `PromptResponse` with the status and message. */ completePrompt(context: TurnContext, memory: Memory, functions: PromptFunctions, tokenizer: Tokenizer, template: PromptTemplate): Promise>; /** * Converts the messages to ChatCompletionMessageParam[]. * @param {Message} messages - The messages from result.output. * @returns {ChatCompletionMessageParam[]} - The converted messages. */ private convertMessages; /** * @private * @template TRequest * @param {Partial} target - The target TRequest. * @param {any} src - The source object. * @param {string[]} fields - List of fields to copy. * @returns {TRequest} The TRequest */ private copyOptionsToRequest; /** * @private * @param {string} model - Model to use. * @param {ChatCompletionMessageParam[]} messages - Messages to send. * @param {PromptTemplate} template Prompt template being used. * @returns {ChatCompletionCreateParams} Chat completion parameters. */ private getChatCompletionParams; private getInputMessage; private returnTooLong; private returnError; } //# sourceMappingURL=OpenAIModel.d.ts.map