/**
 * @module teams-ai
 */
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License.
 */
import { Plan } from '../planners';
import { TurnState } from '../TurnState';
import { TurnContext } from 'botbuilder';
import { CreateModerationResponseResultsInner, AzureOpenAIClient, ContentSafetyHarmCategory } from '../internals';
import { OpenAIModerator, OpenAIModeratorOptions } from './OpenAIModerator';
export { ModerationSeverity } from '../internals';
/**
 * Options for the OpenAI based moderator.
 */
export interface AzureOpenAIModeratorOptions extends OpenAIModeratorOptions {
    /**
     * Azure OpenAI Content Safety Categories.
     * Each category is provided with a severity level threshold from 0 to 6.
     * If the severity level of a category is greater than or equal to the threshold, the category is flagged.
     */
    categories?: ContentSafetyHarmCategory[];
    /**
     * Text blocklist Name. Only support following characters: 0-9 A-Z a-z - . _ ~. You could attach multiple lists name here.
     */
    blocklistNames?: string[];
    /**
     * @deprecated
     * use `haltOnBlocklistHit`
     *
     * When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit.
     * When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit.
     * Default value is false.
     */
    breakByBlocklists?: boolean;
    /**
     * When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit.
     * When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit.
     * Default value is false.
     */
    haltOnBlocklistHit?: boolean;
}
/**
 * An Azure OpenAI moderator that uses OpenAI's moderation API to review prompts and plans for safety.
 * @remarks
 * This moderation can be configured to review the input from the user, output from the model, or both.
 * @template TState Optional. Type of the applications turn state.
 */
export declare class AzureContentSafetyModerator<TState extends TurnState = TurnState> extends OpenAIModerator<TState> {
    private readonly _contentSafetyOptions;
    private readonly _azureContentSafetyClient;
    private readonly _azureContentSafetyCategories;
    /**
     * Creates a new instance of the OpenAI based moderator.
     * @param {AzureOpenAIModeratorOptions} options Configuration options for the moderator.
     */
    constructor(options: AzureOpenAIModeratorOptions);
    /**
     * Creates a new instance of the Azure OpenAI client.
     * @protected
     * @param {OpenAIModeratorOptions} options The options for the moderator.
     * @returns {AzureOpenAIClient} The Azure OpenAI client.
     */
    protected createClient(options: OpenAIModeratorOptions): AzureOpenAIClient;
    /**
     * This method is called by the moderator to moderate the input.
     * @protected
     * @param {string} input The input to moderate.
     * @returns {Promise<CreateModerationResponseResultsInner | undefined>} The moderation results, or undefined.
     */
    protected createModeration(input: string): Promise<CreateModerationResponseResultsInner | undefined>;
    /**
     * Reviews an incoming prompt for safety violations.
     * @param {TurnContext} context - Context for the current turn of conversation.
     * @param {TState} state - Application state for the current turn of conversation.
     * @returns {Promise<Plan | undefined>} An undefined value to approve the prompt or a new plan to redirect to if not approved.
     */
    reviewInput(context: TurnContext, state: TState): Promise<Plan | undefined>;
    /**
     * Reviews the SAY commands generated by the planner for safety violations.
     * @param {TurnContext} context - Context for the current turn of conversation.
     * @param {TState} state - Application state for the current turn of conversation.
     * @param {Plan} plan - Plan generated by the planner.
     * @returns {Promise<Plan>} The plan to execute. Either the current plan passed in for review or a new plan.
     */
    reviewOutput(context: TurnContext, state: TState, plan: Plan): Promise<Plan>;
}
//# sourceMappingURL=AzureContentSafetyModerator.d.ts.map