/**
 * @license
 * Copyright 2021 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
import * as toxicity from '@tensorflow-models/toxicity';
import { TaskModelLoader } from '../../task_model';
import { Runtime, Task, TFJSModelCommonLoadingOption } from '../common';
import { SentimentDetectionBaseOptions, SentimentDetectionResult, SentimentDetector } from './common';
declare type ToxicityNS = typeof toxicity;
/** Loading options. */
export interface ToxicityTFJSLoadingOptions extends TFJSModelCommonLoadingOption, SentimentDetectionBaseOptions {
    /**
     * An array of strings indicating which types of toxicity to detect. Labels
     * must be one of `toxicity` | `severe_toxicity` | `identity_attack` |
     * `insult` | `threat` | `sexual_explicit` | `obscene`. Defaults to all
     * labels.
     */
    toxicityLabels?: string[];
}
/** Inference options (placeholder). */
export interface ToxicityTFJSInferenceOptions {
}
/** Loader for the toxicity TFJS model. */
export declare class ToxicityTFJSLoader extends TaskModelLoader<ToxicityNS, ToxicityTFJSLoadingOptions, ToxicityTFJS> {
    readonly metadata: {
        name: string;
        description: string;
        resourceUrls: {
            'github': string;
        };
        runtime: Runtime;
        version: string;
        supportedTasks: Task[];
    };
    readonly packageUrls: string[][];
    readonly sourceModelGlobalNamespace = "toxicity";
    protected transformSourceModel(sourceModelGlobal: ToxicityNS, loadingOptions?: ToxicityTFJSLoadingOptions): Promise<ToxicityTFJS>;
}
/**
 * Pre-trained TFJS toxicity model.
 *
 * It detects whether text contains toxic content such as threatening language,
 * insults, obscenities, identity-based hate, or sexually explicit language.
 *
 * Usage:
 *
 * ```js
 * // Load the model with options (optional; see below for docs).
 * const model = await tfTask.SentimentDetection.Toxicity.TFJS.load();
 *
 * // Run detection on text.
 * const result = await model.predict('You are stupid');
 * console.log(result.sentimentLabels);
 *
 * // Clean up.
 * model.cleanUp();
 * ```
 *
 * By default, the model returns prediction results for the following
 * sentiment labels:
 *
 * - toxicity
 * - severe_toxicity
 * - identity_attack
 * - insult
 * - threat
 * - sexual_explicit
 * - obscene
 *
 * Refer to `tfTask.SentimentDetection` for the `predict` and `cleanUp`
 * methods, and for more details about the result interface.
 *
 * @docextratypes [
 *   {description: 'Options for `load`', symbol: 'ToxicityTFJSLoadingOptions'},
 *   {description: 'Options for `predict`', symbol:
 *       'ToxicityTFJSInferenceOptions'}
 * ]
 *
 * @doc {heading: 'Sentiment Detection', subheading: 'Models'}
 */
export declare class ToxicityTFJS extends SentimentDetector {
    private toxicityModel?;
    private loadingOptions?;
    constructor(toxicityModel?: toxicity.ToxicityClassifier, loadingOptions?: ToxicityTFJSLoadingOptions);
    predict(text: string, options?: ToxicityTFJSInferenceOptions): Promise<SentimentDetectionResult>;
}
export declare const toxicityTfjsLoader: ToxicityTFJSLoader;
export {};
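/*
 * Example: loading with options (a minimal, hypothetical sketch; not part of
 * the original file). `toxicityLabels` is documented above. `backend` is
 * assumed here to come from `TFJSModelCommonLoadingOption`, as in the other
 * TFJS-runtime task models in this package.
 *
 *   const model = await tfTask.SentimentDetection.Toxicity.TFJS.load({
 *     backend: 'webgl',                          // TFJS backend to run on
 *     toxicityLabels: ['toxicity', 'insult'],    // only score these labels
 *   });
 *   const result = await model.predict('You are stupid');
 *   console.log(result.sentimentLabels);
 *   model.cleanUp();
 */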