import { RhinoInputOptions, RhinoOptions } from './types';

// Result of an inference: whether the spoken command matched the context, and
// if so, which intent was detected and any slot name -> slot value pairs.
export type RhinoInference = {
  isUnderstood: boolean;
  intent?: string;
  // Fix: bare `Record` is invalid (the generic requires two type arguments);
  // slots are string slot names mapped to string slot values.
  slots?: Record<string, string>;
};

/**
 * Wraps the Rhino engine and context.
 *
 * Performs the calls to the Rhino node library. Does some basic parameter validation to prevent
 * errors occurring in the library layer. Provides clearer error messages in native JavaScript.
 */
export default class Rhino {
  private _pvRhino;
  private _handle;
  private readonly _version;
  private readonly _sampleRate;
  private readonly _frameLength;
  private isFinalized;
  /**
   * Creates an instance of Rhino with a specific context.
   * @param {string} accessKey AccessKey obtained from Picovoice Console (https://console.picovoice.ai/).
   * @param {string} contextPath the path to the Rhino context file (.rhn extension)
   * @param {number} options.sensitivity [0.5] the sensitivity in the range [0,1]
   * @param {number} options.endpointDurationSec Endpoint duration in seconds. An endpoint is a chunk of silence at the end of an
   * utterance that marks the end of spoken command. It should be a positive number within [0.5, 5]. A lower endpoint
   * duration reduces delay and improves responsiveness. A higher endpoint duration assures Rhino doesn't return inference
   * preemptively in case the user pauses before finishing the request.
   * @param {boolean} options.requireEndpoint If set to `true`, Rhino requires an endpoint (a chunk of silence) after the spoken command.
   * If set to `false`, Rhino tries to detect silence, but if it cannot, it still will provide inference regardless. Set
   * to `false` only if operating in an environment with overlapping speech (e.g. people talking in the background).
   * @param {string} options.device String representation of the device (e.g., CPU or GPU) to use for inference.
   * If set to `best`, the most suitable device is selected automatically. If set to `gpu`, the engine uses the
   * first available GPU device. To select a specific GPU device, set this argument to `gpu:${GPU_INDEX}`, where
   * `${GPU_INDEX}` is the index of the target GPU. If set to `cpu`, the engine will run on the CPU with the
   * default number of threads. To specify the number of threads, set this argument to `cpu:${NUM_THREADS}`,
   * where `${NUM_THREADS}` is the desired number of threads.
   * @param {string} options.modelPath the path to the Rhino model (.pv extension)
   * @param {string} options.libraryPath the path to the Rhino dynamic library (platform-dependent extension)
   */
  constructor(accessKey: string, contextPath: string, options?: RhinoOptions);
  /**
   * @returns number of audio samples per frame (i.e. the length of the array provided to the process function)
   * @see {@link process}
   */
  get frameLength(): number;
  /**
   * @returns the audio sampling rate accepted by Rhino
   */
  get sampleRate(): number;
  /**
   * @returns the version of the Rhino engine
   */
  get version(): string;
  /**
   * Process a frame of pcm audio.
   *
   * @param {Array} frame 16-bit integers of 16kHz linear PCM mono audio.
   * The specific array length is obtained from Rhino via the frameLength field.
   * @returns {boolean} true when Rhino has concluded processing audio and determined the intent (or that the intent was not understood), false otherwise.
   */
  process(frame: Int16Array): boolean;
  /**
   * Resets the internal state of Rhino. It should be called before the engine can be used to infer intent from a new
   * stream of audio
   */
  reset(): void;
  /**
   * Gets inference results from Rhino. If the phrase was understood, it includes the specific intent name
   * that was inferred, and (if applicable) slot keys and specific slot values.
   *
   * Should only be called after the process function returns true, otherwise Rhino
   * has not yet reached an inference conclusion.
   * @see {@link process}
   *
   * @returns {Object} with inference information (isUnderstood, intent, slots)
   *
   * e.g.:
   *
   * {
   *   isUnderstood: true,
   *   intent: 'orderDrink',
   *   slots: {
   *     size: 'medium',
   *     numberOfShots: 'double shot',
   *     coffeeDrink: 'americano',
   *     milkAmount: 'lots of milk',
   *     sugarAmount: 'some sugar'
   *   }
   * }
   */
  getInference(): RhinoInference;
  /**
   * Gets the source of the Rhino context in YAML format. Shows the list of intents,
   * which expressions map to those intents, as well as slots and their possible values.
   *
   * @returns {string} the context YAML
   */
  getContextInfo(): string;
  /**
   * Releases the resources acquired by Rhino.
   *
   * Be sure to call this when finished with the instance
   * to reclaim the memory that was allocated by the C library.
   */
  release(): void;
  /**
   * Lists all available devices that Rhino can use for inference. Each entry in the list can be the `device` argument
   * of the constructor.
   *
   * @returns List of all available devices that Rhino can use for inference.
   */
  static listAvailableDevices(options?: RhinoInputOptions): string[];
  private handlePvStatus;
}
//# sourceMappingURL=rhino.d.ts.map