import type { BaseClientOptions, BaseRequestOptions } from "../../../../BaseClient";
import { type NormalizedClientOptions } from "../../../../BaseClient";
import * as core from "../../../../core";
import * as ElevenLabs from "../../../index";
import { TranscriptsClient } from "../resources/transcripts/client/Client";

/**
 * Companion namespace for {@link SpeechToTextClient}, merged with the class
 * declaration so callers can reference option types as
 * `SpeechToTextClient.Options` / `SpeechToTextClient.RequestOptions`.
 */
export declare namespace SpeechToTextClient {
    /** Client-level configuration; currently an alias of the shared {@link BaseClientOptions}. */
    type Options = BaseClientOptions;
    /** Per-request configuration; currently adds nothing beyond {@link BaseRequestOptions}. */
    interface RequestOptions extends BaseRequestOptions {
    }
}

/**
 * Client for the ElevenLabs Speech-to-Text API surface.
 *
 * This is a declaration file (`.d.ts`); the method bodies live in the
 * generated JavaScript implementation, which is not visible here.
 */
export declare class SpeechToTextClient {
    /** Normalized client options captured at construction time. */
    protected readonly _options: NormalizedClientOptions;
    /**
     * Backing field for the {@link transcripts} getter. Its `| undefined`
     * type suggests the sub-client is created lazily on first access —
     * NOTE(review): confirm against the implementation.
     */
    protected _transcripts: TranscriptsClient | undefined;
    /** @param options - Optional client configuration; see {@link SpeechToTextClient.Options}. */
    constructor(options?: SpeechToTextClient.Options);
    /** Sub-client for transcript resources. */
    get transcripts(): TranscriptsClient;
    /**
     * Transcribe an audio or video file. If webhook is set to true, the request will be processed asynchronously and results sent to configured webhooks. When use_multi_channel is true and the provided audio has multiple channels, a 'transcripts' object with separate transcripts for each channel is returned. Otherwise, returns a single transcript. The optional webhook_metadata parameter allows you to attach custom data that will be included in webhook responses for request correlation and tracking.
     *
     * @param {ElevenLabs.BodySpeechToTextV1SpeechToTextPost} request
     * @param {SpeechToTextClient.RequestOptions} requestOptions - Request-specific configuration.
     *
     * @throws {@link ElevenLabs.UnprocessableEntityError}
     *
     * @example
     * await client.speechToText.convert({
     *     enableLogging: true,
     *     modelId: "scribe_v2"
     * })
     */
    convert(request: ElevenLabs.BodySpeechToTextV1SpeechToTextPost, requestOptions?: SpeechToTextClient.RequestOptions): core.HttpResponsePromise;
    /** Private implementation behind {@link convert}; signature not exposed in this declaration. */
    private __convert;
}