// Copyright © 2022 BytePlusRTC All rights reserved. // SPDX-License-Identifier: MIT import * as $p_a from '../android/index'; import * as $p_i from '../ios/index'; import { PositionInfo, AudioTrackType, AudioPlayType, VirtualBackgroundSource, AudioScenarioType, AudioProfileType, AnsMode, VoiceChangerType, VoiceReverbType, StreamIndex, RenderMode, VideoSuperResolutionMode, MirrorType, RemoteMirrorType, VideoRotationMode, CameraId, AudioRoute, MessageConfig, AudioFrameCallbackMethod, ZoomConfigType, ZoomDirectionType, VideoSourceType, AlphaLayout, AudioSourceType, AudioRenderType, PublishFallbackOption, RemoteUserPriority, SEICountPerFrame, EffectBeautyMode, VideoOrientation, RecordingType, ScreenMediaType, VideoDecoderConfig, EarMonitorMode, TorchState, VideoRotation, RTCLogConfig, VoiceEqualizationConfig, VoiceReverbConfig, VideoCanvas, RemoteStreamKey, RemoteVideoRenderConfig, AudioFormat, MixedStreamConfig, PushSingleStreamParam, VideoCaptureConfig, VideoEncoderConfig, ScreenVideoEncoderConfig, AudioFrame, RecordingConfig, AudioRecordingConfig, AudioPropertiesConfig, StreamSycnInfoConfig, PublicStreaming, EchoTestConfig, CloudProxyInfo, NetworkTimeInfo, MediaTypeEnhancementConfig, MediaStreamType, PauseResumeControlMediaType, AudioSelectionPriority, UserInfo, RTCRoomConfig, RemoteVideoConfig, SubtitleConfig, SingScoringConfig, DownloadLyricType, AttenuationType, Position, ReceiveRange, AudioMixingType, AudioMixingDualMonoMode, MediaPlayerConfig, MediaPlayerCustomSource, AudioEffectPlayerConfig } from './keytype'; import { IFaceDetectionObserver, RTCVideoEventHandler, IAudioFrameObserver, IAudioFrameProcessor, ILocalEncodedVideoFrameObserver, IRemoteEncodedVideoFrameObserver, IMixedStreamObserver, IPushSingleStreamToCDNObserver, IExternalVideoEncoderEventHandler, RTCRoomEventHandler, IMediaPlayerAudioFrameObserver, IMediaPlayerEventHandler, IAudioEffectPlayerEventHandler } from './callback'; import { Intent } from './types'; export declare class ForwardStreamInfo 
{ constructor(roomId: string, token: string); constructor(); protected _instance: any; get roomId(): string; set roomId(value: string); get token(): string; set token(value: string); protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; } export declare class ISpatialAudio { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @region Spatial Audio * @author majun.lvhiei * @brief Enable/disable spatial audio function. * @param enable Whether to enable spatial audio function:
* - true:Enable * - false:Disable(Default setting) * @note You need to call {@link updatePosition updatePosition} as well to really enjoy the spatial audio effect. * */ enableSpatialAudio(enable: boolean): void; /** {en} * @detail api * @region Audio management * @author luomingkang.264 * @brief Turn off the effect of the orientation of the local user as the sound source.
* After the effect is off, all the other users in the room listen to the local user as if the local user is in right front of each of them. * @note * - After the orientation effect as the sound source is disabled, you cannot enable it during the lifetime of the `SpatialAudio` instance. * - Calling this API does not affect the orientation effect of the local user as a listener. See {@link updateSelfPosition updateSelfPosition} and {@link updateRemotePosition updateRemotePosition}. * */ disableRemoteOrientation(): void; /** {en} * @valid since 3.52 * @detail api * @region Audio management * @author wangjunzheng * @brief Sets the coordinate and orientation of the local user as a listener in the rectangular coordinate system the local user built to achieve expected spatial audio effects. * @param positionInfo Information on the local user's position. Refer to {@link PositionInfo PositionInfo} for details. * @return * - 0: Success. * - <0: Failure. * - -2: Failure. The reason is that any two of the 3D coordinate vectors of your position are not perpendicular to each other. * @note * - You need to call this API after joining the room. * - Before calling this API, you should call {@link enableSpatialAudio enableSpatialAudio} first to enable the spatial audio function. * - The settings made locally will not influence other users' spatial audio experience. * */ updateSelfPosition(positionInfo: PositionInfo): number; /** {en} * @valid since 3.52 * @detail api * @region Audio management * @author wangjunzheng * @brief Sets the coordinate and orientation of the remote user as a speaker in the rectangular coordinate system of the local user. In this case, the local user hears from the remote user with the expected spatial audio effects. * @param uid User ID * @param positionInfo Information on the remote user's position. Refer to {@link PositionInfo PositionInfo} for details. * @return * - 0: Success. * - <0: Failure. * - -2: Failure. 
The reason is that any two of the 3D coordinate vectors of the position of the remote user are not perpendicular to each other. * @note * - You must call this API after creating the room. * - The settings made locally will not influence other users' spatial audio experience. * */ updateRemotePosition(uid: string, positionInfo: PositionInfo): number; /** {en} * @valid since 3.52 * @detail api * @region Audio management * @author wangjunzheng * @brief Disables all spatial audio effects set by calling {@link updateRemotePosition updateRemotePosition} for a certain remote user. * @param uid User ID of the remote user. * @return * - 0: Success. * - <0: Failure. * */ removeRemotePosition(uid: string): number; /** {en} * @valid since 3.52 * @detail api * @region Audio management * @author wangjunzheng * @brief Disables all spatial audio effects set by calling {@link updateRemotePosition updateRemotePosition} for all remote users. * @return * - 0: Success. * - <0: Failure. * */ removeAllRemotePosition(): number; } export declare class IVideoDeviceManager { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @platform android * @valid since 3.56 * @detail api * @region Video Facility Management * @author likai.666 * @brief Get a list of video capture devices in the current system. * @return Contains a list of all video capture devices in the system. See {@link VideoDeviceInfo VideoDeviceInfo}. * */ android_enumerateVideoCaptureDevices(): $p_a.List<$p_a.VideoDeviceInfo>; /** {en} * @valid since 3.56 * @detail api * @region Video Facility Management * @author likai.666 * @brief Set the current video capture device * @param deviceId Video device ID, which can be obtained through {@link enumerateVideoCaptureDevices enumerateVideoCaptureDevices} * @return * - 0: Success. * - !0: Failure.
 * */ setVideoCaptureDevice(deviceId: string): number; /** {en} * @platform ios * @detail api * @region Video Device Management * @author zhangzhenyu.samuel * @brief Get the currently-using video capture device * @param deviceID video device ID * @return * - 0: Success. * - !0: Failure. */ ios_getVideoCaptureDevice(deviceID: string): number; } export declare class IKTVPlayer { protected _instance: any; /** * @platform ios */ /** * @platform ios */ get ios_delegate(): $p_i.id<$p_i.ByteRTCKTVPlayerDelegate>; set ios_delegate(value: $p_i.id<$p_i.ByteRTCKTVPlayerDelegate>); protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @platform android * @detail api * @author lihuan.wuti2ha * @brief Sets the KTV player event handler. * @param playerEventHandler KTV player event handler. See {@link IKTVPlayerEventHandler IKTVPlayerEventHandler}. * */ android_setPlayerEventHandler(playerEventHandler: $p_a.IKTVPlayerEventHandler): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Plays the music. * @param musicId Music ID.
* If the song with the same musicId is playing when you call this API, the music will restart from the starting position. An error will be triggered if the audio file corresponding to musicId does not exist. * @param trackType Audio track type of the KTV player. See {@link AudioTrackType AudioTrackType}. * @param playType Audio play type. See {@link AudioPlayType AudioPlayType}. * @note * - After calling this API, you will receive the music play state through {@link onPlayStateChanged onPlayStateChanged} callback. * - If the music ID is invalid, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3023 and a playState of 4. * - If you didn't join the room, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3022 and a playState of 4. * - If the music file does not exist, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3020 and a playState of 4. * */ playMusic(musicId: string, trackType: AudioTrackType, playType: AudioPlayType): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Pauses the music. * @param musicId Music ID. * @note * - After calling this API, you will receive the music play state through {@link onPlayStateChanged onPlayStateChanged} callback. * - If the music ID is invalid, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3023 and a playState of 4. * - If you didn't join the room, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3022 and a playState of 4. * */ pauseMusic(musicId: string): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Resumes playing the music. * @param musicId Music ID. * @note * - After calling this API, you will receive the music play state through {@link onPlayStateChanged onPlayStateChanged} callback. 
* - If the music ID is invalid, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3023 and a playState of 4. * - If you didn't join the room, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3022 and a playState of 4. * */ resumeMusic(musicId: string): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Stops playing the music. * @param musicId Music ID. * @note * - After calling this API, you will receive the music play state through {@link onPlayStateChanged onPlayStateChanged} callback. * - If the music ID is invalid, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3023 and a playState of 4. * - If you didn't join the room, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3022 and a playState of 4. * */ stopMusic(musicId: string): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Sets the starting position of the music file. * @param musicId Music ID. * @param position The starting position of the music file in milliseconds. The value must be less than the total length of the music. * @note * - The music must be playing when you call this API. * - After calling this API, you will receive the music play state through {@link onPlayStateChanged onPlayStateChanged} callback. * - If the music ID is invalid, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3023 and a playState of 4. * - If you didn't join the room, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3022 and a playState of 4. * */ seekMusic(musicId: string, position: number): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Sets the volume of the playing music. The music must be playing when you set the volume. * @param musicId Music ID. * @param volume Volume. 
Adjustment range: [0,400].
* - 0: Mute. * - 100: Original volume. * - 400: 4 times the original volume (with overflow protection). * @note * - The music must be playing when you call this API. * - If the set volume is greater than 400, it will be adjusted by the maximum value of 400; if the set volume is less than 0, it will be adjusted by the minimum value of 0. * - If the music ID is invalid, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3023 and a playState of 4. * - If you didn't join the room, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3022 and a playState of 4. * */ setMusicVolume(musicId: string, volume: number): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Switches the audio track type between the original track and the instrumental track. * @param musicId Music ID. * @note The music must be playing when you call this API. * */ switchAudioTrackType(musicId: string): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Transposes up/down the music being played. * @param musicId Music ID. * @param pitch The pitch up/down value relative to the original pitch, in the range of [-12, 12], with the default value of 0.
* The difference in pitch between two adjacent values is a semitone. A positive value indicates an increase in pitch, and a negative value indicates a decrease in pitch. A larger absolute value means more pitch increase or decrease. * @note * - The music must be in the playing when you call this API. * - If the set pitch is greater than 12, it will be adjusted by the maximum value of 12; if the set pitch is less than –12, it will be adjusted by the minimum value of –12. * - If the music ID is invalid, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3023 and a playState of 4. * - If you didn't join the room, you will receive the {@link onPlayStateChanged onPlayStateChanged} callback, with an errorCode of -3022 and a playState of 4. * */ setMusicPitch(musicId: string, pitch: number): void; } export declare class IVideoEffect { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @author zhushufan.ref * @brief Checks video effect license, sets the video effect resource model path, and initializes video effect. * @param licenseFile The absolute path of the license file for authorization. * @param algoModelDir The absolute path of the Effects SDK's models file. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * */ initCVResource(licenseFile: string, algoModelDir: string): number; /** {en} * @detail api * @author zhushufan.ref * @brief Enables video effects including beauty and color filters. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. 
See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * @note * - You must call {@link initCVResource initCVResource} before calling this API. * - This API does not turn on video effects directly, you must call {@link setEffectNodes setEffectNodes} or {@link setColorFilter setColorFilter} next. * - Call {@link disableVideoEffect disableVideoEffect} to turn off video effects. * */ enableVideoEffect(): number; /** {en} * @detail api * @author zhushufan.ref * @brief Disables video effects. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * @note Call {@link enableVideoEffect enableVideoEffect} to enable video effects. * */ disableVideoEffect(): number; /** {en} * @detail api * @author zhushufan.ref * @brief Sets the video effects material package. * @param effectNodes Array of effect material package paths.
* To remove the current video effect, set it to null. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * @note You must call {@link enableVideoEffect enableVideoEffect} before calling this API. * */ setEffectNodes(effectNodes: Array): number; /** {en} * @detail api * @author zhushufan.ref * @brief Sets the intensity of video effects. * @param effectNode The absolute path of the effects resource package, see [Resource Package Structure](https://docs.byteplus.com/effects/docs/resource-package-structure-v421-and-later). * @param key The name of the material key to be set, see [Functions of Resource Keys](https://docs.byteplus.com/effects/docs/functions-of-resource-keys-v421-and-later) for the value. * @param value The intensity value that needs to be set, the value range [0,1], and the setting is invalid when it exceeds the range. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * */ updateEffectNode(effectNode: string, key: string, value: number): number; /** {en} * @detail api * @author zhushufan.ref * @brief Sets the color filter. * @param filterRes Filter effects package absolute path. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. 
* @note Call {@link setColorFilterIntensity setColorFilterIntensity} to set the intensity of the color filter enabled. Set the intensity to 0 to turn off color filter. * */ setColorFilter(filterRes: string): number; /** {en} * @detail api * @author zhushufan.ref * @brief Sets the intensity of the color filter enabled. * @param intensity Filter intensity. The value range [0,1] is set to be invalid when the range is exceeded.
* Set the intensity to 0 to turn off color filter. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * */ setColorFilterIntensity(intensity: number): number; /** {en} * @detail api * @author zhushufan.ref * @brief Sets the original background to a specified image or a solid color. * @param backgroundStickerRes The absolute path of virtual background effects. * @param source Virtual background source. See {@link VirtualBackgroundSource VirtualBackgroundSource}. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * @note * - You must call {@link initCVResource initCVResource} before calling this API. * - Call {@link disableVirtualBackground disableVirtualBackground} to turn off the virtual background. * */ enableVirtualBackground(backgroundStickerRes: string, source: VirtualBackgroundSource): number; /** {en} * @detail api * @author wangjunlin.3182 * @brief Turns off the virtual background. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * @note After calling {@link enableVirtualBackground enableVirtualBackground} to enable the virtual background function, you can call this API to turn it off. 
* */ disableVirtualBackground(): number; /** {en} * @detail api * @author wangjunlin.3182 * @brief Starts face detection and registers the observer for the result.
* With this observer, you will receive {@link onFaceDetectResult onFaceDetectResult} periodically. * @param observer See {@link IFaceDetectionObserver IFaceDetectionObserver}. * @param intervalMs The minimum time interval between two callbacks in milliseconds. The value should be greater than 0. The actual time interval is between interval_ms and interval_ms+the time slot of a captured video frame. * @param faceModelPath The absolute path of the face detection algorithm file. Typically it is the tt_face_vXXX.model file in the ttfacemodel folder. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - -1004: Initializing. This function will be available when the initialization is completed. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * */ enableFaceDetection(observer: IFaceDetectionObserver, intervalMs: number, faceModelPath: string): number; /** {en} * @detail api * @author wangjunlin.3182 * @brief Stops face detection. * @return * - 0: Success. * - –1000: The Effects SDK is not integrated. * - –1001: This API is unavailable for your Effects SDK. * - –1002: Your Effects SDK's version is incompatible. * - < 0: Other error. See [error code table](https://docs.byteplus.com/effects/docs/error-code-table) for specific instructions. * */ disableFaceDetection(): number; } export declare class RTCVideo { /** {en} * @platform android * @detail api * @region Engine Management * @author wangzhanqiang * @brief Creates an engine instance.
* This is the very first API that you must call if you want to use all the RTC capabilities.
 * If there is no engine instance in current process, calling this API will create one. If an engine instance has been created, calling this API again will have the created engine instance returned. * @param context Android application context * @param handler Handler sent from SDK to App. See {@link IRTCVideoEventHandler IRTCVideoEventHandler} * @param config SDK Engine Config, see {@link EngineConfig EngineConfig} * @return * - RTCVideo: A successfully created engine instance. * - Null: EngineConfig is invalid (see {@link EngineConfig EngineConfig}), or the .so file failed to load. * @note The lifecycle of the handler must be longer than that of the RTCVideo, i.e. the handler must be created before calling {@link createRTCVideo createRTCVideo} and destroyed after calling {@link destroyRTCVideo destroyRTCVideo}. * @order 1 * */ static android_createRTCVideo_context$appId$handler$eglContext$parameters(context: $p_a.Context, appId: string, handler: $p_a.IRTCVideoEventHandler, eglContext: Object, parameters: $p_a.JSONObject): $p_a.RTCVideo; /** {en} * @detail api * @region Engine management * @author wangzhanqiang * @brief Destroy the engine instance created by {@link createRTCVideo createRTCVideo}, and release all related resources. * @note * - Call this API after all business scenarios related to the engine instance are destroyed. * - When the API is called, RTC SDK destroys all memory associated with the engine instance and stops any interaction with the media server. * - Calling this API will start the SDK exit logic. The engine thread is held until the exit logic is complete. Therefore, do not call this API directly in the callback thread, or it will cause a deadlock. This function takes a long time to execute, so it's not recommended to call this API in the main thread, or the main thread may be blocked. 
* @order 3 * */ static destroyRTCVideo(): void; /** {en} * @detail api * @region Engine Management * @author wangzhanqiang * @brief Get the current version number of the SDK. * @return The current SDK version number. * @order 5 * */ static getSDKVersion(): string; /** {en} * @detail api * @region Engine Management * @author caofanglu * @brief Configures the local log parameters of RTC SDK, including the logging level, directory, the limits for total log file size, and the prefix to the log file. * @param logConfig Local log parameters. See {@link RTCLogConfig RTCLogConfig}. * @return * - 0: Success. * - –1: Failure. This API must be called before creating engine. * - –2: Failure. Invalid parameters. * @note This API must be called before {@link createRTCVideo createRTCVideo}. * @order 30 * */ static setLogConfig(logConfig: RTCLogConfig): number; /** {en} * @detail api * @region Engine management * @author panjian.fishing * @brief Gets the description text of different error codes and warning codes in the SDK. * @param code The values obtained through {@link onWarning onWarning} and {@link onError onError} callbacks,
* For details, please refer to {@link ErrorCode ErrorCode} and {@link WarningCode WarningCode} * @return String Description text * @order 15 * */ static getErrorDescription(code: number): string; constructor(); constructor(); protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @region Engine Management * @author wangzhanqiang * @brief The receiving class that sets engine event callbacks must inherit from {@link IRTCVideoEventHandler IRTCVideoEventHandler}. * @param engineEventHandler
* Event processor interface class. See {@link IRTCVideoEventHandler IRTCVideoEventHandler}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - The caller needs to implement a class that inherits from {@link IRTCVideoEventHandler IRTCVideoEventHandler} and override the events that need attention. * - The callback is asynchronous recall * - All event callbacks will be triggered in a separate callback thread. Please pay attention to all operations related to the thread running environment when receiving callback events, such as operations that need to be performed in the UI thread. * Do not directly operate in the implementation of the callback function. * @order 20 * */ setRtcVideoEventHandler(engineEventHandler: RTCVideoEventHandler): number; /** {en} * @platform android * @detail api * @author dixing * @brief Gets audio device management API class. * @return See {@link IRTCAudioDeviceManager IRTCAudioDeviceManager}. * */ android_getAudioDeviceManager(): $p_a.IRTCAudioDeviceManager; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Enable internal video capture immediately. The default setting is off.
* Internal video capture refers to: capturing video using the built-in module.
* The local client will be informed via {@link onVideoDeviceStateChanged onVideoDeviceStateChanged} after starting video capture by calling this API.
* The remote clients in the room will be informed of the state change via {@link onUserStartVideoCapture onUserStartVideoCapture} after the visible client starts video capture by calling this API. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Since the upgrade in v3.37.0, you need to add Kotlin plugin to Gradle in the project to use this API. * - Call {@link stopVideoCapture stopVideoCapture} to stop the internal video capture. Otherwise, the internal video capture will sustain until you destroy the engine instance. * - Once you create the engine instance, you can start internal video capture regardless of the video publishing state. The video stream will start publishing only after the video capture starts. * - To switch from custom to internal video capture, stop publishing before disabling the custom video capture module and then call this API to enable the internal video capture. * - Call {@link switchCamera switchCamera} to switch the camera used by the internal video capture module. * - If the default video format can not meet your requirement, contact our technical specialist to help you with Cloud Config. After that, you can push and apply these configurations to Android clients at any time. * */ startVideoCapture(): number; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Disable internal video capture immediately. The default is off.
* Internal video capture refers to: capturing video using the built-in module.
* The local client will be informed via {@link onVideoDeviceStateChanged onVideoDeviceStateChanged} after stopping video capture by calling this API.
* The remote clients in the room will be informed of the state change via {@link onUserStopVideoCapture onUserStopVideoCapture} after the visible client stops video capture by calling this API. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Call startVideoCapture {@link startVideoCapture} to enable the internal video capture. * - Without calling this API the internal video capture will sustain until you destroy the engine instance. * */ stopVideoCapture(): number; /** {en} * @detail api * @region Audio management * @author dixing * @brief Start internal audio capture. The default is off.
* Internal audio capture refers to: capturing audio using the built-in module.
* The local client will be informed via {@link onAudioDeviceStateChanged onAudioDeviceStateChanged} after starting audio capture by calling this API.
* The remote clients in the room will be informed of the state change via {@link onUserStartAudioCapture onUserStartAudioCapture} after the visible user starts audio capture by calling this API. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - To enable a microphone without the user's permission will trigger {@link onAudioDeviceStateChanged onAudioDeviceStateChanged}. * - Call {@link stopAudioCapture stopAudioCapture} to stop the internal audio capture. Otherwise, the internal audio capture will sustain until you destroy the engine instance. * - To mute and unmute microphones, we recommend using {@link publishStream publishStream} and {@link unpublishStream unpublishStream}, other than {@link stopAudioCapture stopAudioCapture} and this API. Because starting and stopping capture devices often need some time waiting for the response of the device, that may lead to a short silence during the communication. * - To switch from custom to internal audio capture, stop publishing before disabling the custom audio capture module and then call this API to enable the internal audio capture. * */ startAudioCapture(): number; /** {en} * @detail api * @region audio management * @author dixing * @brief Stop internal audio capture. The default is off.
* Internal audio capture refers to: capturing audio using the built-in module.
* The local client will be informed via {@link onAudioDeviceStateChanged onAudioDeviceStateChanged} after stopping audio capture by calling this API.
* The remote clients in the room will be informed of the state change via {@link onUserStopAudioCapture onUserStopAudioCapture} after the visible client stops audio capture by calling this API. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Call {@link startAudioCapture startAudioCapture} to enable the internal audio capture. * - Without calling this API the internal audio capture will sustain until you destroy the engine instance. * */ stopAudioCapture(): number; /** {en} * @detail api * @region Audio management * @author zhangyuanyuan.0101 * @brief Sets the audio scenarios.
* You can choose the appropriate audio scenario according to your application.
* After selecting the audio scenario, the SDK will automatically apply the call volume/media volume based on the client's audio capture and playback devices and status, and synchronously change the corresponding audio-related algorithm configurations and capture settings. * @param audioScenario Audio scenarios. See {@link AudioScenarioType AudioScenarioType}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - It is recommended to call this API before joining the room and calling other audio related interfaces. If this API is called afterwards, audio lag may be introduced. * - Call volume is more suitable for calls, meetings and other scenarios that demand information accuracy. Call volume will activate the system hardware signal processor, making the sound clearer. The volume cannot be reduced to 0. * - Media volume is more suitable for entertainment scenarios, which require musical expression. The volume can be reduced to 0. * */ setAudioScenario(audioScenario: AudioScenarioType): number; /** {en} * @detail api * @region audio management * @author zhangyuanyuan.0101 * @brief Sets the sound quality. Call this API to change the sound quality if the audio settings in the current {@link ChannelProfile ChannelProfile} can not meet your requirements. * @param audioProfile Sound quality. See {@link AudioProfileType AudioProfileType} * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - This method can be called before and after entering the room. * - Support dynamic switching of sound quality during a call. * */ setAudioProfile(audioProfile: AudioProfileType): number; /** {en} * @detail api * @region audio management * @author liuchuang * @brief Set the Active Noise Cancellation(ANC) mode during audio and video communications. * @param ansMode ANC modes. See {@link AnsMode AnsMode}. * @return * - 0: Success. * - < 0 : Fail. 
See {@link ReturnStatus ReturnStatus} for more details * @note * - You can call this API before or after entering a room. When you repeatedly call it, only the last call takes effect. * - The noise reduction algorithm includes both traditional noise reduction and AI noise reduction. Traditional noise reduction is primarily aimed at suppressing steady noises, such as the hum of air conditioners and the whir of fans. AI noise reduction, on the other hand, is mainly designed to suppress non-stationary noises, like the tapping of keyboards and the clattering of tables and chairs. * - Before version V3.57.1, AI noise reduction can only be enabled through this interface when the following {@link ChannelProfile ChannelProfile} scenarios are engaged: * - Gaming voice mode: `CHANNEL_PROFILE_GAME(2)` * - High-fidelity gaming mode: `CHANNEL_PROFILE_GAME_HD(8)` * - Cloud gaming mode: `CHANNEL_PROFILE_CLOUD_GAME(3)` * - 1 vs 1 audio/video call: `CHANNEL_PROFILE_CHAT(5)` * - Multi-client synchronized audio/video playback: `CHANNEL_PROFILE_LW_TOGETHER(7)` * - Personal devices in cloud meetings: `CHANNEL_PROFILE_MEETING` * - Classroom interaction mode: `CHANNEL_PROFILE_CLASSROOM(18)` * - Meeting room terminals in cloud meetings: `CHANNEL_PROFILE_MEETING_ROOM(17)` * */ setAnsMode(ansMode: AnsMode): number; /** {en} * @detail api * @valid since 3.32 * @region Bel Sound Effect Management * @author wangjunzheng * @brief Set the sound change effect type * @param voiceChanger The sound change effect type. See {@link VoiceChangerType VoiceChangerType} * @return API call result:
* - 0: Success. * - <0: Failure. See {@link ReturnStatus ReturnStatus} for specific reasons. * @note * - To use this feature, you need to integrate the SAMI library. See [On-Demand Plugin Integration](#1108726). * - You can call it before and after entering the room. * - Effective for both internal and external audio source. * - Only valid for mono-channel audio. * - Mutually exclusive with {@link setVoiceReverbType setVoiceReverbType}, and the effects set later will override the effects set first. * */ setVoiceChangerType(voiceChanger: VoiceChangerType): number; /** {en} * @detail api * @valid since 3.32 * @region Bel Sound Effect Management * @author wangjunzheng * @brief Set the reverb effect type * @param voiceReverb Reverb effect type. See {@link VoiceReverbType VoiceReverbType} * @return API call result:
* - 0: Success. * - <0: Failure. See {@link ReturnStatus ReturnStatus} for specific reasons. * @note * - You can call it before and after entering the room. * - Effective for both internal and external audio source. * - Only valid for mono-channel audio. * - Mutually exclusive with {@link setVoiceChangerType setVoiceChangerType}, and the effects set later will override the effects set first. * */ setVoiceReverbType(voiceReverb: VoiceReverbType): number; /** {en} * @detail api * @author wangjunzheng * @brief Set the equalization effect for the local captured audio. The audio includes both internal captured audio and external captured voice, but not the mixing audio file. * @param voiceEqualizationConfig See {@link VoiceEqualizationConfig VoiceEqualizationConfig}. * @return * - 0: Success. * - < 0: Failure. * @note According to the Nyquist acquisition rate, the audio acquisition rate must be greater than twice the set center frequency. Otherwise, the setting will not be effective. * */ setLocalVoiceEqualization(voiceEqualizationConfig: VoiceEqualizationConfig): number; /** {en} * @detail api * @author wangjunzheng * @brief Set the reverb effect for the local captured audio. The audio includes both internal captured audio and external captured voice, but not the mixing audio file. * @param config See {@link VoiceReverbConfig VoiceReverbConfig}. * @return * - 0: Success. * - < 0: Failure. * @note Call {@link enableLocalVoiceReverb enableLocalVoiceReverb} to enable the reverb effect. * */ setLocalVoiceReverbParam(config: VoiceReverbConfig): number; /** {en} * @detail api * @author wangjunzheng * @brief Enable the reverb effect for the local captured voice. * @param enable * @return * - 0: Success. * - < 0: Failure. * @note Call {@link setLocalVoiceReverbParam setLocalVoiceReverbParam} to set the reverb effect. 
* */ enableLocalVoiceReverb(enable: boolean): number; /** {en} * @detail api * @region Video Management * @author sunhang.io * @brief Sets the view to be used for local video rendering and the rendering mode. * @param streamIndex Video stream type. See [StreamIndex](70083#streamindex-2). * @param videoCanvas View information and rendering mode. See {@link VideoCanvas VideoCanvas}. * @return * - 0: Success * - -1: videoCanvas is empty * @note * - You should bind your stream to a view before joining the room. This setting will remain in effect after you leave the room. * - If you need to unbind the local video stream from the current view, you can call this API and set the videoCanvas to `null`. * */ setLocalVideoCanvas(streamIndex: StreamIndex, videoCanvas: VideoCanvas): number; /** {en} * @detail api * @region video management * @author wangfujun.911 * @brief Update the render mode and background color of local video rendering * @param streamIndex See [StreamIndex](70083#streamindex-2) * @param renderMode See {@link VideoCanvas VideoCanvas}.renderMode * @param backgroundColor See {@link VideoCanvas VideoCanvas}.backgroundColor * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note Calling this API during local video rendering will be effective immediately. * */ updateLocalVideoCanvas(streamIndex: StreamIndex, renderMode: RenderMode, backgroundColor: number): number; /** {en} * @valid since 3.56 * @detail api * @region Video management * @author zhongshenyou * @brief Modifies remote video frame rendering settings, including render mode, background color, and rotation angle, while using the internal rendering of the SDK. * @param streamKey Information about the remote stream. See {@link RemoteStreamKey RemoteStreamKey}. * @param remoteVideoRenderConfig Video rendering settings. See {@link RemoteVideoRenderConfig RemoteVideoRenderConfig}. * @return * - 0: Success. * - < 0 : Failure. 
See {@link ReturnStatus ReturnStatus} for more details * @note * - After setting the rendering configuration for the remote video frame with {@link setRemoteVideoCanvas setRemoteVideoCanvas}, you can call this API to update settings including render mode, background color, and rotation angle. * - Calling this API during remote video rendering will be effective immediately. * */ updateRemoteStreamVideoCanvas(streamKey: RemoteStreamKey, remoteVideoRenderConfig: RemoteVideoRenderConfig): number; /** {en} * @detail api * @region Custom Video Capturing & Rendering * @author sunhang.io * @brief Sets the view to be used for remote video rendering and the rendering mode.
* To unbind the canvas, set `videoCanvas` to Null. * @param streamKey See {@link RemoteStreamKey RemoteStreamKey}. * @param videoCanvas View information and rendering mode. See {@link VideoCanvas VideoCanvas}. Starting from version 3.56, you can set the rotation angle of the remote video rendering using `renderRotation`. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note When the local user leaves the room, the setting will be invalid. The remote user leaving the room does not affect the setting. * */ setRemoteVideoCanvas(streamKey: RemoteStreamKey, videoCanvas: VideoCanvas): number; /** {en} * @hidden not available * @detail api * @region Audio & Video Processing * @author yinkaisheng * @brief Sets the super resolution mode for remote video stream. * @param streamKey Remote stream information that specifies the source and type of the video stream. See {@link RemoteStreamKey RemoteStreamKey}. * @param mode Super resolution mode. See {@link VideoSuperResolutionMode VideoSuperResolutionMode}. * @return.
* - 0: RETURN_STATUS_SUCCESS. It does not indicate the actual status of the super resolution mode, you should refer to {@link onRemoteVideoSuperResolutionModeChanged onRemoteVideoSuperResolutionModeChanged} callback. * - -1: RETURN_STATUS_NATIVE_IN_VALID. Native library is not loaded. * - -2: RETURN_STATUS_PARAMETER_ERR. Invalid parameter. * - -9: RETURN_STATUS_SCREEN_NOT_SUPPORT. Failure. Screen stream is not supported. * See {@link ReturnStatus ReturnStatus} for more return value indications. * @note * - Call this API after joining room. * - The original resolution of the remote video stream should not exceed 640 × 360 pixels. * - You can only turn on super-resolution mode for one stream. * */ setRemoteVideoSuperResolution(streamKey: RemoteStreamKey, mode: VideoSuperResolutionMode): number; /** {en} * @platform android * @hidden not available * @detail api * @region Audio & Video Processing * @author Yujianli * @brief Sets the video noise reduction mode. * @param mode Video noise reduction mode. Refer to {@link VideoDenoiseMode VideoDenoiseMode} for more details. * @return * - 0: Success. Please refer to {@link onVideoDenoiseModeChanged onVideoDenoiseModeChanged} callback for the actual state of video noise reduction mode. * - < 0: Failure. * */ android_setVideoDenoiser(mode: $p_a.VideoDenoiseMode): number; /** {en} * @detail api * @region Video management * @author wangjunlin.3182 * @brief Sets the mirror mode for the captured video stream. * @param mirrorType Mirror type. See {@link MirrorType MirrorType}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Switching video streams does not affect the settings of the mirror type. * - This API is not applicable to screen-sharing streams. * - When using an external renderer, you can set `mirrorType` to `0` and `3`, but you cannot set it to `1`. * - Before you call this API, the initial states of each video stream are as follows: * * * * *
 * | Device type   | Front-facing camera | Back-facing camera | Custom capturing | Built-in camera |
 * | ------------- | ------------------- | ------------------ | ---------------- | --------------- |
 * | Mobile device | The preview is mirrored. The published video stream is not mirrored. | The preview and the published video stream are not mirrored. | The preview and the published video stream are not mirrored. | / |
 * | PC            | / | / | The preview and the published video stream are not mirrored. | The preview is mirrored. The published video stream is not mirrored. |
* */ setLocalVideoMirrorType(mirrorType: MirrorType): number; /** {en} * @detail api * @valid since 3.57 * @region Video Management * @brief When using internal rendering, enable mirroring for the remote stream. * @param remoteStreamKey Information about the remote stream, used to specify the source and attributes of the video stream that needs to be mirrored, see {@link RemoteStreamKey RemoteStreamKey}. * @param mirrorType The mirror type for the remote stream, see {@link RemoteMirrorType RemoteMirrorType}. * @return * - 0: Successful call. * - < 0: Call failed, see {@link ReturnStatus ReturnStatus} for more error details. * */ setRemoteVideoMirrorType(remoteStreamKey: RemoteStreamKey, mirrorType: RemoteMirrorType): number; /** {en} * @detail api * @region Video Management * @brief Set the orientation of the video capture. By default, the App direction is used as the orientation reference.
* During rendering, the receiving client rotates the video in the same way as the sending client did. * @param rotationMode Rotation reference can be the orientation of the App or gravity. Refer to {@link VideoRotationMode VideoRotationMode} for details. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - The orientation setting is effective for internal video capture only. That is, the orientation setting is not effective to the custom video source or the screen-sharing stream. * - If the video capture is on, the setting will be effective once you call this API. If the video capture is off, the setting will be effective on when capture starts. * */ setVideoRotationMode(rotationMode: VideoRotationMode): number; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Switch to the front-facing/back-facing camera used in the internal video capture
* The local client will be informed via {@link onVideoDeviceStateChanged onVideoDeviceStateChanged} after calling this API. * @param cameraId Camera ID. Refer to {@link CameraId CameraId} for more details. * @return * - 0: Success * - < 0: Failure * @note * - Front-facing camera is the default camera. * - If the internal video capturing is on, the switch is effective once you call this API. If the internal video capturing is off, the setting will be effective when capture starts. * */ switchCamera(cameraId: CameraId): number; /** {en} * @detail api * @author dixing * @brief Set the current audio playback route. The default device is set via {@link setDefaultAudioRoute setDefaultAudioRoute}.
* When the audio playback route changes, you will receive {@link onAudioRouteChanged onAudioRouteChanged}. * @param audioRoute Audio route. Refer to {@link AudioRoute AudioRoute}.
* For Android device, the valid audio playback devices may vary due to different audio device connection status. See [Set the Audio Route](https://docs.byteplus.com/byteplus-rtc/docs/117836). * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - You can implement most scenarios by calling {@link setDefaultAudioRoute setDefaultAudioRoute} and the default audio route switching strategy of the RTC SDK. For details about the strategy, see [Set the Audio Route](https://docs.byteplus.com/byteplus-rtc/docs/117836). You should use this API in a few exceptional scenarios like manually switching audio route with external audio device connected. * - This interface is only supported in the `AUDIO_SCENARIO_COMMUNICATION` audio scenario. Call {@link setAudioScenario setAudioScenario} to switch between different audio scenarios. * - For the volume type in different audio scenarios, refer to {@link AudioScenarioType AudioScenarioType}. * */ setAudioRoute(audioRoute: AudioRoute): number; /** {en} * @detail api * @region Audio management * @author dixing * @brief Get the information of currently-using playback route. * @return See {@link AudioRoute AudioRoute}. * @note To set the audio playback route, see {@link setAudioRoute setAudioRoute}. * */ getAudioRoute(): AudioRoute; /** {en} * @detail api * @author dixing * @brief Set the speaker or earpiece as the default audio playback device. * @param route Audio playback device. Refer to {@link AudioRoute AudioRoute}. You can only use earpiece and speakerphone. * @return * - 0: Success. * - < 0: failure. It fails when the device designated is neither a speaker nor an earpiece. * @note For the default audio route switching strategy of the RTC SDK, see [Set the Audio Route](https://docs.byteplus.com/byteplus-rtc/docs/117836). 
* */ setDefaultAudioRoute(route: AudioRoute): number; /** {en} * @detail api * @author zhangyuanyuan.0101 * @brief Enable the audio process mode for external sound card. * @param enable
* - true: enable * - false: disable (by default) * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - When you use external sound card for audio capture, enable this mode for better audio quality. * - When using the mode, you can only use earphones. If you need to use internal or external speaker, disable this mode. * */ enableExternalSoundCard(enable: boolean): number; /** {en} * @valid since 3.58.1 * @detail api * @region Volume management * @author shiyayun * @brief Set whether to mute the recording signal (without changing the local hardware). * @param index Stream index, specifying the main stream or screen stream volume adjustment. See [StreamIndex](70083#streamindex-2). * @param mute Whether to mute audio capture.
* - True: Mute (disable microphone) * - False: (Default) Enable microphone * @return * - 0: Success. * - < 0 : Failure. See {@link ReturnStatus ReturnStatus} for more details. * @note * - Calling this API does not affect the status of SDK audio stream publishing. * - Adjusting the volume by calling {@link setCaptureVolume setCaptureVolume} after muting will not cancel the mute state. The volume state will be retained until unmuted. * - You can use this interface to set the capture volume before or after calling {@link startAudioCapture startAudioCapture} to enable audio capture. * */ muteAudioCapture(index: StreamIndex, mute: boolean): number; /** {en} * @detail api * @region Volume management * @author huangshouqin * @brief Adjust the volume of the audio capture * @param index Index of the stream, whose volume needs to be adjusted. Refer to [StreamIndex](70083#streamindex-2) for more details. * @param volume Ratio of capture volume to original volume.
* This changes the volume property of the audio data other than the hardware volume.
Ranging: [0,400]. Unit: \%
* To ensure the audio quality, we recommend setting the volume to `100`.
* - 0: Mute * - 100: Original volume. To ensure the audio quality, we recommend [0, 100]. * - 400: Four times the original volume with signal-clipping protection. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note Call this API to set the volume of the audio capture before or during the audio capture. * */ setCaptureVolume(index: StreamIndex, volume: number): number; /** {en} * @detail api * @region volume management * @author huangshouqin * @brief Adjusts the locally playing volume after mixing sounds of all remote users. You can call this API before or during the playback. * @param volume Ratio(\%) of playback volume to original volume, in the range [0, 400], with overflow protection.
* To ensure the audio quality, we recommend setting the volume to `100`.
 * - 0: mute * - 100: original volume * - 400: Four times the original volume with signal-clipping protection. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note Suppose a remote user A is always within the range of the target user whose playback volume will be adjusted, if you use both this method and {@link setRemoteAudioPlaybackVolume setRemoteAudioPlaybackVolume} or {@link setRemoteRoomAudioPlaybackVolume setRemoteRoomAudioPlaybackVolume}, the volume that the local user hears from user A is the overlay of both settings. * */ setPlaybackVolume(volume: number): number; /** {en} * @detail api * @region Audio Mixing * @author wangjunzheng * @brief Change local voice to a different key, mostly used in Karaoke scenarios.
* You can adjust the pitch of local voice such as ascending or descending with this method. * @param pitch The value that is higher or lower than the original local voice within a range from -12 to 12. The default value is 0, i.e. No adjustment is made.
* The difference in pitch between two adjacent values within the value range is a semitone, with positive values indicating an ascending tone and negative values indicating a descending tone, and the larger the absolute value set, the more the pitch is raised or lowered.
* Out of the value range, the setting fails and triggers {@link onWarning onWarning} callback, indicating `WARNING_CODE_SET_SCREEN_STREAM_INVALID_VOICE_PITCH` for invalid value setting with {@link WarningCode WarningCode}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * */ setLocalVoicePitch(pitch: number): number; /** {en} * @detail api * @region Audio Management * @author majun.lvhiei * @brief Enables/disables the loudness equalization function.
* If you call this API with the parameter set to True, the loudness of user's voice will be adjusted to -16lufs. If then you also call {@link setAudioMixingLoudness setAudioMixingLoudness} and import the original loudness of the audio data used in audio mixing, the loudness will be adjusted to -20lufs when the audio data starts to play. * @param enable Whether to enable loudness equalization function:
* - true: Yes * - false: No * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note You must call this API before starting to play the audio file with {@link startAudioMixing startAudioMixing}. * */ enableVocalInstrumentBalance(enable: boolean): number; /** {en} * @detail api * @region Audio Management * @author majun.lvhiei * @brief Enables/disables the playback ducking function. This function is usually used in scenarios where short videos or music will be played simultaneously during RTC calls.
* With the function on, if remote voice is detected, the local media volume will be lowered to ensure the clarity of the remote voice. If remote voice disappears, the local media volume restores. * @param enable Whether to enable playback ducking:
* - true: Yes * - false: No * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * */ enablePlaybackDucking(enable: boolean): number; /** {en} * @detail api * @region Real-time messaging * @author hanchenchen.c * @brief Log in first to call {@link sendUserMessageOutsideRoom sendUserMessageOutsideRoom} and {@link sendServerMessage sendServerMessage} to send P2P messages or send messages to a server without joining the RTC room.
* To log out, call {@link logout logout}. * @param token
* Token is required during login for authentication.
* This Token is different from that required by calling joinRoom. You can assign any value even null to `roomId` to generate a login token. During developing and testing, you can use temporary tokens generated on the console. Deploy the token generating application on your server. * @param uid
* User ID
 * User ID is unique within one appid. * @return * - 0: Success. * - <0: Failure. See {@link ReturnStatus ReturnStatus} for specific reasons. * @note You will receive {@link onLoginResult onLoginResult} after calling this API and log in successfully. But remote users will not receive notification about that. * */ login(token: string, uid: string): number; /** {en} * @detail api * @region real-time messaging * @author hanchenchen.c * @brief After calling this method to log out, it is impossible to call methods related to out-of-room messages and end-to-server messages or receive related callbacks. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - After calling this interface to log out, you must first call {@link login login} to log in. * - After local users call this method to log out, they will receive the result of the {@link onLogout onLogout} callback notification, and remote users will not receive the notification. * */ logout(): number; /** {en} * @detail api * @region Real-time messaging * @author hanchenchen.c * @brief Update the Token
* Token used by the user for login has a certain valid period. When the Token expires, you need to call this method to update the login Token information.
* When calling the {@link login login} method to log in, if an expired token is used, the login will fail and you will receive an {@link onLoginResult onLoginResult} callback notification with an error code of 'LOGIN_ERROR_CODE_INVALID_TOKEN'. You need to reacquire the token and call this method to update the token. * @param token
* Updated dynamic key * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - If the token is invalid and the login fails, the SDK will automatically log in again after updating the token by calling this method, and the user does not need to call the {@link login login} method himself. * - Token expires, if you have successfully logged in, it will not be affected. An expired Token error will be notified to the user the next time you log in with an expired Token, or when you log in again due to a disconnection due to poor local network conditions. * */ updateLoginToken(token: string): number; /** {en} * @detail api * @region Real-time messaging * @author hanchenchen.c * @brief Set application server parameters
* Client side calls {@link sendServerMessage sendServerMessage} or {@link sendServerBinaryMessage sendServerBinaryMessage} Before sending a message to the application server, you must set a valid signature and application server address. * @param signature Dynamic signature. The App server may use the signature to verify the source of messages.
* You need to define the signature yourself. It can be any non-empty string. It is recommended to encode information such as UID into the signature.
* The signature will be sent to the address set through the "url" parameter in the form of a POST request. * @param url The address of the application server * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - The user must call {@link login login} to log in before calling this interface. * - After calling this interface, the SDK will use {@link onServerParamsSetResult onServerParamsSetResult} to return the corresponding result. * */ setServerParams(signature: string, url: string): number; /** {en} * @detail api * @region real-time messaging * @author hanchenchen.c * @brief Query the login status of the opposite or local user * @param peerUserID The user ID to be queried * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - You must call {@link login login} to log in before calling this interface. * - After calling this interface, the SDK notifies the query result using the {@link onGetPeerOnlineStatus onGetPeerOnlineStatus} callback. * - Before sending an out-of-room message, the user can know whether the peer user is logged in through this interface to decide whether to send a message. You can also check your login status through this interface. * */ getPeerOnlineStatus(peerUserID: string): number; /** {en} * @detail api * @region Real-time messaging * @author hanchenchen.c * @brief Send a text message (P2P) to a specified user outside the room * @param uid User ID of the message receiver * @param message
* Text message content sent.
* Message does not exceed 64 KB. * @param config Message type, see {@link MessageConfig MessageConfig}. * @return * - > 0: Sent successfully, return the number of the sent message, increment from 1 * - -1: Sent failed, RTCVideo instance not created * - -2: Sent failed, uid is empty * @note * - Before sending an out-of-room text message, you must call {@link login login} to login. * - After the user calls this interface to send a text message, he will receive an {@link onUserMessageSendResultOutsideRoom onUserMessageSendResultOutsideRoom} callback to know whether the message was successfully sent. * - If the text message is sent successfully, the user specified by uid receives the message via the {@link onUserMessageReceivedOutsideRoom onUserMessageReceivedOutsideRoom} callback. * */ sendUserMessageOutsideRoom(uid: string, message: string, config: MessageConfig): number; /** {en} * @detail api * @region Real-time messaging * @author hanchenchen.c * @brief Send binary messages (P2P) to the specified user outside the room * @param uid User ID of the message receiver * @param buffer
* Binary message content sent
* Message does not exceed 46KB. * @param config Message type, see {@link MessageConfig MessageConfig}. * @return * - > 0: Sent successfully, return the number of the sent message this time, increment from 1 * - -1: Sent failed, RTCVideo instance not created * - -2: Sent failed, uid is empty * @note * - Before sending out-of-room binary messages, you should call {@link login login} first. * - After the user calls this interface to send a binary message, he will receive an {@link onUserMessageSendResultOutsideRoom onUserMessageSendResultOutsideRoom} callback to notify whether the message was sent successfully; * - If the binary message is sent successfully, the user specified by uid will receive the message through the {@link onUserBinaryMessageReceivedOutsideRoom onUserBinaryMessageReceivedOutsideRoom} callback. * */ sendUserBinaryMessageOutsideRoom(uid: string, buffer: ArrayBuffer, config: MessageConfig): number; /** {en} * @detail api * @region Real-time messaging * @author hanchenchen.c * @brief The client side sends a text message to the application server (P2Server) * @param message
* The content of the text message sent
* The message does not exceed 64 KB. * @return * - > 0: Sent successfully, return the number of the sent message, increment from 1 * - -1: Sent failed, RTCVideo instance not created * @note * - Before sending a text message to the application server, you must first call {@link login login} to complete the login, and then call {@link setServerParams setServerParams} Set up the application server. * - After calling this interface, you will receive an {@link onServerMessageSendResult onServerMessageSendResult} callback to inform the message sender whether the message was sent successfully. * - If the text message is sent successfully, the application server that previously called the {@link setServerParams setServerParams} setting will receive the message. * */ sendServerMessage(message: string): number; /** {en} * @detail api * @region Real-time messaging * @author hanchenchen.c * @brief Client side sends binary messages to the application server (P2Server) * @param buffer
* Binary message content sent
* Message does not exceed 46KB. * @return * - > 0: Sent successfully, return the number of the sent message, increment from 1 * - -1: Sent failed, RTCVideo instance not created * @note * - Before sending a binary message to the application server, you must first call {@link login login} to complete the login, and then call {@link setServerParams setServerParams} Set up the application server. * - After calling this interface, you will receive an {@link onServerMessageSendResult onServerMessageSendResult} callback to inform the message sender that the sending succeeded or failed; * - If the binary message is sent successfully, the application server that previously called the {@link setServerParams setServerParams} setting will receive the message. * */ sendServerBinaryMessage(buffer: ArrayBuffer): number; /** {en} * @detail api * @region Pre-call network detection * @author hanchenchen.c * @brief Enable pre-call network detection * @param isTestUplink Whether to detect uplink bandwidth * @param expectedUplinkBitrate Expected uplink bandwidth in kbps, unit: kbps
Range: `{0, [100-10000]}`, `0`: Auto, meaning RTC will set the highest bit rate. * @param isTestDownlink Whether to detect downlink bandwidth * @param expectedDownlinkBitrate Expected downlink bandwidth in kbps, unit: kbps
Range: `{0, [100-10000]}`, `0`: Auto, meaning RTC will set the highest bit rate. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - After successfully calling this interface, you will receive {@link onNetworkDetectionResult onNetworkDetectionResult} within 3s and every 2s thereafter notifying the probe results; * - If the probe stops, you will receive {@link onNetworkDetectionStopped onNetworkDetectionStopped} to notify the probe to stop. * */ startNetworkDetection(isTestUplink: boolean, expectedUplinkBitrate: number, isTestDownlink: boolean, expectedDownlinkBitrate: number): number; /** {en} * @detail api * @region Pre-call network probe * @author hanchenchen.c * @brief Stop pre-call network probe * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - After calling this interface, you will receive an {@link onNetworkDetectionStopped onNetworkDetectionStopped} callback to notify the probe to stop. * */ stopNetworkDetection(): number; /** {en} * @detail api * @region Audio Data Callback * @author gongzhengduo * @brief Enable audio frames callback and set the format for the specified type of audio frames. * @param method Audio data callback method. See {@link AudioFrameCallbackMethod AudioFrameCallbackMethod}.
* If `method` is set as `AUDIO_FRAME_CALLBACK_RECORD(0)`, `AUDIO_FRAME_CALLBACK_PLAYBACK(1)`, or `AUDIO_FRAME_CALLBACK_MIXED(2)`, set `format` to the accurate value listed in the audio parameters format.
* If `method` is set as `AUDIO_FRAME_CALLBACK_REMOTE_USER(3)`, set `format` to `auto`. * @param format Audio parameters format. See {@link AudioFormat AudioFormat}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note After calling this API and {@link registerAudioFrameObserver registerAudioFrameObserver}, {@link IAudioFrameObserver IAudioFrameObserver} will receive the corresponding audio data callback. However, these two APIs are independent of each other and the calling order is not restricted. * */ enableAudioFrameCallback(method: AudioFrameCallbackMethod, format: AudioFormat): number; /** {en} * @detail api * @region Audio Data Callback * @author gongzhengduo * @brief Disables audio data callback. * @param method Audio data callback method. See {@link AudioFrameCallbackMethod AudioFrameCallbackMethod}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note Call this API after calling {@link enableAudioFrameCallback enableAudioFrameCallback}. * */ disableAudioFrameCallback(method: AudioFrameCallbackMethod): number; /** {en} * @detail api * @region Audio Data Callback * @author gongzhengduo * @brief Register an audio frame observer. * @param observer Audio data callback observer. See {@link IAudioFrameObserver IAudioFrameObserver}. Use `null` to cancel the registration. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note After calling this API and {@link enableAudioFrameCallback enableAudioFrameCallback}, {@link IAudioFrameObserver IAudioFrameObserver} receives the corresponding audio data callback. You can retrieve the audio data and perform processing on it without affecting the audio that RTC SDK uses to encode or render. * */ registerAudioFrameObserver(observer: IAudioFrameObserver): number; /** {en} * @detail api * @author gongzhengduo * @brief Register a custom audio preprocessor.
 * After that, you can call {@link enableAudioProcessor enableAudioProcessor} to process the audio streams that are either captured locally or received from the remote side. RTC SDK then encodes or renders the processed data. * @param processor Custom audio processor. See {@link IAudioFrameProcessor IAudioFrameProcessor}.
 * SDK only holds weak references to the processor, you should guarantee its lifetime. To cancel registration, set the parameter to `null`. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - When this interface is repeatedly called, only the last call takes effect. * - Refer to [Custom Audio Processing](https://docs.byteplus.com/en/byteplus-rtc/docs/80635) for more information. * */ registerAudioProcessor(processor: IAudioFrameProcessor): number; /** {en} * @detail api * @author likai.666 * @valid since 3.51 * @brief Set the step size for each digital zooming control to the local videos. * @param type Required. Identifying which type the `size` is referring to. Refer to {@link ZoomConfigType ZoomConfigType}. * @param size Required. Reserved to three decimal places. It defaults to `0`.
 * The meaning and range vary depending on the `type`. If the scale or moving distance exceeds the range, the limit is taken as the result.
 * - `kZoomFocusOffset`: Increase or decrease to the scaling factor. Range: [0, 7]. For example, when it is set to 0.5 and {@link setVideoDigitalZoomControl setVideoDigitalZoomControl} is called to zoom in, the scale will increase `0.5`. The scale ranges [1,8] and defaults to `1`, which means an original size. * - `kZoomMoveOffset`: Ratio of the distance to the border of video images. It ranges [0, 0.5] and defaults to `0`, which means no offset. When you call {@link setVideoDigitalZoomControl setVideoDigitalZoomControl} and choose `CAMERA_MOVE_LEFT`, the moving distance is size x original width. While for the `CAMERA_MOVE_UP`, the moving distance is size x original height. Suppose that a video spans 1080 px and the `size` is set to `0.5` so that the distance would be 0.5 x 1080 px = 540 px.
* - When you request an out-of-range scale or movement, SDK will execute it with the limits. For example, when the image has been moved to the border, the image cannot be zoomed out, or has been magnified to 8x. * - Call {@link startVideoDigitalZoomControl startVideoDigitalZoomControl} to have a continuous and repeatedly digital zoom control. * - Refer to {@link setCameraZoomRatio setCameraZoomRatio} if you intend to have an optical zoom control to the camera. * */ setVideoDigitalZoomControl(direction: ZoomDirectionType): number; /** {en} * @detail api * @author likai.666 * @valid since 3.51 * @brief Continuous and repeatedly digital zoom control. This action effect both the video preview locally and the stream published. * @param direction Action of the digital zoom control. Refer to {@link ZoomDirectionType ZoomDirectionType}. * @return * - 0: Success. * - ! 0: Failure. * @note * - As the default offset is `0`, you must call {@link setVideoDigitalZoomConfig setVideoDigitalZoomConfig} before this API. * - You can only move video images after they are magnified via this API or {@link setVideoDigitalZoomControl setVideoDigitalZoomControl}. * - The control process stops when the scale reaches the limit, or the images have been moved to the border. if the next action exceeds the scale or movement range, SDK will execute it with the limits. * - Call {@link stopVideoDigitalZoomControl stopVideoDigitalZoomControl} to stop the ongoing zoom control. * - Call {@link setVideoDigitalZoomControl setVideoDigitalZoomControl} to have a one-time digital zoom control. * - Refer to {@link setCameraZoomRatio setCameraZoomRatio} if you intend to have an optical zoom control to the camera. * */ startVideoDigitalZoomControl(direction: ZoomDirectionType): number; /** {en} * @detail api * @author likai.666 * @valid since 3.51 * @brief Stop the ongoing digital zoom control instantly. * @return * - 0: Success. * - ! 0: Failure. 
* @note Refer to {@link startVideoDigitalZoomControl startVideoDigitalZoomControl} for starting digital zooming. * */ stopVideoDigitalZoomControl(): number; /** {en} * @detail api * @region Video Data Callback * @author liuyangyang * @brief Register a local video frame observer.
 * This method applies to both internal capturing and custom capturing.
* After calling this API, SDK triggers {@link onLocalEncodedVideoFrame onLocalEncodedVideoFrame} whenever a video frame is captured. * @param observer Local video frame observer. See {@link ILocalEncodedVideoFrameObserver ILocalEncodedVideoFrameObserver}. You can cancel the registration by setting it to `null`. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note You can call this API before or after entering the RTC room. Calling this API before entering the room ensures that video frames are monitored and callbacks are triggered as early as possible. * */ registerLocalEncodedVideoFrameObserver(observer: ILocalEncodedVideoFrameObserver): number; /** {en} * @detail api * @region video management * @author wangzhanqiang * @brief Video data callback after registering remote encoding.
* After registration, when the SDK detects a remote encoded video frame, it will trigger the {@link onRemoteEncodedVideoFrame onRemoteEncodedVideoFrame} callback * @param observer Remote encoded video data monitor. See {@link IRemoteEncodedVideoFrameObserver IRemoteEncodedVideoFrameObserver} * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - See [Custom Video Encoding and Decoding](https://docs.byteplus.com/byteplus-rtc/docs/82921#custom-video-decoding) for more details about custom video decoding. * - This method applys to manual subscription mode and can be called either before or after entering the Room. It is recommended to call it before entering the room. * - The engine needs to be unregistered before it is destroyed. Call this method to set the parameter to "null". * */ registerRemoteEncodedVideoFrameObserver(observer: IRemoteEncodedVideoFrameObserver): number; /** {en} * @detail api * @region Video Management * @author liuyangyang * @brief Set the video source, including the screen recordings.
* The internal video capture is the default, which refers to capturing video using the built-in module. * @param index Stream index. Refer to [StreamIndex](70083#streamindex-2) for more details. * @param type Video source type. Refer to {@link VideoSourceType VideoSourceType} for more details. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - You can call this API whether the user is in a room or not. * - Calling this API to switch to the custom video source will stop the enabled internal video capture. * - To switch to internal video capture, call this API to stop custom capture and then call {@link startVideoCapture startVideoCapture} to enable internal video capture. * - To push custom encoded video frames to the SDK, call this API to switch `VideoSourceType` to `VIDEO_SOURCE_TYPE_ENCODED_WITH_SIMULCAST(2)` or `VIDEO_SOURCE_TYPE_ENCODED_WITHOUT_SIMULCAST(3)`. * */ setVideoSourceType(index: StreamIndex, type: VideoSourceType): number; /** {en} * @valid since 3.52 * @detail api * @region Push to CDN * @author liyunlong.zx * @brief Create a new task of pushing mixed media streams to CDN and sets the relevant configurations.
 * When pushing more than one live stream in the same task, SDK will first mix those streams into one single stream and then push it to CDN. * @param taskId Task ID. The length should not exceed 126 bytes.
* You may want to push more than one mixed stream to CDN from the same room. When you do that, use different ID for corresponding tasks; if you will start only one task, use an empty string. * @param mixedConfig Configurations to be set when pushing streams to CDN. See {@link MixedStreamConfig MixedStreamConfig}. * @param observer Invalid parameter. Please ignore it. * @return API call result:
* - 0: Success * - !0: Failure * @note * - Before calling this API,you need to enable Push to CDN on the [console](https://console.byteplus.com/rtc/workplaceRTC). * - After calling this API, you will be informed of the result and errors during the pushing process via the {@link onMixingEvent onMixingEvent} callback. * - If you have subscribed to the push-to-CDN server callback in the [console](https://console.byteplus.com/rtc/cloudRTC?tab=callback), calling this API will receive a [TranscodeStarted](https://docs.byteplus.com/en/byteplus-rtc/docs/75125#transcodestarted) server callback notification. When calling this API repeatedly, subsequent calls to this API will trigger both [TranscodeStarted](https://docs.byteplus.com/en/byteplus-rtc/docs/75125#transcodestarted) and [TranscodeUpdated](https://docs.byteplus.com/en/byteplus-rtc/docs/75125#transcodeupdated) callbacks. * - Call {@link stopPushStreamToCDN stopPushStreamToCDN} to stop pushing streams to CDN. * */ startPushMixedStreamToCDN(taskId: string, mixedConfig: MixedStreamConfig, observer: IMixedStreamObserver): number; /** {en} * @valid since 3.52 * @detail api * @region Push to CDN * @author liyunlong.zx * @brief Update parameters needed when pushing mixed media streams to CDN. You will be informed of the change via the {@link onMixingEvent onMixingEvent} callback.
 * After calling {@link startPushMixedStreamToCDN startPushMixedStreamToCDN} to enable the function of pushing streams to CDN, you can call this API to update the relevant configurations. * @param taskId Task ID. Specifies the pushing task whose parameters you want to update. * @param mixedConfig Configurations that you want to update. See {@link MixedStreamConfig MixedStreamConfig} for specific indications. You can update any property for the task unless it is specified as unavailable for updates.
 * If you leave some properties blank, you can expect these properties to be set to their default values. * @return API call result:
* - 0: Success * - !0: Failure * */ updatePushMixedStreamToCDN(taskId: string, mixedConfig: MixedStreamConfig): number; /** {en} * @detail api * @region Push to CDN * @author liujingchao * @brief Create a new task of pushing a single media stream to CDN. * @param taskId Task ID.
* You may want to start more than one task to push streams to CDN. When you do that, use different IDs for corresponding tasks; if you will start only one task, use an empty string. * @param param Configurations for pushing a single stream to CDN. See {@link PushSingleStreamParam PushSingleStreamParam}. * @param observer Register this observer to receive callbacks from the SDK. See {@link IPushSingleStreamToCDNObserver IPushSingleStreamToCDNObserver}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Before calling this API,you need to enable Push to CDN on the [console](https://console.byteplus.com/rtc/workplaceRTC). * - After calling this API, you will be informed of the result and errors during the pushing process with {@link onStreamPushEvent onStreamPushEvent}. * - Call {@link stopPushStreamToCDN stopPushStreamToCDN} to stop the task. * - Since this API does not perform encoding and decoding, the video stream pushed to RTMP will change according to the resolution, encoding method, and turning off the camera of the end of pushing streams. * */ startPushSingleStreamToCDN(taskId: string, param: PushSingleStreamParam, observer: IPushSingleStreamToCDNObserver): number; /** {en} * @detail api * @region Push to CDN * @author liujingchao * @brief Stops the task to push a single media stream to CDN.
 * This API can be used to stop both pushing single and mixed stream to CDN. You need to distinguish the tasks to be stopped by different "taskId". * @param taskId Task ID. Specifies the task you want to stop. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - To start pushing single stream to CDN, see {@link startPushSingleStreamToCDN startPushSingleStreamToCDN}. * - To start pushing mixed stream to CDN, see {@link startPushMixedStreamToCDN startPushMixedStreamToCDN}. * */ stopPushStreamToCDN(taskId: string): number; /** {en} * @detail api * @region Video Management * @author liuyangyang * @brief Set the video capture parameters for internal capture of the RTC SDK.
* If your project uses the SDK internal capture module, you can specify the video capture parameters including preference, resolution and frame rate through this interface. * @param videoCaptureConfig Video capture parameters. See: {@link VideoCaptureConfig VideoCaptureConfig}. * @return * - 0: Success; * - < 0: Failure; * @note * - This interface can be called after the engine is created. It is recommended to call this interface before calling {@link startVideoCapture startVideoCapture}. * - It is recommended that different Engines on the same device use the same video capture parameters. * - If you used the internal module to start video capture before calling this interface, the capture parameters default to Auto. * */ setVideoCaptureConfig(videoCaptureConfig: VideoCaptureConfig): number; /** {en} * @detail api * @region Video Management * @author zhaomingliang * @brief Enables/Disables the mode of publishing multiple video streams with different encoding configuration. * @param enabled Whether to enable the mode of publishing multiple video streams:
* - true: Yes * - false: No(Default setting) * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - You should call this API * - before entering the room, or * - after entering the room but before publishing streams * - When the simulcast mode is turned on, it cannot be turned off dynamically, nor can the parameters for video encoding be updated. * - After setting this API to "True", you can call [setVideoEncoderConfig](#RTCVideo-setvideoencoderconfig-1) to set encoding configuration for each stream before you publish stream. * - If this function is off, or if this function is on but you don't set the configuration of any stream, only one stream will be sent with a resolution of 640px × 360px and a frame rate of 15fps. * */ enableSimulcastMode(enabled: boolean): number; /** {en} * @detail api * @region Video Management * @author zhaomingliang * @brief \\Video publisher call this API to set the parameters of the maximum resolution video stream that is expected to be published, including resolution, frame rate, bitrate, and fallback strategy in poor network conditions.
 * You can only set configuration for one stream with this API. If you want to set configuration for multiple streams, call [setVideoEncoderConfig](#RTCVideo-setvideoencoderconfig-2). * @param channelSolutions The maximum video encoding parameter. See {@link VideoEncoderConfig VideoEncoderConfig}. * @return API call result:
* - 0: Success * - ! 0: Failure * @note * - You can use {@link enableSimulcastMode enableSimulcastMode} simultaneously to publish streams with different resolutions. Specifically, if you want to publish multiple streams with different resolutions, you need to call this method and enable the simulcast mode with {@link enableSimulcastMode enableSimulcastMode} before publishing your streams. The SDK will intelligently adjust the number of streams to be published (up to 4) and their parameters based on the settings of the subscribing end. The resolution set by calling this method will be the maximum resolution among the streams. For specific rules, please refer to [Simulcasting](https://docs.byteplus.com/en/byteplus-rtc/docs/70139). * - Without calling this API, SDK will only publish one stream for you with a resolution of 640px × 360px and a frame rate of 15fps. * - In custom capturing scenario, you must call this API to set encoding configurations to ensure the integrity of the picture received by the remote users. * - This API is applicable to the video stream captured by the camera, see {@link setScreenVideoEncoderConfig setScreenVideoEncoderConfig} for setting parameters for screen sharing video stream. * */ setVideoEncoderConfig(channelSolutions: VideoEncoderConfig[]): number; /** {en} * @detail api * @region Screen Sharing * @author zhoubohui * @brief Set the encoding configuration for shared-screen streams, including the resolution, frame rate, bitrate, and fallback strategies under challenging network conditions. * @param screenSolution The encoding configuration for shared-screen streams. See {@link ScreenVideoEncoderConfig ScreenVideoEncoderConfig}. * @return * - 0: Success. * - ! 0: Failure. * @note We recommend that you set the encoding configuration before video capture. Otherwise, the video will be captured based on the default configuration(1080p\@15fps). 
* */ setScreenVideoEncoderConfig(screenSolution: ScreenVideoEncoderConfig): number; /** {en} * @valid since 3.58 * @detail api * @region Video Management * @brief Enables the Alpha channel encoding feature for custom captured video frames.
* Suitable for scenarios where the video subject and background need to be separated at the push stream end, and the background can be custom rendered at the pull stream end. * @param streamIndex The type of video stream for which this feature should be enabled. Currently, only StreamIndex.STREAM_INDEX_MAIN, i.e., the main stream, is supported. * @param alphaLayout The relative position of the separated Alpha channel to the RGB channel information. Currently, only AlphaLayout.TOP is supported, which means it is placed above the RGB channel information. * @return Method call result:
* - 0: Success; * - !0: Failure. * @note * - This API only applies to custom captured video frames that use the RGBA color model, including VideoPixelFormat.TEXTURE_2D, VideoPixelFormat.TEXTURE_OES, VideoPixelFormat.RGBA. * - This API must be called before publishing the video stream. * - After calling this API to enable Alpha channel encoding, you must call {@link pushExternalVideoFrame pushExternalVideoFrame} to push the custom captured video frames to the RTC SDK. If a video frame format that is not supported is pushed, calling {@link pushExternalVideoFrame pushExternalVideoFrame} will return the error code ReturnStatus.RETURN_STATUS_PARAMETER_ERR. * */ enableAlphaChannelVideoEncode(streamIndex: StreamIndex, alphaLayout: AlphaLayout): number; /** {en} * @valid since 3.58 * @detail api * @region Video Management * @brief Disables the Alpha channel encoding feature for externally captured video frames. * @param streamIndex The type of video stream for which this feature should be disabled. Currently, only supports setting to StreamIndex.STREAM_INDEX_MAIN, i.e., the main stream. * @return Method call result:
* - 0: Success; * - !0: Failure. * @note This API must be called after stopping the publish of the video stream. * */ disableAlphaChannelVideoEncode(streamIndex: StreamIndex): number; /** {en} * @detail api * @region Custom Audio Capture and Rendering * @author gongzhengduo * @brief Switch the audio capture type. * @param type Audio input source type. See {@link AudioSourceType AudioSourceType}
* Use internal audio capture by default. The audio capture type and the audio render type may be different from each other. * @return Method call result:
* - >0: Success. * - -1: Failure. * @note * - You can call this API before or after joining the room. * - If you call this API to switch from internal audio capture to custom capture, the internal audio capture is automatically disabled. You must call {@link pushExternalAudioFrame pushExternalAudioFrame} to push custom captured audio data to RTC SDK for transmission. * - If you call this API to switch from custom capture to internal capture, you must then call {@link startAudioCapture startAudioCapture} to enable internal capture. * */ setAudioSourceType(type: AudioSourceType): number; /** {en} * @detail api * @region Custom Audio Capture and Rendering * @author gongzhengduo * @brief Switch the audio render type. * @param type Audio output source type. See {@link AudioRenderType AudioRenderType}.
* Use internal audio render by default. The audio capture type and the audio render type may be different from each other. * @return Method call result:
* - >0: Success. * - -1: Failure. * @note * - You can call this API before or after joining the room. * - After calling this API to enable custom audio rendering, call {@link pullExternalAudioFrame pullExternalAudioFrame} for audio data. * */ setAudioRenderType(type: AudioRenderType): number; /** {en} * @detail api * @region Custom audio acquisition rendering * @author gongzhengduo * @brief Pulls audio data for external playback.
* After calling this method, the SDK will actively fetch the audio data to play, including the decoded and mixed audio data from the remote source, for external playback. * @param audioFrame Audio data frame. See {@link AudioFrame AudioFrame} * @return Method call result
 * - 0: Setup succeeded * - < 0: Setup failed * @note * - Before pulling external audio data, {@link setAudioRenderType setAudioRenderType} must be called to enable custom audio rendering. * - You should pull audio data every 10 milliseconds since the duration of an RTC SDK audio frame is 10 milliseconds. Samples x call frequency = audioFrame's sample rate. Assume that the sampling rate is set to 48000, call this API every 10 ms, so that 480 sampling points should be pulled each time. * - The audio sampling format is S16. The data format in the audio buffer is PCM data, and its capacity size is audioFrame.samples × audioFrame.channel × 2. * */ pullExternalAudioFrame(audioFrame: AudioFrame): number; /** {en} * @detail api * @region Room Management * @author shenpengliang * @brief Create a room instance.
* This API only returns a room instance. You still need to call {@link joinRoom joinRoom} to actually create/join the room.
* Each call of this API creates one {@link RTCRoom RTCRoom} instance. Call this API as many times as the number of rooms you need, and then call {@link joinRoom joinRoom} of each RTCRoom instance to join multiple rooms at the same time.
* In multi-room mode, a user can subscribe to media streams in the joined rooms at the same time. * @param roomId The string matches the regular expression: `[a-zA-Z0-9_\@\\-\\.]{1,128}`. * @return {@link RTCRoom RTCRoom} instance. * @note * - If the room that you wish to join already exists, you still need to call this API first to create the RTCRoom instance, and then call {@link joinRoom joinRoom}. * - Do not create multiple rooms with the same roomId, otherwise the newly created room instance will replace the old one. * - To forward streams to the other rooms, call {@link startForwardStreamToRooms startForwardStreamToRooms} instead of enabling Multi-room mode. * */ createRTCRoom(roomId: string): RTCRoom; /** {en} * @detail api * @region Audio & Video Fallback * @author panjian.fishing * @brief Sets the fallback option for published audio & video streams.
 * You can call this API to set whether to automatically lower the resolution you set of the published streams under limited network conditions. * @param option Fallback option, see {@link PublishFallbackOption PublishFallbackOption}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - This API only works after you call {@link enableSimulcastMode enableSimulcastMode} to enable the mode of publishing multiple streams. * - You must call this API before entering the room. * - After calling this method, if there is a performance degradation or recovery due to poor performance or network conditions, the local end will receive early warnings through the {@link onPerformanceAlarms onPerformanceAlarms} callback to adjust the capture device. * - After you allow video stream to fallback, your stream subscribers will receive {@link onSimulcastSubscribeFallback onSimulcastSubscribeFallback} when the resolution of your published stream is lowered or restored. * - You can alternatively set fallback options with configurations distributed from the server side, which is of higher priority. * */ setPublishFallbackOption(option: PublishFallbackOption): number; /** {en} * @detail api * @region audio & video fallback * @author panjian.fishing * @brief Set user priority. * @param roomid Room ID * @param uid
* The ID of the remote user. * @param priority
* Priority for remote users. See enumeration type {@link RemoteUserPriority RemoteUserPriority}. * @return * - 0: Success. * - < 0: Failed. * @note * - This method is used with {@link setSubscribeFallbackOption setSubscribeFallbackOption}. * - If the subscription flow fallback option is turned on, weak connections or insufficient performance will give priority to ensuring the quality of the flow received by high-priority users. * - This method can be used before and after entering the room, and the priority of the remote user can be modified. * */ setRemoteUserPriority(roomid: string, uid: string, priority: RemoteUserPriority): number; /** {en} * @detail api * @region Message * @author wangzhanqiang * @brief \\Sends SEI data.
* In a video call scenario, SEI is sent with the video frame, while in a voice call scenario, SDK will automatically publish a black frame with a resolution of 16 × 16 pixels to carry SEI data. * @param streamIndex Specifys the type of media stream that carries SEI data. See [StreamIndex](70083#streamindex-2).
* In a voice call, you should set this parameter to `STREAM_INDEX_MAIN`, otherwise the SEI data is discarded and cannot be sent to the remote user. * @param message SEI data. No more than 4 KB SEI data per frame is recommended. * @param repeatCount Number of times a message is sent repeatedly. The value range is [0, max{29, \%{video frame rate}-1}]. Recommended range: [2,4].
* After calling this API, the SEI data will be added to a consecutive `\%{repeatCount}+1` number of video frames starting from the current frame. * @param mode SEI sending mode. See {@link SEICountPerFrame SEICountPerFrame}. * @return * - >= 0: The number of SEIs to be added to the video frame * - < 0: Failure * @note * - We recommend the number of SEI messages per second should not exceed the current video frame rate. In a voice call, the blank-frame rate is 15 fps. * - In a voice call, this API can be called to send SEI data only in internal capture mode. * - In a video call, the custom captured video frame can also be used for sending SEI data if the original video frame contains no SEI data, otherwise calling this method will not take effect. * - Each video frame carrys only the SEI data received within 2s before and after. In a voice call scenario, if no SEI data is sent within 1min after calling this API, SDK will automatically cancel publishing black frames. * - After the message is sent successfully, the remote user who subscribed your video stream will receive {@link onSEIMessageReceived onSEIMessageReceived}. * - When you switch from a voice call to a video call, SEI data will automatically start to be sent with normally captured video frames instead of black frames. * */ sendSEIMessage(streamIndex: StreamIndex, message: ArrayBuffer, repeatCount: number, mode: SEICountPerFrame): number; /** {en} * @detail api * @region Engine management * @author wangzhanqiang * @brief Sets the business ID
* You can use businessId to distinguish different business scenarios. You can customize your businessId to serve as a sub AppId, which can share and refine the function of the AppId, but it does not need authentication. * @param businessId
* Your customized businessId
* BusinessId is a tag, and you can customize its granularity. * @return * - 0: Success * - < 0: Failure * - -6001: The user is already in the room. * - -6002: The input is invalid. Legal characters include all lowercase letters, uppercase letters, numbers, and four other symbols, including '.', '-','_', and '\@'. * @note * - You must call this API before entering the room, otherwise it will be invalid. * @order 25 * */ setBusinessId(businessId: string): number; /** {en} * @detail api * @region Audio & Video Processing * @author zhushufan.ref * @brief Gets video effect interfaces. * @return Video effect interfaces. See {@link IVideoEffect IVideoEffect}. * */ getVideoEffectInterface(): IVideoEffect; /** {en} * @detail api * @region Audio & Video Processing * @author wangjunlin.3182 * @brief Enables/Disables basic beauty effects. * @param enable Whether to enable basic beauty effects.
* - true: Enables basic beauty effects. * - false: (Default) Disables basic beauty effects. * @return * - 0: Success. * - –1000: The Effect SDK is not integrated. * - –1001: This API is not available for your current RTC SDK. * - –1002: This API is not available for your current Effect SDK. You can upgrade your Effect SDK to v4.4.2+. * - –1003: Contact our technical support team for further instructions. * - –1004: Downloading related resources. The beauty effects will take effect after downloading. * - <0: Failure. Effect SDK internal error. For specific error code, see [Error Code Table](https://docs.byteplus.com/effects/docs/error-code-table). * @note * - You cannot use the basic beauty effects and the advanced effect features at the same time. See [how to use advanced effect features](https://docs.byteplus.com/byteplus-rtc/docs/114717) for more information. * - You need to integrate Effect SDK before calling this API. Effect SDK v4.4.2+ is recommended. * - Call {@link setBeautyIntensity setBeautyIntensity} to set the beauty effect intensity. If you do not set the intensity before calling this API, the default intensity will be enabled. The default values for the intensity of each beauty mode are as follows: 0.7 for brightning, 0.8 for smoothing, 0.5 for sharpening, and 0.7 for clarity. * - This API is not applicable to screen capturing. * */ enableEffectBeauty(enable: boolean): number; /** {en} * @detail api * @region Audio & Video Processing * @author wangjunlin.3182 * @brief Sets the beauty effect intensity. * @param beautyMode Basic beauty effect. See {@link EffectBeautyMode EffectBeautyMode}. * @param intensity Beauty effect intensity in range of [0,1]. When you set it to 0, the beauty effect will be turned off.
* The default values for the intensity of each beauty mode are as follows: 0.7 for brightning, 0.8 for smoothing, 0.5 for sharpening, and 0.7 for clarity. * @return * - 0: Success. * - –1000: The Effect SDK is not integrated. * - –1001: This API is not available for your current RTC SDK. * - <0: Failure. Effect SDK internal error. For specific error code, see [error codes](https://docs.byteplus.com/effects/docs/error-code-table). * @note * - If you call this API before calling {@link enableEffectBeauty enableEffectBeauty}, the default settings of beauty effect intensity will adjust accordingly. * - If you destroy the engine, the beauty effect settings will be invalid. * */ setBeautyIntensity(beautyMode: EffectBeautyMode, intensity: number): number; /** {en} * @detail api * @region Audio & Video Processing * @author wangjunlin.3182 * @brief Sets the orientation of the video frame before custom video processing and encoding. The default value is `Adaptive`.
* You should set the orientation to `Portrait` when using video effects or custom processing.
* You should set the orientation to `Portrait` or `Landscape` when pushing a single stream to the CDN. * @param orientation Orientation of the video frame. See {@link VideoOrientation VideoOrientation}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - The orientation setting is only applicable to internal captured video sources. For custom captured video sources, setting the video frame orientation may result in errors, such as swapping width and height. Screen sources do not support video frame orientation setting. * - We recommend setting the orientation before joining room. The updates of encoding configurations and the orientation are asynchronous, therefore can cause a brief malfunction in preview if you change the orientation after joining room. * */ setVideoOrientation(orientation: VideoOrientation): number; /** {en} * @detail api * @brief Get [IRTCVideo](https://docs.byteplus.com/byteplus-rtc/docs/70095#irtcvideo) in C++ layer. * @return * - >0:Success. Return the address of `IRTCVideo` in C++ layer. * - -1:Failure. * @note In some scenarios, getting and working with `IRTCVideo` in C++ layer has much higher execution efficiency than through the Java encapsulation layer. Typical scenarios include: custom processing of video/audio frames, encryption of audio and video calls, etc. * */ getNativeHandle(): number; /** {en} * @hidden currently not available * @detail api * @region speech recognition service * @author luomingkang * @brief Turn off speech recognition service * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * */ stopASR(): number; /** {en} * @detail api * @region Local recording * @author wangzhanqiang * @brief This method records the audio & video data during the call to a local file. * @param type Stream property, specifying whether to record mainstream or screen streams. 
See [StreamIndex](70083#streamindex-2) * @param config Local recording parameter configuration. See {@link RecordingConfig RecordingConfig} * @param recordingType Locally recorded media type, see {@link RecordingType RecordingType} * @return 0: Normal
* -1: Parameter setting exception
* -2: The current version of the SDK does not support this feature, please contact technical support staff * @note * - Tune When you use this method, you get an {@link onRecordingStateUpdate onRecordingStateUpdate} callback. * - If the recording is normal, the system will notify the recording progress through the {@link onRecordingProgressUpdate onRecordingProgressUpdate} callback every second. * */ startFileRecording(type: StreamIndex, config: RecordingConfig, recordingType: RecordingType): number; /** {en} * @detail api * @region Local recording * @author wangzhanqiang * @brief Stop local recording * @param type Stream property, specify to stop mainstream or screen stream recording. See [StreamIndex](70083#streamindex-2) * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Call {@link startFileRecording startFileRecording} After starting local recording, you must call this method to stop recording. * - After calling this method, you will receive an {@link onRecordingStateUpdate onRecordingStateUpdate} callback prompting you to record the result. * */ stopFileRecording(type: StreamIndex): number; /** {en} * @detail api * @author huangshouqin * @brief Starts recording audio communication, and generate the local file.
* If you call this API before or after joining the room without internal audio capture, then the recording task can still begin but the data will not be recorded in the local files. Only when you call {@link startAudioCapture startAudioCapture} to enable internal audio capture, the data will be recorded in the local files. * @param config See {@link AudioRecordingConfig AudioRecordingConfig}. * @return * - 0: Success * - -2: Invalid parameters * - -3: Not valid in this SDK. Please contact the technical support. * @note * - All audio effects are valid in the file. Mixed audio file is not included in the file. * - Call {@link stopAudioRecording stopAudioRecording} to stop recording. * - You can call this API before and after joining the room. If this API is called before you join the room, you need to call {@link stopAudioRecording stopAudioRecording} to stop recording. If this API is called after you join the room, the recording task ends automatically. If you join multiple rooms, audio from all rooms are recorded in one file. * - After calling the API, you'll receive {@link onAudioRecordingStateUpdate onAudioRecordingStateUpdate}. * */ startAudioRecording(config: AudioRecordingConfig): number; /** {en} * @detail api * @author huangshouqin * @brief Stop audio recording. * @return * - 0: Success * - <0: Failure * @note Call {@link startAudioRecording startAudioRecording} to start the recording task. * */ stopAudioRecording(): number; /** {en} * @valid since 3.53 * @detail api * @brief Create an instance for audio effect player. * @return See {@link IAudioEffectPlayer IAudioEffectPlayer}. * */ getAudioEffectPlayer(): IAudioEffectPlayer; /** {en} * @detail api * @valid since 3.53 * @brief Create a media player instance. * @param playerId Media player id. The range is `[0, 3]`. You can create up to 4 instances at the same time. If it exceeds the range, nullptr will be returned. * @return Media player instance. See {@link IMediaPlayer IMediaPlayer}. 
* */ getMediaPlayer(playerId: number): IMediaPlayer; /** {en} * @detail api * @region Screen Sharing * @author liyi.000 * @brief Sets the screen audio source type. (internal capture/custom capture) * @param sourceType Screen audio source type. See {@link AudioSourceType AudioSourceType}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - The default screen audio source type is RTC SDK internal capture. * - You should call this API before calling {@link publishScreen publishScreen}. Otherwise, you will receive {@link onWarning onWarning} with 'WARNING_CODE_SET_SCREEN_AUDIO_SOURCE_TYPE_FAILED'. * - When using internal capture, you need to call {@link startScreenCapture startScreenCapture} to start capturing. After that, as you switch to an external source by calling this API, the internal capture will stop. * - When using custom capture, you need to call {@link pushScreenAudioFrame pushScreenAudioFrame} to push the audio stream to the RTC SDK. * - Whether you use internal capture or custom capture, you must call {@link publishScreen publishScreen} to publish the captured screen audio stream. * */ setScreenAudioSourceType(sourceType: AudioSourceType): number; /** {en} * @detail api * @region Screen sharing * @author liyi.000 * @brief Set the mixing mode of the screen audio stream and the audio stream collected by the microphone during screen sharing * @param index Mixing mode. See [StreamIndex](70083#streamindex-2)
* - 'STREAM_INDEX_MAIN': Mixing the screen audio stream and the audio stream collected by the microphone * - 'STREAM_INDEX_SCREEN': By default, it divides the screen audio stream and the audio stream collected by the microphone into two audio streams * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note You should call this method before {@link publishScreen publishScreen}. Otherwise, you will receive an error from {@link onWarning onWarning}: 'WARNING_CODE_SET_SCREEN_STREAM_INDEX_FAILED * */ setScreenAudioStreamIndex(index: StreamIndex): number; /** {en} * @detail api * @region Screen Sharing * @author wangqianqian.1104 * @brief The RTC SDK start capturing the screen audio and/or video stream internally. * @param type Media type. See {@link ScreenMediaType ScreenMediaType} * @param mediaProjectionResultData The Intent obtained after applying for screen sharing permission from the Android device. See [getMediaProjection](https://developer.android.com/reference/android/media/projection/MediaProjectionManager#getMediaProjection (int,\% 20android.content. Intent)). * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - The call of this API takes effects only when you are using RTC SDK to record screen. You will get a warning by {@link onVideoDeviceWarning onVideoDeviceWarning} or {@link onAudioDeviceWarning onAudioDeviceWarning} after calling this API when the source is set to an external recorder. * - After capturing, you need to call {@link publishScreen publishScreen} to push to the remote end. * - You will receive {@link onVideoDeviceStateChanged onVideoDeviceStateChanged} and {@link onAudioDeviceStateChanged onAudioDeviceStateChanged} when the capturing is started. * - To stop capturing, call {@link stopScreenCapture stopScreenCapture}. 
* */ startScreenCapture(type: ScreenMediaType, mediaProjectionResultData: Intent): number; /** {en} * @detail api * @region Screen Sharing * @author wangqianqian.1104 * @brief Updates the media type of the internal screen capture. * @param type Media type. See {@link ScreenMediaType ScreenMediaType}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note Call this API after calling {@link startScreenCapture startScreenCapture}. * */ updateScreenCapture(type: ScreenMediaType): number; stopScreenCapture(): number; /** {en} * @detail api * @region Video management * @author wangzhanqiang * @brief Register custom coded frame push event callback * @param handler Custom coded frame callback class. See {@link IExternalVideoEncoderEventHandler IExternalVideoEncoderEventHandler} * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - This method needs to be called before entering the room. * - The engine needs to be unregistered before it is destroyed. Call this method to set the parameter to "null". * */ setExternalVideoEncoderEventHandler(handler: IExternalVideoEncoderEventHandler): number; /** {en} * @detail api * @region Video Management * @author wangzhanqiang * @brief Before subscribing to the remote video stream, set the remote video data decoding method * @param key The remote stream information specifies which video stream to decode. See {@link RemoteStreamKey RemoteStreamKey}. * @param config Video decoding method. See {@link VideoDecoderConfig VideoDecoderConfig}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - When you want to custom decode a remote stream, you need to call {@link registerRemoteEncodedVideoFrameObserver registerRemoteEncodedVideoFrameObserver} to register the remote video stream monitor, and then call the interface to set the decoding method to custom decoding. 
The monitored video data will be called back through {@link onRemoteEncodedVideoFrame onRemoteEncodedVideoFrame}. * - Since version 3.56, for automatic subscription, you can set the `RoomId` and `UserId` of `key` as `nullptr`. In this case, the decoding settings set by calling the API applies to all remote main streams or screen sharing streams based on the `StreamIndex` value of `key`. * */ setVideoDecoderConfig(key: RemoteStreamKey, config: VideoDecoderConfig): number; /** {en} * @detail api * @region Video Management * @author liuyangyang * @brief After subscribing to the remote video stream, request the keyframe * @param streamKey Remote stream information to the remote. See {@link RemoteStreamKey RemoteStreamKey}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - This method is only suitable for manual subscription mode and is used after successful subscription to the remote flow. * - This method is suitable for calling {@link setVideoDecoderConfig setVideoDecoderConfig} to turn on the custom decoding function, and the custom decoding fails * */ requestRemoteVideoKeyFrame(streamKey: RemoteStreamKey): number; /** {en} * @detail api * @region Audio management * @author majun.lvhiei * @brief Enable/Disable in-ear monitoring. * @param mode Whether to enable in-ear monitoring. See {@link EarMonitorMode EarMonitorMode}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - In-ear monitoring is effective for audios captured by the RTC SDK. * - We recommend that you use wired earbuds/headphones for a low-latency experience. * - The RTC SDK supports both the hardware-level in-ear monitoring and the SDK-level in-ear monitoring. Most hardware-level in-ear monitors enjoy lower latency and better audio quality. If your App is in the special list of the smartphone manufacturer, RTC SDK uses hardware-level ear monitoring by default. 
* */ setEarMonitorMode(mode: EarMonitorMode): number; /** {en} * @detail api * @region Audio management * @author majun.lvhiei * @brief Set the monitoring volume. * @param volume The monitoring volume with the adjustment range between 0\% and 100\%. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Call {@link setEarMonitorMode setEarMonitorMode} before setting the volume. * */ setEarMonitorVolume(volume: number): number; /** {en} * @detail api * @region Audio Management * @author wangjunzheng * @brief Enable audio information prompts. After that, you will receive {@link onLocalAudioPropertiesReport onLocalAudioPropertiesReport}, {@link onRemoteAudioPropertiesReport onRemoteAudioPropertiesReport}, and {@link onActiveSpeaker onActiveSpeaker}. * @param config See {@link AudioPropertiesConfig AudioPropertiesConfig} * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * */ enableAudioPropertiesReport(config: AudioPropertiesConfig): number; /** {en} * @detail api * @region Audio management * @author wangjunzheng * @brief Send audio stream synchronization information. The message is sent to the remote end through the audio stream and synchronized with the audio stream. After the interface is successfully called, the remote user will receive a {@link onStreamSyncInfoReceived onStreamSyncInfoReceived} callback. * @param data Message content. * @param config Configuration related to audio stream synchronization information. See {@link StreamSycnInfoConfig StreamSycnInfoConfig}. * @return * - > = 0: Message sent successfully. Returns the number of successful sends. * - -1: Message sending failed. Message length greater than 16 bytes. * - -2: Message sending failed. The content of the incoming message is empty. * - -3: Message sending failed. This screen stream was not published when the message was synchronized through the screen stream. * - -4: Message sending failed. 
This audio stream is not yet published when you synchronize messages with an audio stream captured by a microphone or custom device, as described in {@link ErrorCode ErrorCode}. * @note * - Regarding the frequency, we recommend no more than 50 calls per second. * - When using `CHANNEL_PROFILE_INTERACTIVE_PODCAST` as room profile, the data will be delivered. For other room profiles, the data may be lost when the local user is muted. * */ sendStreamSyncInfo(data: ArrayBuffer, config: StreamSycnInfoConfig): number; /** {en} * @detail api * @region video management * @author zhangzhenyu.samuel * @brief Detect the currently used camera (front/postcondition), whether flash is supported. * @return * - true: Support * - false: Not supported * @note You must have called {@link startVideoCapture startVideoCapture} for video capture using the SDK internal capture module to detect flash capability. * */ isCameraTorchSupported(): boolean; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Detect whether the currently used camera (front/postcondition) supports zoom (digital/optical zoom). * @return * - true: Support * - false: Not supported * @note Camera zoom capability can only be detected if {@link startVideoCapture startVideoCapture} is used for video capture using the SDK internal capture module. * */ isCameraZoomSupported(): boolean; /** {en} * @detail api * @region video management * @author zhangzhenyu.samuel * @brief Change the optical zoom magnification. * @param zoom Zoom magnification of the currently used camera (front/postcondition). The value range is [1, < Maximum Zoom Multiplier >].
* The maximum zoom factor can be obtained by calling {@link getCameraZoomMaxRatio getCameraZoomMaxRatio}. * @return * - 0: Success. * - < 0: Failed. * @note * - The camera zoom factor can only be set when {@link startVideoCapture startVideoCapture} is called for video capture using the SDK internal capture module. * - The setting result fails after calling {@link stopVideoCapture stopVideoCapture} to turn off internal collection. * - Call {@link setVideoDigitalZoomConfig setVideoDigitalZoomConfig} to set digital zoom. Call {@link setVideoDigitalZoomControl setVideoDigitalZoomControl} to perform digital zoom. * */ setCameraZoomRatio(zoom: number): number; /** {en} * @detail api * @region Video management * @author zhangzhenyu.samuel * @brief Get the maximum zoom factor of the currently used camera (front/postcondition) * @return Maximum zoom factor * @note You must have called {@link startVideoCapture startVideoCapture} using the SDK internal capture module for video capture, the maximum zoom factor of the camera can be detected. * */ getCameraZoomMaxRatio(): number; /** {en} * @detail api * @region video management * @author zhangzhenyu.samuel * @brief Turn on/off the flash state of the currently used camera (front/postcondition) * @param torchState Flash state. Refer to {@link TorchState TorchState} * @return * - 0: Success. * - < 0: Failed. * @note * - The flash can only be set if you have called {@link startVideoCapture startVideoCapture} for video capture using the SDK internal capture module. * - The setting result fails after calling {@link stopVideoCapture stopVideoCapture} to turn off internal collection. * */ setCameraTorch(torchState: TorchState): number; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Checks if manual focus is available for the currently used camera. * @return * - true: Available. * - false: Unavailable. 
* @note You must call {@link startVideoCapture startVideoCapture} to start SDK internal video capturing before calling this API. * */ isCameraFocusPositionSupported(): boolean; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Sets the manual focus position for the currently used camera. * @param x The x-coordinate of the focus point in range of [0, 1]. The upper-left corner of the canvas is set as the origin. * @param y The y-coordinate of the focus point in range of [0, 1]. The upper-left corner of the canvas is set as the origin. * @return * - 0: Success. * - < 0: Failure. * @note * - You must call {@link startVideoCapture startVideoCapture} to start SDK internal video capturing, and use SDK internal rendering before calling this API. * - The focus point setting will be canceled when you move the device. * - The camera focus point setting will be invalid after calling {@link stopVideoCapture stopVideoCapture} to stop internal capturing. * */ setCameraFocusPosition(x: number, y: number): number; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Checks if manual exposure setting is available for the currently used camera. * @return * - true: Available. * - false: Unavailable. * @note You must call {@link startVideoCapture startVideoCapture} to start SDK internal video capturing before calling this API. * */ isCameraExposurePositionSupported(): boolean; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Sets the manual exposure position for the currently used camera. * @param x The x-coordinate of the exposure point in range of [0, 1]. The upper-left corner of the canvas is set as the origin. * @param y The y-coordinate of the focus point in range of [0, 1]. The upper-left corner of the canvas is set as the origin. * @return * - 0: Success. * - < 0: Failure. 
* @note * - You must call {@link startVideoCapture startVideoCapture} to start SDK internal video capturing, and use SDK internal rendering before calling this API. * - The exposure point setting will be canceled when you move the device. * - The camera exposure point setting will be invalid after calling {@link stopVideoCapture stopVideoCapture} to stop internal capturing. * */ setCameraExposurePosition(x: number, y: number): number; /** {en} * @detail api * @region Video Management * @author zhangzhenyu.samuel * @brief Sets the exposure compensation for the currently used camera. * @param val Exposure compensation in range of [-1, 1]. Default to 0, which means no exposure compensation. * @return * - 0: Success. * - < 0: Failure. * @note * - You must call {@link startVideoCapture startVideoCapture} to start SDK internal video capturing, and use SDK internal rendering before calling this API. * - The camera exposure compensation setting will be invalid after calling {@link stopVideoCapture stopVideoCapture} to stop internal capturing. * */ setCameraExposureCompensation(val: number): number; /** {en} * @detail api * @valid since 353 * @author yinkaisheng * @brief Enable or disable face auto exposure mode during internal video capture. This mode fixes the problem that the face is too dark under strong backlight; but it will also cause the problem of too bright/too dark in the area outside the ROI region. * @param enable Whether to enable the mode. True by default. * @return * - 0: Success. * - < 0: Failure. * @note You must call this API before calling {@link startVideoCapture startVideoCapture} to enable internal capture to make the setting valid. * */ enableCameraAutoExposureFaceMode(enable: boolean): number; /** {en} * @hidden(macOS) * @detail api * @valid since 353 * @brief Set the minimum frame rate of of the dynamic framerate mode during internal video capture. * @param framerate The minimum value in fps. The default value is 7.
* The maximum value of the dynamic framerate mode is set by calling {@link setVideoCaptureConfig setVideoCaptureConfig}. When minimum value exceeds the maximum value, the frame rate is set to a fixed value as the maximum value; otherwise, dynamic framerate mode is enabled. * @return * - 0: Success. * - !0: Failure. * @note * - You must call this API before calling {@link startVideoCapture startVideoCapture} to enable internal capture to make the setting valid. * - If the maximum frame rate changes due to performance degradation, static adaptation, etc., the set minimum frame rate value will be re-compared with the new maximum value. Changes in comparison results may cause switch between fixed and dynamic frame rate modes. * */ setCameraAdaptiveMinimumFrameRate(framerate: number): number; /** {en} * @author qipengxiang * @hidden currently not available * @detail api * @brief Start publishing a public media stream.
* Users within the same `appID` can call {@link startPlayPublicStream startPlayPublicStream} to subscribe to the public stream regardless of which room the user has joined, or whether the user has joined any room. * @param publicStreamId ID of the public stream * @param publicStream Properties of the public stream. Refer to {@link PublicStreaming PublicStreaming} for more details.
* A public stream can include a bundle of media streams and appears as the designated layout. * @return * - 0: Success. And you will be informed by {@link onPushPublicStreamResult onPushPublicStreamResult}. * - !0: Failure because of invalid parameter or empty parameters. * @note * - Call {@link updatePublicStreamParam updatePublicStreamParam} to update the properties of the public stream which is published by the same user. Calling this API with the same stream ID repeatedly by the same user can not update the existing public stream. * - If Users with different userID call this API with the same stream ID, the public stream will be updated with the parameters passed in the latest call. * - To publish multiple public streams, call this API with different stream ID respectively. * - To stop publishing the public stream, call {@link stopPushPublicStream stopPushPublicStream}. * - Please contact ts to enable this function before using it. * */ startPushPublicStream(publicStreamId: string, publicStream: PublicStreaming): number; /** {en} * @hidden currently not available. * @detail api * @brief Stop the public stream published by the current user.
* Refer to {@link startPushPublicStream startPushPublicStream} for details about starting publishing a public stream. * @param publicStreamId ID of the public stream
* The public stream must be published by the current user. * @return * - 0: Success * - !0: Failure * */ stopPushPublicStream(publicStreamId: string): number; /** {en} * @hidden currently not available. * @detail api * @brief Update the properties of the public stream published by the current user.
* Refer to {@link startPushPublicStream startPushPublicStream} for details about starting publishing a public stream. * @param publicStreamId ID of the public stream
* The stream to be updated must be published by the current user. * @param transcoding Properties of the public stream. Refer to {@link PublicStreaming PublicStreaming} for more details. * @return * - 0: Success * - !0: Failure * @note Make sure the public stream has started successfully via {@link onPushPublicStreamResult onPushPublicStreamResult} before calling this API. * */ updatePublicStreamParam(publicStreamId: string, transcoding: PublicStreaming): number; startPlayPublicStream(publicStreamId: string): number; /** {en} * @hidden currently not available. * @detail api * @brief Cancel subscribing the public stream.
* Call this method to cancel subscribing to the public stream by calling {@link startPlayPublicStream startPlayPublicStream}. * @param publicStreamId ID of the public stream * @return * - 0: Success * - !0: Failure * */ stopPlayPublicStream(publicStreamId: string): number; /** {en} * @hidden currently not available. * @detail api * @brief Assign an internal render view to the public stream * @param publicStreamId ID of the public stream * @param canvas Internal render view. Set to be a blank view if you want to unbind. Refer to {@link VideoCanvas VideoCanvas} for more details. * @return * - 0: Success * - !0: Failure * */ setPublicStreamVideoCanvas(publicStreamId: string, canvas: VideoCanvas): number; /** {en} * @detail api * @valid since 3.51 * @author qipengxiang * @brief Set the audio playback volume of the public stream. * @param publicStreamId ID of the public stream * @param volume Ratio(\%) of the audio playback volume to the original volume, in the range `[0, 400]`, with overflow protection. The default volume is 100.
* To ensure the audio quality, the recommended range is `[0, 100]`. * @return * - 0: Success. * - -2: Wrong parameter. * */ setPublicStreamAudioPlaybackVolume(publicStreamId: string, volume: number): number; /** {en} * @detail api * @region Network Management * @author qipengxiang * @brief Starts a call test.
* Before entering the room, you can call this API to test whether your local audio/video equipment as well as the upstream and downstream networks are working correctly.
* Once the test starts, SDK will record your sound or video. If you receive the playback within the delay range you set, the test is considered normal. * @param config Test configurations, see {@link EchoTestConfig EchoTestConfig}. * @param delayTime Delayed audio/video playback time specifying how long you expect to receive the playback after starting the test. The range of the value is [2,10] in seconds and the default value is 2. * @return API call result:
* - 0: Success * - -1: Failure, testing in progress * - -2: Failure, you are in the room * - -3: Failure, neither video nor audio is captured * - -4: Failure, parameter exception * - -5: Failure, the roomID is already used * @note * - Once you start the test, you can either call {@link stopEchoTest stopEchoTest} or wait until the test stops automatically after 60s, to start the next test or enter the room. * - All APIs related to device control and stream control called before this API are invalidated during the test and are restored after the test. * - All APIs related to device control, stream control, and room entry called during the test do not take effect, and you will receive {@link onWarning onWarning} with the warning code `WARNING_CODE_IN_ECHO_TEST_MODE`. * - You will receive the test result from {@link onEchoTestResult onEchoTestResult}. * */ startEchoTest(config: EchoTestConfig, delayTime: number): number; /** {en} * @detail api * @region Network Management * @author qipengxiang * @brief Stop the current call test.
* After calling {@link startEchoTest startEchoTest}, you must call this API to stop the test. * @return API call result:
* - 0: Success * - -1: Failure, no test is in progress * @note After stopping the test with this API, all the system devices and streams are restored to the state they were in before the test. * */ stopEchoTest(): number; /** {en} * @detail api * @region Audio & Video Processing * @author zhushufan.ref * @brief Removes video watermark from designated video stream. * @param streamIndex Targeting stream index of the watermark. See [StreamIndex](70083#streamindex-2). * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * */ clearVideoWatermark(streamIndex: StreamIndex): number; /** {en} * @detail api * @region Audio & Video Transport * @author liuyangyang * @brief Set an alternative image when the local internal video capture is not enabled.
* When you call `stopVideoCapture`, an alternative image will be pushed. You can set the path to null or open the camera to stop publishing the image.
* You can repeatedly call this API to update the image. * @param filePath Set the path of the static image.
* You can use the absolute path (file://xxx) or the asset directory path (/assets/xx.png). The maximum size for the path is 512 bytes.
* You can upload a .JPG, .JPEG, .PNG, or .BMP file.
* When the aspect ratio of the image is inconsistent with the video encoder configuration, the image will be proportionally resized, with the remaining pixels rendered black. The framerate and the bitrate are consistent with the video encoder configuration. * @return * - 0: Success. * - -1: Failure. * @note * - The API is only effective when publishing an internally captured video. * - You cannot locally preview the image. * - You can call this API before and after joining an RTC room. In the multi-room mode, the image can be only displayed in the room you publish the stream. * - You cannot apply effects like filters and mirroring to the image, while you can watermark the image. * - The image is not effective for a screen-sharing stream. * - When you enable the simulcast mode, the image will be added to all video streams, and it will be proportionally scaled down to smaller encoding configurations. * */ setDummyCaptureImagePath(filePath: string): number; /** {en} * @detail api * @region cloud proxy * @author daining.nemo * @brief Start cloud proxy * @param cloudProxiesInfo cloud proxy information list. See {@link CloudProxyInfo CloudProxyInfo}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Call this API before joining the room. * - Start pre-call network detection after starting cloud proxy. * - After starting cloud proxy, once it connects to the cloud proxy server successfully, you will receive {@link onCloudProxyConnected onCloudProxyConnected}. * - To stop cloud proxy, call {@link stopCloudProxy stopCloudProxy}.
* */ stopCloudProxy(): number; /** {en} * @detail api * @author wangjunzheng * @brief Create a karaoke scoring management interface. * @return Karaoke scoring management interface. See {@link ISingScoringManager ISingScoringManager}. * */ getSingScoringManager(): ISingScoringManager; /** {en} * @detail api * @author songxiaomeng.19 * @brief Obtain the synchronization network time information. * @return See {@link NetworkTimeInfo NetworkTimeInfo}. * @note * - When you call this API for the first time, you start synchronizing the network time information and receive the return value `0`. After the synchronization finishes, you will receive {@link onNetworkTimeSynchronized onNetworkTimeSynchronized}. After that, calling this API will get you the correct network time. * - Under chorus scenario, participants shall start audio mixing at the same network time. * */ getNetworkTimeInfo(): NetworkTimeInfo; /** {en} * @hidden currently not available * @detail api * @author lihuan.wuti2ha * @brief Creates the KTV manager interfaces. * @return KTV manager interfaces. See {@link IKTVManager IKTVManager}. * */ getKTVManager(): IKTVManager; /** {en} * @detail api * @region Audio Management * @author zhangcaining * @brief Start echo detection before joining a room. * @param testAudioFilePath Absolute path of the music file for the detection. It is expected to be encoded in UTF-8. The following files are supported: mp3, aac, m4a, 3gp, wav.
* We recommend assigning a music file whose duration is between 10 and 20 seconds.
* Do not pass a silent file. * @return Method call result:
* - 0: Success. * - -1: Failure due to the ongoing process of the previous detection. Call {@link stopHardwareEchoDetection stopHardwareEchoDetection} to stop it before calling this API again. * - -2: Failure due to an invalid file path or file format. * @note * - You can use this feature only when {@link ChannelProfile ChannelProfile} is set to `CHANNEL_PROFILE_MEETING` or `CHANNEL_PROFILE_MEETING_ROOM`. * - Before calling this API, ask the user for the permissions to access the local audio devices. * - Before calling this API, make sure the audio devices are activated and keep the capture volume and the playback volume within a reasonable range. * - The detection result is passed as the argument of onHardwareEchoDetectionResult. * - During the detection, the SDK is not able to respond to the other testing APIs, such as {@link startEchoTest startEchoTest}, {@link startAudioDeviceRecordTest startAudioDeviceRecordTest} or {@link startAudioPlaybackDeviceTest startAudioPlaybackDeviceTest}. * - Call {@link stopHardwareEchoDetection stopHardwareEchoDetection} to stop the detection and release the audio devices. * */ startHardwareEchoDetection(testAudioFilePath: string): number; /** {en} * @detail api * @region Audio Management * @author zhangcaining * @brief Stop the echo detection before joining a room. * @return Method call result:
* - 0: Success. * - -1: Failure. * @note * - Refer to {@link startHardwareEchoDetection startHardwareEchoDetection} for information on how to start an echo detection. * - We recommend calling this API to stop the detection once you get the detection result from {@link onHardwareEchoDetectionResult onHardwareEchoDetectionResult}. * - You must stop the echo detection to release the audio devices before the user joins a room. Otherwise, the detection may interfere with the call. * */ stopHardwareEchoDetection(): number; /** {en} * @detail api * @brief Enable cellular network assisted communication to improve call quality. * @param config See {@link MediaTypeEnhancementConfig MediaTypeEnhancementConfig}. * @return Method call result:
* - 0: Success. * - -1: Failure, internal error. * - -2: Failure, invalid parameters. * @note The function is off by default. * */ setCellularEnhancement(config: MediaTypeEnhancementConfig): number; /** {en} * @platform android * @valid since 3.56 * @detail api * @region Video Facility Management * @author likai.666 * @brief Create a video Facility Management instance * @return Video Facility Management instance. See {@link IVideoDeviceManager IVideoDeviceManager} * */ android_getVideoDeviceManager(): $p_a.IVideoDeviceManager; /** {en} * @detail api * @brief Set the rotation of the video images captured from the local device.
* Call this API to rotate the videos when the camera is fixed upside down or tilted. For rotating videos on a phone, we recommend using {@link setVideoRotationMode setVideoRotationMode}. * @param rotation It defaults to `VIDEO_ROTATION_0(0)`, which means not to rotate. Refer to {@link VideoRotation VideoRotation}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - For the videos captured by the internal module, the rotation will be combined with that set by calling {@link setVideoRotationMode setVideoRotationMode}. * - This API affects the external-sourced videos. The final rotation would be the original rotation angles adding up with the rotation set by calling this API. * - The elements added during the video pre-processing stage, such as video sticker and background applied using {@link enableVirtualBackground:withSource: enableVirtualBackground} will also be rotated by this API. * - The rotation would be applied to both locally rendered videos and those sent out. However, if you need to rotate a video which is intended for pushing to CDN individually, use {@link setVideoOrientation setVideoOrientation}. * */ setVideoCaptureRotation(rotation: VideoRotation): number; /** {en} * @platform ios * @hidden(macOS) * @detail api * @region Audio Management * @author dixing * @brief On iOS, you can change the Bluetooth profile when the media volume is set in all scenarios. * @param mode The Bluetooth profiles. See {@link ByteRTCBluetoothMode ByteRTCBluetoothMode}. * @return * - 0: Success. * - < 0 : Fail. See {@link ByteRTCReturnStatus ByteRTCReturnStatus} for more details * @note You will receive {@link rtcEngine:onAudioDeviceWarning:deviceType:deviceWarning rtcEngine:onAudioDeviceWarning:deviceType:deviceWarning:} in the following scenarios: 1) You cannot change the Bluetooth profile to HFP; 2) The media volume is not set in all scenarios. 
We suggest that you call {@link ByteRTCVideosetaudioscenario setAudioScenario:} to set the media volume scenario before calling this API. */ ios_setBluetoothMode(mode: $p_i.ByteRTCBluetoothMode): number; /** {en} * @platform ios * @detail api * @region Video Management * @author zhaomingliang * @brief Video publisher call this API to set the parameters of the maximum resolution video stream that is expected to be published, including resolution, frame rate, bitrate, and fallback strategy in poor network conditions.
* You can only set configuration for one stream with this API. If you want to set configuration for multiple streams, Call {@link setVideoEncoderConfig setVideoEncoderConfig:}. * @param encoderConfig The maximum video encoding parameter. See {@link ByteRTCVideoEncoderConfig ByteRTCVideoEncoderConfig}. * @return API call result:
* - 0: Success * - ! 0: Failure * @note * - You can use {@link enableSimulcastMode enableSimulcastMode:} simultaneously to publish streams with different resolutions. Specifically, if you want to publish multiple streams with different resolutions, you need to call this method and enable the simulcast mode with {@link enableSimulcastMode enableSimulcastMode:} before publishing your streams. The SDK will intelligently adjust the number of streams to be published (up to 4) and their parameters based on the settings of the subscribing end. The resolution set by calling this method will be the maximum resolution among the streams. For specific rules, please refer to [Simulcasting](https://docs.byteplus.com/en/byteplus-rtc/docs/70139). * - Without calling this API, SDK will only publish one stream for you with a resolution of 640px × 360px and a frame rate of 15fps. * - In custom capturing, you must call this API to set the encoding parameters to ensure the integrity of the picture received by remote users. * - This API is applicable to the video stream captured by the camera, see {@link setScreenVideoEncoderConfig setScreenVideoEncoderConfig:} for setting parameters for screen sharing video stream. */ ios_setMaxVideoEncoderConfig(encoderConfig: $p_i.ByteRTCVideoEncoderConfig): number; /** {en} * @platform ios * @detail api * @region encryption * @author wangjunlin.3182 * @brief Sets custom encryption and decryption methods. * @param handler Custom encryption handler, which needs to implement the encryption and decryption method. See {@link ByteRTCEncryptHandler ByteRTCEncryptHandler}. * @return * - 0: Success. * - < 0 : Fail. See {@link ByteRTCReturnStatus ByteRTCReturnStatus} for more details * @note * - The method and {@link setEncryptInfo:key setEncryptInfo:key:} are mutually exclusive relationships, that is, according to the call order, the last call method takes effect version. 
* - This method must be called before calling {@link joinRoom:userInfo:roomConfig joinRoom:userInfo:roomConfig:}, which can be called repeatedly, taking the last called parameter as the effective parameter. * - Whether encrypted or decrypted, the length of the modified data needs to be controlled under 180\%. That is, if the input data is 100 bytes, the processed data must be less than 180 bytes. If the encryption or decryption result exceeds the limit, the audio & video frame may be discarded. * - Data encryption/decryption is performed serially, so depending on the implementation The method may affect the final rendering efficiency. Whether to use this method needs to be carefully evaluated by the user. */ ios_setCustomizeEncryptHandler(handler: $p_i.id<$p_i.ByteRTCEncryptHandler>): number; /** {en} * @platform ios * @hidden(macOS) * @detail api * @region Screen Sharing * @author wangzhanqiang * @brief Set Extension configuration. It should be set before capturing screen internally. * @param groupId Your app and Extension should belong to the same App Group. You need to put in their Group ID here. * @return * - 0: Success. * - < 0 : Fail. See {@link ByteRTCReturnStatus ByteRTCReturnStatus} for more details * @note You must call this API immediately after calling {@link createRTCVideo:delegate:parameters createRTCVideo:delegate:parameters:}. You only need to call this API once in the life cycle of the engine instance. */ ios_setExtensionConfig(groupId: string): number; /** {en} * @platform ios * @hidden(macOS) * @detail api * @region Screen Sharing * @author wangzhanqiang * @brief Sends message to screen capture Extension * @param messsage Message sent to the Extension * @return * - 0: Success. * - < 0 : Fail. See {@link ByteRTCReturnStatus ByteRTCReturnStatus} for more details * @note * - Call this API after calling {@link startScreenCapture:bundleId startScreenCapture:bundleId:}. 
* - The extension will receive {@link onReceiveMessageFromApp onReceiveMessageFromApp:} when the message is sent. */ ios_sendScreenCaptureExtensionMessage(messsage: $p_i.NSData): number; } export declare class RTCRoom { protected _instance: any; /** {en} * @platform ios * @detail callback */ get ios_delegate(): $p_i.id<$p_i.ByteRTCRoomDelegate>; set ios_delegate(value: $p_i.id<$p_i.ByteRTCRoomDelegate>); protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @region multi-room * @author shenpengliang * @brief Listens for event callbacks related to the {@link RTCRoom RTCRoom} instance by setting the event handler of this instance. * @param rtcRoomEventHandler Refer to {@link IRTCRoomEventHandler IRTCRoomEventHandler}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * */ setRTCRoomEventHandler(rtcRoomEventHandler: RTCRoomEventHandler): number; /** {en} * @detail api * @region Room Management * @author yejing.luna * @brief Subscribes to all remote media streams captured by camera/microphone. Or update the subscribe options of all subscribed user. * @param type Media stream type, used for specifying whether to subscribe to the audio stream or the video stream. See {@link MediaStreamType MediaStreamType}. * @return API call result:
* - 0: Success * - !0: Failure * @note * - If the subscription options conflict with the previous ones, they are subject to the configurations in the last call. * - With the Audio selection enabled, if the number of media streams exceeds the limit, we recommend you call {@link subscribeStream subscribeStream} to subscribe each target media stream other than calling this API. * - After calling this API, you will be informed of the calling result with {@link onStreamSubscribed onStreamSubscribed}. * - Once the local user subscribes to the stream of a remote user, the subscription to the remote user will sustain until the local user leaves the room or unsubscribe from it by calling {@link unsubscribeStream unsubscribeStream}. * - Any other exceptions will be included in {@link onStreamStateChanged onStreamStateChanged}, see {@link ErrorCode ErrorCode} for the reasons. * */ subscribeAllStreams(type: MediaStreamType): number; /** {en} * @detail api * @region Room Management * @author yejing.luna * @brief Unsubscribes from all remote media streams captured by camera/microphone.
* You can call this API to unsubscribe from streams that are subscribed to either automatically or manually. * @param type Media stream type, used for specifying whether to unsubscribe from the audio stream or the video stream. See {@link MediaStreamType MediaStreamType}. * @return API call result:
* - 0: Success * - !0: Failure * @note * - After calling this API, you will be informed of the calling result with {@link onStreamSubscribed onStreamSubscribed}. * - Any other exceptions will be included in {@link onStreamStateChanged onStreamStateChanged}, see {@link ErrorCode ErrorCode} for the reasons. * */ unsubscribeAllStreams(type: MediaStreamType): number; /** {en} * @detail api * @region multiple rooms * @author shenpengliang * @brief Leave and destroy the room instance created by calling {@link createRTCRoom createRTCRoom}. * */ destroy(): void; /** {en} * @detail api * @region Multiple rooms * @author shenpengliang * @brief Join the room.
* After creating a room by calling {@link createRTCRoom createRTCRoom}, call this API to join the room and make audio & video calls with other users in the room. * @param token Dynamic key. It is used for authentication and verification of users entering the room.
* You need to bring Token to enter the room. When testing, you can use the console to generate temporary tokens. The official launch requires the use of the key SDK to generate and issue tokens at your server level. See [Use Token to complete authentication](#70121) for token validity and generation method.
* Apps with different AppIDs are not interoperable.
* Make sure that the AppID used to generate the Token is the same as the AppID used to create the engine, otherwise it will cause the join room to fail. * @param userInfo User information. See {@link UserInfo UserInfo}. * @param roomConfig Room parameter configuration, set the room mode and whether to automatically publish or subscribe to the flow. See {@link RTCRoomConfig RTCRoomConfig} for the specific configuration mode. * @return * - 0: Success. * - -1: RoomID/userInfo.uid contains invalid parameters. * - -2: Already in the room. After the interface call is successful, as long as the return value of 0 is received and {@link leaveRoom leaveRoom} is not called successfully, this return value is triggered when the room entry interface is called again, regardless of whether the filled room ID and user ID are duplicated. * The reason for the failure will be communicated via the {@link onRoomStateChanged onRoomStateChanged} callback. * @note * - In the same room with the same App ID, the user ID of each user must be unique. If two users have the same user ID, the user who entered the room later will kick the user who entered the room first out of the room, and the user who was kicked out will receive the {@link onRoomStateChanged onRoomStateChanged} callback notification. For the error type, see kErrorCodeDuplicateLogin in {@link ERROR_CODE_DUPLICATE_LOGIN ERROR_CODE_DUPLICATE_LOGIN}. * - Local users will receive {@link onRoomStateChanged onRoomStateChanged} callback notification after calling this method to join the room successfully. If the local user is also a visible user, the remote user will receive an {@link onUserJoined onUserJoined} callback notification when joining the room. * - By default, the user is visible in an RTC room. Joining fails when the number of users in an RTC room reaches the upper limit. 
To avoid this, call {@link setUserVisibility setUserVisibility} to change the visibility of the audience users to `false` by considering the capacity for the invisible users is much larger than that for visible users. An RTC room can accommodate a maximum of 50 visible users, and 30 media streams can be published simultaneously. For more information, see [Capability of Users and Streams](https://docs.byteplus.com/en/byteplus-rtc/docs/257549). * - After the user successfully joins the room, the SDK may lose connection to the server in case of poor local network conditions. At this point, {@link onConnectionStateChanged onConnectionStateChanged} callback will be triggered and the SDK will automatically retry until it successfully reconnects to the server. After successful reconnection, {@link onRoomStateChanged onRoomStateChanged} callback notification will be received locally. * */ joinRoom(token: string, userInfo: UserInfo, roomConfig: RTCRoomConfig): number; /** {en} * @detail api * @region Multiple rooms * @author shenpengliang * @brief Leave the room.
* The user calls this method to leave the room, end the call process, and release all call-related resources.
* This method is an asynchronous operation, and the call returns without actually exiting the room. After you actually exit the room, you will receive a callback notification from {@link onLeaveRoom onLeaveRoom} locally. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - After a user who calls {@link setUserVisibility setUserVisibility} to make himself visible leaves the room, other users in the room will receive a callback notification from {@link onUserLeave onUserLeave}. * - If the engine is destroyed immediately after this method is called, the SDK will not be able to trigger the {@link onLeaveRoom onLeaveRoom} callback. * */ leaveRoom(): number; /** {en} * @detail api * @region Room management * @author shenpengliang * @brief Set the visibility of the user in the room. The local user is visible to others by default before calling this API.
* An RTC room can accommodate a maximum of 50 visible users, and 30 media streams can be published simultaneously. For more information, see [Room Capacity](https://docs.byteplus.com/en/byteplus-rtc/docs/257549). * @param enable Visibility of the user in the room.
* - true: The user can publish media streams. The other users in the room get informed of the behaviors of the user, such as joining room, starting video capture, and leaving room. * - false: The user cannot publish media streams. The other users in the room do not get informed of the behaviors of the user, such as joining room, starting video capture, or leaving room. * @return * - 0: Success. * - < 0 : Failure. See {@link ReturnStatus ReturnStatus}. * @note * - You can call this API whether the user is in a room or not. * - You will receive {@link onUserVisibilityChanged onUserVisibilityChanged} after calling this API. (Available since v3.54) * • If you call this API before joining room, and the set value is different from the default value, you will receive the callback when you join the room.
* • If you call this API after joining room, and the current visibility is different from the previous value, you will receive the callback.
* • When reconnecting after losing internet connection, if the visibility changes, you will receive the callback.
* - When you call this API while you are in the room, the other users in the room will be informed via the following callback. * • When you switch from `false` to `true`, they will receive {@link onUserJoined onUserJoined}.
* • When you switch from `true` to `false`, they will receive {@link onUserLeave onUserLeave}.
* • The invisible user will receive the warning code, `WARNING_CODE_PUBLISH_STREAM_FORBIDEN`, when trying to publish media streams. * */ setUserVisibility(enable: boolean): number; /** {en} * @detail api * @author shenpengliang * @brief Update Token.
* You must call this API to update token to ensure the RTC call to continue when you receive {@link onTokenWillExpire onTokenWillExpire}, {@link onPublishPrivilegeTokenWillExpire onPublishPrivilegeTokenWillExpire}, or {@link onSubscribePrivilegeTokenWillExpire onSubscribePrivilegeTokenWillExpire}. * @param token Valid token.
* If the Token is invalid, you will receive {@link onRoomStateChanged onRoomStateChanged} with the error code of `-1010`. * @return API call result:
* - 0: Success. * - <0: Failure. See {@link ReturnStatus ReturnStatus} for specific reasons. * @note * - In versions before 3.50, the publish and subscribe privileges contained in the Token are reserved parameters with no actual control permissions. In version 3.50 and later, the publish/subscribe privileges will be effective for whitelisted users. Please contact the technical support team to include you in the whitelist if you need this function. * - Do not call both {@link updateToken updateToken} and {@link joinRoom joinRoom} to update the Token. If you fail to join the room or have been removed from the room due to an expired or invalid Token, call {@link joinRoom joinRoom} with a valid token to rejoin the room. * */ updateToken(token: string): number; /** {en} * @detail api * @region Multi-room * @author hanchenchen.c * @brief Send a point-to-point text message to the specified user in the room * @param userId User ID of the message receiver * @param messageStr Text message content sent. Message does not exceed 64 KB. * @param config See {@link MessageConfig MessageConfig}. * @return * - > 0: Sent successfully, return the number of the sent message, increment from 1 * - -1: Sent failed, RTCRoom instance not created * - -2: Sent failed, uid is empty * @note * - Before sending an in-room text message, you must call {@link joinRoom joinRoom} to join the room. * - After the call, you will receive an {@link onUserMessageSendResult onUserMessageSendResult} callback to notify that the message was sent successfully or failed; * - If the message was sent successfully, the user specified by the userId will receive an {@link onUserMessageReceived onUserMessageReceived} callback. * */ sendUserMessage(userId: string, messageStr: string, config: MessageConfig): number; /** {en} * @detail api * @region multiroom * @author hanchenchen.c * @brief Sends a point-to-point binary message to the specified user in the room. 
* @param userId Message Receives user ID * @param buffer Binary message content sent. Message does not exceed 46KB. * @param config See {@link MessageConfig MessageConfig} * @note * - Before sending in-room binary messages, you must call {@link joinRoom joinRoom} to join the room. * - After the call, you will receive an {@link onUserMessageSendResult onUserMessageSendResult} callback to notify that the message was sent successfully or failed; * - If the message was sent successfully, the user specified by the userId will receive an {@link onUserBinaryMessageReceived onUserBinaryMessageReceived} callback. * */ sendUserBinaryMessage(userId: string, buffer: ArrayBuffer, config: MessageConfig): number; /** {en} * @detail api * @region Multi-room * @author hanchenchen.c * @brief Mass text messages to all other users in the room. * @param messageStr The content of the text message sent. The message does not exceed 64 KB. * @note * - Before broadcasting a text message in the room, you must call {@link joinRoom joinRoom} to join the room. * - After the call, you will receive the {@link onRoomMessageSendResult onRoomMessageSendResult} callback; * - Other users in the same room will receive the {@link onRoomMessageReceived onRoomMessageReceived} callback. * */ sendRoomMessage(messageStr: string): number; /** {en} * @detail api * @region Multi-room * @author hanchenchen.c * @brief Group binary messages to all other users in the room. * @param buffer The content of the binary message sent. The message does not exceed 46KB. * @note * - Before broadcasting binary messages in the room, you must call {@link joinRoom joinRoom} to join the room. * - After the call, you will receive the {@link onRoomMessageSendResult onRoomMessageSendResult} callback; * - Other users in the same room will receive the {@link onRoomBinaryMessageReceived onRoomBinaryMessageReceived} callback. 
* */ sendRoomBinaryMessage(buffer: ArrayBuffer): number; /** {en} * @detail api * @region Multi-room * @author wangzhanqiang * @brief Synchronizes published audio and video.
* When the same user simultaneously uses separate devices to capture and publish audio and video, there is a possibility that the streams are out of sync due to the network disparity. In this case, you can call this API on the video publisher side and the SDK will automatically line the video stream up according to the timestamp of the audio stream, ensuring that the audio the receiver hears corresponds to the video the receiver watches. * @param audioUserId The ID of audio publisher. You can stop the current A/V synchronization by setting this parameter to null. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - You can call this API anytime before or after entering the room. * - The source user IDs of the audio and video stream to be synchronized must be in the same RTC room. * - When the A/V synchronization state changes, you will receive {@link onAVSyncStateChange onAVSyncStateChange}. * - More than one pair of audio and video can be synchronized simultaneously in the same RTC room, but you should note that one single audio source cannot be synchronized with multiple video sources at the same time. * - If you want to change the audio source, call this API again with a new `audioUserId`. If you want to change the video source, you need to stop the current synchronization first, then call this API on the new video publisher side. * */ setMultiDeviceAVSync(audioUserId: string): number; /** {en} * @detail api * @region Video Management * @author zhaomingliang * @brief Sets your expected configuration of the remote video stream that you want to subscribe to or have subscribed to. * @param userId ID of the remote video stream publisher you expect to configure subscription parameters for. * @param remoteVideoConfig The parameters you expect to configure for the remote video stream, see {@link RemoteVideoConfig RemoteVideoConfig}. * @return API call result:
* - 0: Success. * - <0: Failure. See {@link ReturnStatus ReturnStatus} for specific reasons. * @note * - This API only works after the publisher calls {@link enableSimulcastMode enableSimulcastMode} to enable publishing multiple video streams, in which case the subscriber will receive the stream from the publisher that is closest to the set configuration; otherwise the subscriber will only receive one video stream with a resolution of 640px × 360px and a frame rate of 15fps. * - If you don't call this API after the publisher enables the function of publishing multiple streams, you will receive by default the video stream with the largest resolution set by the publisher. * - You should call this API in the room. If you want to call it before entering the room, you should set the `remoteVideoConfig` in the `roomConfig` when calling {@link joinRoom joinRoom}. * - The SDK will automatically select the stream to be published or subscribed based on the settings of both sides. * */ setRemoteVideoConfig(userId: string, remoteVideoConfig: RemoteVideoConfig): number; /** {en} * @detail api * @valid since 3.60. * @brief Start or stop publishing video streams captured by camera in the current room. * @param publish Whether to publish the media stream. * @return * - 0: Success. * - < 0 : Fail. See ReturnStatus{@link #ReturnStatus} for more details * @note * - You don't need to call this API if you set it to Auto-publish when calling joinRoom{@link #RTCRoom#joinRoom}. * - An invisible user cannot publish media streams. Call setUserVisibility{@link #RTCRoom#setUserVisibility} to change your visibility in the room. * - Call publishScreenAudio{@link #RTCRoom#publishScreenAudio} (not supported on Linux) and/or publishScreenVideo{@link #RTCRoom#publishScreenVideo} to start or stop screen sharing. * - Call publishStreamAudio{@link #RTCRoom#publishStreamAudio} to start or stop publishing the audio stream captured by the microphone. 
* - Call startForwardStreamToRooms{@link #RTCRoom#startForwardStreamToRooms} to forward the published streams to the other rooms. * - After you call this API, the other users in the room will receive onUserPublishStreamVideo{@link #IRTCRoomEventHandler#onUserPublishStreamVideo}. Those who successfully received your streams will receive onFirstRemoteVideoFrameDecoded{@link #IRTCEngineEventHandler#onFirstRemoteVideoFrameDecoded} at the same time. * */ publishStreamVideo(publish: boolean): number; /** {en} * @detail api * @valid since 3.60. * @brief Start or stop publishing media streams captured by the local microphone in the current room. * @param publish Whether to publish the media stream. * @return * - 0: Success. * - < 0 : Fail. See ReturnStatus{@link #ReturnStatus} for more details * @note * - You don't need to call this API if you set it to Auto-publish when calling joinRoom{@link #RTCRoom#joinRoom}. * - An invisible user cannot publish media streams. Call setUserVisibility{@link #RTCRoom#setUserVisibility} to change your visibility in the room. * - Call publishScreenAudio{@link #RTCRoom#publishScreenAudio} (not supported on Linux) and/or publishScreenVideo{@link #RTCRoom#publishScreenVideo} to start or stop screen sharing. * - Call publishStreamVideo{@link #RTCRoom#publishStreamVideo} to start or stop publishing the video stream captured by the camera. * - Call startForwardStreamToRooms{@link #RTCRoom#startForwardStreamToRooms} to forward the published streams to the other rooms. * - After you call this API, the other users in the room will receive onUserPublishStreamAudio{@link #IRTCRoomEventHandler#onUserPublishStreamAudio}. Those who successfully received your streams will receive onFirstRemoteAudioFrame{@link #IRTCEngineEventHandler#onFirstRemoteAudioFrame} at the same time. * */ publishStreamAudio(publish: boolean): number; /** {en} * @detail api * @valid since 3.60. * @brief Start or stop sharing the local screen in the room. 
* If you need to share your screen in multiple rooms, you can use the same uid to join multiple rooms and call this API in each room. Also, you can publish different types of screen-sharing streams in different rooms. * @param publish Whether to publish video stream. * @return * - 0: Success. * - < 0 : Fail. See ReturnStatus{@link #ReturnStatus} for more details * @note * - You need to call this API to publish screen even if you set it to Auto-publish when calling joinRoom{@link #RTCRoom#joinRoom}. * - An invisible user cannot publish media streams. Call setUserVisibility{@link #RTCRoom#setUserVisibility} to change your visibility in the room. * - Call publishScreenAudio{@link #RTCRoom#publishScreenAudio} (not supported on Linux) to start or stop sharing computer audio. * - Call publishStreamVideo{@link #RTCRoom#publishStreamVideo} to start or stop publishing the video stream captured by the camera. * - Call publishStreamAudio{@link #RTCRoom#publishStreamAudio} to start or stop publishing the audio stream captured by the microphone. * - Call startForwardStreamToRooms{@link #RTCRoom#startForwardStreamToRooms} to forward the published streams to the other rooms. * - After you called this API, the other users in the room will receive onUserPublishScreenVideo{@link #IRTCRoomEventHandler#onUserPublishScreenVideo}. Those who successfully received your streams will receive onFirstRemoteVideoFrameDecoded{@link #IRTCEngineEventHandler#onFirstRemoteVideoFrameDecoded} at the same time. * - After calling this API, you'll receive onScreenVideoFrameSendStateChanged{@link #IRTCEngineEventHandler#onScreenVideoFrameSendStateChanged}. * - Refer to [Sharing Screen in PC](https://docs.byteplus.com/byteplus-rtc/docs/70144) for more information. * */ publishScreenVideo(publish: boolean): number; /** {en} * @hidden(Linux) * @detail api * @valid since 3.60. * @brief Manually publishes local screen-sharing streams in the current room.
* If you need to share your screen in multiple rooms, you can use the same uid to join multiple rooms and call this API in each room. Also, you can publish different types of screen-sharing streams in different rooms. * @param publish Media stream type, used for specifying whether to publish audio stream or video stream. * @return * - 0: Success. * - < 0 : Fail. See ReturnStatus{@link #ReturnStatus} for more details * @note * - You need to call this API to publish screen even if you set it to Auto-publish when calling joinRoom{@link #RTCRoom#joinRoom}. * - An invisible user cannot publish media streams. Call setUserVisibility{@link #RTCRoom#setUserVisibility} to change your visibility in the room. * - Call publishScreenVideo{@link #RTCRoom#publishScreenVideo} to start or stop sharing the local screen. * - Call publishStreamVideo{@link #RTCRoom#publishStreamVideo} to start or stop publishing the video stream captured by the camera. * - Call publishStreamAudio{@link #RTCRoom#publishStreamAudio} to start or stop publishing the audio stream captured by the microphone. * - Call startForwardStreamToRooms{@link #RTCRoom#startForwardStreamToRooms} to forward the published streams to the other rooms. * - After you called this API, the other users in the room will receive onUserPublishScreenAudio{@link #IRTCRoomEventHandler#onUserPublishScreenAudio}. Those who successfully received your streams will receive onFirstRemoteAudioFrame{@link #IRTCEngineEventHandler#onFirstRemoteAudioFrame} at the same time. * - Refer to [Sharing Screen in PC](https://docs.byteplus.com/byteplus-rtc/docs/70144) for more information. * */ publishScreenAudio(publish: boolean): number; /** {en} * @detail api * @valid since 3.60. * @brief Subscribes to specific remote media streams captured by the local camera. Or update the subscribe options of the subscribed user. * @param userId The ID of the remote user who published the target video stream. * @param subscribe Whether to subscribe to the stream. 
* @return API call result:
* - 0: Success. * - <0: Failure. See ReturnStatus{@link #ReturnStatus} for specific reasons. * @note * - Calling this API to update the subscribe configuration when the user has subscribed the remote user either by calling this API or by auto-subscribe. * - You must first get the remote stream information through onUserPublishStreamVideo{@link #IRTCRoomEventHandler#onUserPublishStreamVideo} before calling this API to subscribe to streams accordingly. * - After calling this API, you will be informed of the calling result with onVideoSubscribeStateChanged{@link #IRTCRoomEventHandler#onVideoSubscribeStateChanged}. * - Once the local user subscribes to the stream of a remote user, the subscription to the remote user will sustain until the local user leaves the room or unsubscribe from it by calling subscribeStreamVideo{@link #RTCRoom#subscribeStreamVideo}. * - Any other exceptions will be included in onVideoSubscribeStateChanged{@link #IRTCRoomEventHandler#onVideoSubscribeStateChanged}, see SubscribeStateChangeReason{@link #SubscribeStateChangeReason} for the reasons. * */ subscribeStreamVideo(userId: string, subscribe: boolean): number; /** {en} * @detail api * @valid since 3.60. * @brief Subscribes to specific remote media streams captured by the local microphone. Or update the subscribe options of the subscribed user. * @param userId The ID of the remote user who published the target media stream. * @param subscribe Whether to subscribe to the audio stream. * @return API call result:
* - 0: Success. * - <0: Failure. See ReturnStatus{@link #ReturnStatus} for specific reasons. * @note * - Calling this API to update the subscribe configuration when the user has subscribed the remote user either by calling this API or by auto-subscribe. * - You must first get the remote stream information through onUserPublishStreamAudio{@link #IRTCRoomEventHandler#onUserPublishStreamAudio} before calling this API to subscribe to streams accordingly. * - After calling this API, you will be informed of the calling result with onAudioSubscribeStateChanged{@link #IRTCRoomEventHandler#onAudioSubscribeStateChanged}. * - Once the local user subscribes to the stream of a remote user, the subscription to the remote user will sustain until the local user leaves the room or unsubscribe from it by calling subscribeStreamAudio{@link #RTCRoom#subscribeStreamAudio}. * - Any other exceptions will be included in onAudioSubscribeStateChanged{@link #IRTCRoomEventHandler#onAudioSubscribeStateChanged}, see ErrorCode{@link #ErrorCode} for the reasons. * */ subscribeStreamAudio(userId: string, subscribe: boolean): number; /** {en} * @detail api * @valid since 3.60. * @brief Subscribes to specific screen sharing media stream. Or update the subscribe options of the subscribed user. * @param userId The ID of the remote user who published the target screen video stream. * @param subscribe Whether to subscribe to the screen video stream. * @return API call result:
* - 0: Success. * - <0: Failure. See ReturnStatus{@link #ReturnStatus} for specific reasons. * @note * - Calling this API to update the subscribe configuration when the user has subscribed the remote user either by calling this API or by auto-subscribe. * - You must first get the remote stream information through onUserPublishScreenVideo{@link #IRTCRoomEventHandler#onUserPublishScreenVideo} before calling this API to subscribe to streams accordingly. * - After calling this API, you will be informed of the calling result with onScreenVideoSubscribeStateChanged{@link #IRTCRoomEventHandler#onScreenVideoSubscribeStateChanged}. * - Once the local user subscribes to the stream of a remote user, the subscription to the remote user will sustain until the local user leaves the room or unsubscribe from it by calling subscribeScreenVideo{@link #RTCRoom#subscribeScreenVideo}. * - Any other exceptions will be included in onScreenVideoSubscribeStateChanged{@link #IRTCRoomEventHandler#onScreenVideoSubscribeStateChanged}, see SubscribeStateChangeReason{@link #SubscribeStateChangeReason} for the reasons. * */ subscribeScreenVideo(userId: string, subscribe: boolean): number; /** {en} * @detail api * @valid since 3.60. * @brief Subscribes to specific screen sharing media stream. Or update the subscribe options of the subscribed user. * @param userId The ID of the remote user who published the target screen audio stream. * @param subscribe Whether to subscribe to the screen audio stream. * @return API call result:
* - 0: Success. * - <0: Failure. See ReturnStatus{@link #ReturnStatus} for specific reasons. * @note * - Calling this API to update the subscribe configuration when the user has subscribed the remote user either by calling this API or by auto-subscribe. * - You must first get the remote stream information through onUserPublishScreenAudio{@link #IRTCRoomEventHandler#onUserPublishScreenAudio} before calling this API to subscribe to streams accordingly. * - After calling this API, you will be informed of the calling result with onScreenAudioSubscribeStateChanged{@link #IRTCRoomEventHandler#onScreenAudioSubscribeStateChanged}. * - Once the local user subscribes to the stream of a remote user, the subscription to the remote user will sustain until the local user leaves the room or unsubscribe from it by calling subscribeScreenAudio{@link #RTCRoom#subscribeScreenAudio}. * - Any other exceptions will be included in onScreenAudioSubscribeStateChanged{@link #IRTCRoomEventHandler#onScreenAudioSubscribeStateChanged}, see SubscribeStateChangeReason{@link #SubscribeStateChangeReason} for the reasons. * */ subscribeScreenAudio(userId: string, subscribe: boolean): number; /** {en} * @detail api * @region Multi-room * @author shenpengliang * @brief Pause receiving remote media streams. * @param mediaType Media stream type subscribed to. Refer to {@link PauseResumeControlMediaType PauseResumeControlMediaType} for more details. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Calling this API does not change the capture state and the transmission state of the remote clients. * - Calling this API does not cancel the subscription or change any subscription configuration. * - To resume, call {@link resumeAllSubscribedStream resumeAllSubscribedStream}. * - In a multi-room scenario, this API only pauses the reception of streams published in the current room. 
* */ pauseAllSubscribedStream(mediaType: PauseResumeControlMediaType): number; /** {en} * @detail api * @region Multi-room * @author shenpengliang * @brief Resume receiving remote media streams * @param mediaType Media stream type subscribed to. Refer to {@link PauseResumeControlMediaType PauseResumeControlMediaType} for more details. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Calling this API does not change the capture state and the transmission state of the remote clients. * - Calling this API does not change any subscription configuration. * */ resumeAllSubscribedStream(mediaType: PauseResumeControlMediaType): number; startForwardStreamToRooms(forwardStreamInfos: Array): number; updateForwardStreamToRooms(forwardStreamInfos: Array): number; /** {en} * @detail api * @region Multi-room * @author shenpengliang * @brief Call to this method to stop relaying media stream to all rooms after calling {@link startForwardStreamToRooms startForwardStreamToRooms}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note * - Call this method will trigger {@link onForwardStreamStateChanged onForwardStreamStateChanged}. * - The other users in the room will receive callback of {@link onUserJoined onUserJoined} and {@link onUserPublishStream onUserPublishStream}{@link onUserPublishScreen /onUserPublishScreen} when you stop relaying. * - To stop relaying media stream to specific rooms, call {@link updateForwardStreamToRooms updateForwardStreamToRooms} instead. * - To resume the relaying in a short time, call {@link pauseForwardStreamToAllRooms pauseForwardStreamToAllRooms} instead and then call {@link resumeForwardStreamToAllRooms resumeForwardStreamToAllRooms} to recsume after that. 
* */ stopForwardStreamToRooms(): number; /** {en} * @detail api * @region Multi-room * @author shenpengliang * @brief Call this method to pause relaying media stream to all rooms after calling {@link startForwardStreamToRooms startForwardStreamToRooms}.
 * After that, call {@link resumeForwardStreamToAllRooms resumeForwardStreamToAllRooms} to resume. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note The other users in the room will receive callback of {@link onUserUnpublishStream onUserUnpublishStream}/{@link onUserUnpublishScreen onUserUnpublishScreen} and {@link onUserLeave onUserLeave} when you pause relaying. * */ pauseForwardStreamToAllRooms(): number; /** {en} * @detail api * @region Multi-room * @author shenpengliang * @brief Call this method to resume relaying to all rooms from the pause by calling {@link pauseForwardStreamToAllRooms pauseForwardStreamToAllRooms}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note The other users in the room will receive callback of {@link onUserJoined onUserJoined} and {@link onUserPublishStream onUserPublishStream}/{@link onUserPublishScreen onUserPublishScreen} when you resume relaying. * */ resumeForwardStreamToAllRooms(): number; /** {en} * @detail api * @region Range Audio * @author luomingkang * @brief Gets range audio instance. * @return API call result:
* - IRangeAudio: Success. You will get an {@link IRangeAudio IRangeAudio} returned from the SDK. * - null: Failure. The current SDK does not offer range audio function. * @note The first time this API is called must be between you create a room and you actually enter the room. Refer to [Range Voice](https://docs.byteplus.com/byteplus-rtc/docs/114727) for more information. * */ getRangeAudio(): IRangeAudio; /** {en} * @detail api * @region Spatial voice * @author wangjunzheng * @brief Get location audio interface instances. * @return Spatial audio management interface instance. If NULL is returned, spatial audio is not supported. See {@link ISpatialAudio ISpatialAudio}. * @note * - The first time this API is called must be between you create a room and you actually enter the room. Refer to [Spatial Audio](https://docs.byteplus.com/byteplus-rtc/docs/93903) for more information. * - The spatial audio effect can only be turned on when using a device that supports true two-channel playback; * - In the case of poor network conditions, even if this function is turned on, the spatial audio effect will not be generated; * - Insufficient performance of the model may cause the audio card. When using a low-end machine, it is not recommended to turn on the spatial audio effect; * - Spatial audio effects do not take effect when server level routing is enabled. * */ getSpatialAudio(): ISpatialAudio; /** {en} * @detail api * @region multi-room * @author zhangcaining * @brief Adjusts the audio playback volume from all the remote users in a room. * @param volume Ratio(\%) of playback volume to original volume, in the range [0, 400], with overflow protection.
* To ensure the audio quality, we recommend setting the volume to `100`.
* - 0: mute * - 100: original volume. Default value. * - 400: Up to 4 times the original volume (with overflow protection) * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details * @note Suppose a remote user A is always within the range of the target user whose playback volume will be adjusted,
* - If you use both this method and {@link setRemoteAudioPlaybackVolume setRemoteAudioPlaybackVolume}, the volume that the local user hears from user A is the volume set by the method called later. * - If you use both this method and {@link setPlaybackVolume setPlaybackVolume}, the volume that the local user hears from user A is the overlay of both settings. * */ setRemoteRoomAudioPlaybackVolume(volume: number): number; /** {en} * @valid since 3.52 * @detail api * @region Room Management * @author yejing.luna * @brief Set the priority of the local audio stream to be published. * @param audioSelectionPriority The priority of the local audio stream which defaults to be subscribable only up to the result of the Audio Selection. Refer to {@link AudioSelectionPriority AudioSelectionPriority}. * @note You must enable Audio Selection in the RTC console before using this API. You can call this API whether the user has joined a room. Refer to [Audio Selection](https://docs.byteplus.com/byteplus-rtc/docs/113547).
* The setting is independent in each room that the user joins. * */ setAudioSelectionConfig(audioSelectionPriority: AudioSelectionPriority): number; /** {en} * @valid since 3.52 * @detail api * @region Room Management * @author lichangfeng.rtc * @brief Sets extra information about the room the local user joins. * @param key Key of the extra information, less than 10 bytes in length.
* A maximum of 5 keys can exist in the same room, beyond which the first key will be replaced. * @param value Content of the extra information, less than 128 bytes in length. * @return API call result:
* - 0: Success with a taskId returned. * - <0: Failure. See {@link SetRoomExtraInfoResult SetRoomExtraInfoResult} for the reasons. * @note * - Call {@link joinRoom joinRoom} first before you call this API to set extra information. * - After calling this API, you will receive {@link onSetRoomExtraInfoResult onSetRoomExtraInfoResult} callback informing you the result of the setting. * - After the extra information is successfully set, other users in the same room will receive the information through {@link onRoomExtraInfoUpdate onRoomExtraInfoUpdate} callback. * - Users who join the room later will be notified of all extra information in the room set prior to entering. * */ setRoomExtraInfo(key: string, value: string): number; /** {en} * @hidden currently not available * @detail api * @region Subtitle translation service * @author qiaoxingwang * @brief Recognizes or translates the speech of all speakers in the room and converts the results into captions.
* When calling this method, you can choose the subtitle mode in {@link SubtitleMode SubtitleMode}. If you choose the recognition mode, you will receive the {@link onSubtitleMessageReceived onSubtitleMessageReceived} callback which contains the transcribed text.
* If you choose the translation mode, you will receive two {@link onSubtitleMessageReceived onSubtitleMessageReceived} callbacks simultaneously, one contains the transcribed text and the other contains the translated text.
* After calling this method, you will receive the {@link onSubtitleStateChanged onSubtitleStateChanged} to inform you of whether subtitles are on. * @param subtitleConfig Subtitle configurations. Refer to {@link SubtitleConfig SubtitleConfig} for details. * @return * - 0: Success. * - !0: Failure. * @note * - Call this method after joining the room. * - You can set your source language to Chinese by calling `joinRoom` and importing a json formatted string `"source_language": "zh"` through the parameter of extraInfo, to English by importing `"source_language": "en"` , and to Japanese by importing `"source_language": "ja"` . If you don't set the source language, SDK will set the language of your system as the source language. If the language of your system is not Chinese, English or Japanese, SDK will set Chinese as the source language. * */ startSubtitle(subtitleConfig: SubtitleConfig): number; /** {en} * @hidden currently not available * @detail api * @region Subtitle translation service * @author qiaoxingwang * @brief Turns off subtitles.
* After calling this method, you will receive the {@link onSubtitleStateChanged onSubtitleStateChanged} to inform you of whether subtitles are off. * @return * - 0: Success. * - !0: Failure. * */ stopSubtitle(): number; /** {en} * @valid since 3.53 * @detail api * @author gechangwu * @brief Get room ID. * @return Room ID. * */ getRoomId(): string; } export declare class ISingScoringManager { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @author wangjunzheng * @brief Set the configuration of karaoke scoring. * @param config The parameters of karaoke scoring. See {@link SingScoringConfig SingScoringConfig}. * @return * - 0:Success. * - -1:Interface call failed. * - -2: Karaoke scoring module not integrated. * - >0: Other errors. For details, see[Error code]. * */ setSingScoringConfig(config: SingScoringConfig): number; /** {en} * @detail api * @author wangjunzheng * @brief Start karaoke scoring. * @param position You can get the playback position where you start karaoke scoring. Unit: ms. * @param scoringInfoInterval Time interval between two real-time callbacks. Unit: ms; Default interval: 50 ms. Minimum interval: 20 ms. * @return * - 0:Success. * - -1:Interface call failed. * - -2: Karaoke scoring module not integrated. * - >0: Other error. For details, see[Error code]. * @note * - You can call this API after calling {@link initSingScoring initSingScoring} to initialize karaoke scoring. * - After this interface is called, you will receive the scoring result {@link onCurrentScoringInfo onCurrentScoringInfo} at set interval. * - If you call the {@link startAudioMixing startAudioMixing} to play an audio file, call this interface after you receive {@link onAudioMixingStateChanged onAudioMixingStateChanged}(AUDIO_MIXING_STATE_PLAYING(1)). 
* */ startSingScoring(position: number, scoringInfoInterval: number): number; /** {en} * @detail api * @author wangjunzheng * @brief Stop karaoke scoring. * @return * - 0:Success. * - <0:Failure. * */ stopSingScoring(): number; /** {en} * @detail api * @author wangjunzheng * @brief Get the score for the previous lyric. You can call this API after {@link startSingScoring startSingScoring} is called. * @return * - <0:Failed to get the score for the previous lyric. * - >=0:The score for the previous lyric. * */ getLastSentenceScore(): number; /** {en} * @detail api * @author wangjunzheng * @brief Get the total score for the user's current performance.You can call this API after {@link startSingScoring startSingScoring} is called. * @return * - <0:Failed to get the total score. * - >=0:The current total score. * */ getTotalScore(): number; /** {en} * @detail api * @author wangjunzheng * @brief Get the average score for the user's current performance. * @return * - <0:Failed to get the average score. * - >=0:The average score. * */ getAverageScore(): number; } export declare class IKTVManager { protected _instance: any; /** * @platform ios */ /** * @platform ios */ get ios_delegate(): $p_i.id<$p_i.ByteRTCKTVManagerDelegate>; set ios_delegate(value: $p_i.id<$p_i.ByteRTCKTVManagerDelegate>); protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @platform android * @detail api * @author lihuan.wuti2ha * @brief Sets the KTV event handler. * @param ktvManagerEventHandler KTV event handler. See {@link IKTVManagerEventHandler IKTVManagerEventHandler}. * */ android_setKTVManagerEventHandler(ktvManagerEventHandler: $p_a.IKTVManagerEventHandler): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Sets the maximum cache for storing music files. * @param maxCacheSizeMB The maximum cache to be set in MB.
* If the setting value is less than or equal to 0, it will be adjusted to 1,024 MB. * */ setMaxCacheSize(maxCacheSizeMB: number): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Gets music detail. * @param musicId Music ID. * @note After calling this API, you will receive the music detail through {@link onMusicDetailResult onMusicDetailResult} callback. * */ getMusicDetail(musicId: string): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Download music. * @param musicId Music ID. * @return Download task ID. * @note * - If the music is successfully downloaded, you will receive {@link onDownloadSuccess onDownloadSuccess}. * - If the music fails to download, you will receive {@link onDownloadFailed onDownloadFailed}. * - When the music download progress is updated, you will receive {@link onDownloadMusicProgress onDownloadMusicProgress}. * */ downloadMusic(musicId: string): number; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Download lyrics. * @param musicId Music ID. * @param lyricType The lyrics file's format. See {@link DownloadLyricType DownloadLyricType}. * @return Download task ID. * @note * - If the lyrics are successfully downloaded, you will receive {@link onDownloadSuccess onDownloadSuccess}. * - If the lyrics fail to download, you will receive {@link onDownloadFailed onDownloadFailed}. * */ downloadLyric(musicId: string, lyricType: DownloadLyricType): number; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Download MIDI files. * @param musicId Music ID. * @return Download task ID. * @note * - If the file is successfully downloaded, you will receive {@link onDownloadSuccess onDownloadSuccess}. * - If the file fails to download, you will receive {@link onDownloadFailed onDownloadFailed}. * */ downloadMidi(musicId: string): number; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Cancels download task. * @param downloadId Download task ID. 
* */ cancelDownload(downloadId: number): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Clear music cache, including music and lyrics. * */ clearCache(): void; /** {en} * @detail api * @author lihuan.wuti2ha * @brief Gets the KTV player. * @return KTV player interfaces. See {@link IKTVPlayer IKTVPlayer}. * */ getKTVPlayer(): IKTVPlayer; } export declare class IRangeAudio { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @region Range Audio * @author chuzhongtao * @brief Enable/disable the range audio function.
* Range audio means that within a certain range in a same RTC room, the audio volume of the remote user received by the local user will be amplified/attenuated as the remote user moves closer/away. The audio coming from out of the range cannot be heard. See {@link updateReceiveRange updateReceiveRange} to set audio receiving range. * @param enable Whether to enable audio range function:
* - true: Enable * - false: Disable(Defaulting setting) * @note You can call this API anytime before or after entering a room. To ensure a smooth switch to the range audio mode after entering the room, you need to call {@link updatePosition updatePosition} before this API to set your own position coordinates, and then enable the range audio function. * */ enableRangeAudio(enable: boolean): void; /** {en} * @detail api * @region Range Audio * @author chuzhongtao * @brief Updates the coordinate of the local user's position in the rectangular coordinate system in the current room. * @param pos 3D coordinate values, the default value is [0, 0, 0], see [Position](70083#position-2). * @return API call result:
* - 0: Success * - !0: Failure * @note * - After calling this API, you should call {@link enableRangeAudio enableRangeAudio} to enable range audio function to actually enjoy the range audio effect. * */ updatePosition(pos: Position): number; /** {en} * @detail api * @region Range Audio * @author chuzhongtao * @brief Updates the audio receiving range for the local user. * @param range Audio receiving range, see {@link ReceiveRange ReceiveRange}. * @return API call result:
* - 0: Success * - !0: Failure * */ updateReceiveRange(range: ReceiveRange): number; /** {en} * @detail api * @region Range Audio * @author huangshouqin * @brief Set the volume roll-off mode that a 3D sound has in an audio source when using the Range Audio feature. * @param type Volume roll-off mode. It is linear roll-off mode by default. Refer to {@link AttenuationType AttenuationType} for more details. * @param coefficient Coefficient for the exponential roll-off mode. The default value is 1. It ranges [0.1,100]. We recommended to set it to `50`. The volume roll-off speed gets faster as this value increases. * @return Result of the call
* - `0`: Success * - `-1`: Failure because of calling this API before the user has joined a room or before enabling the Range Audio feature by calling {@link enableRangeAudio enableRangeAudio}. * @note Call {@link updateReceiveRange updateReceiveRange} to set the range outside which the volume of the sound does not attenuate. * */ setAttenuationModel(type: AttenuationType, coefficient: number): number; /** {en} * @detail api * @region Range Audio * @author chuzhongtao * @brief Set the flags to mark the user groups, within which the users talk without attenuation.
 * In the RTC room, if the flags of the users intersect with each other, the users talk without attenuation.
* For example, the user is a member of multiple teams, and teammates of the same team talks without attentuation. You can set the flag for each team, and includes the flags of the user's teams in the user's flags. * @param flags Array of flags. * */ setNoAttenuationFlags(flags: Array): void; } export declare class IAudioMixingManager { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; } export declare class IMediaPlayer { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @brief Open the audio file.
* You can only open one audio file with one player instance at the same time. For multiple audio files at the same time, create multiple player instances.
* For audio file in PCM format, see {@link openWithCustomSource openWithCustomSource}. `openWithCustomSource` and this API are mutually exclusive. * @param filePath Audio file paths.
* URL of online file, URI of local file, full path to local file, or path to local file starting with `/assets/` are supported. For URL of online file, only the https protocol is supported.
* Recommended sample rate for audio effect files: 8KHz、16KHz、22.05KHz、44.1KHz、48KHz.
* Local audio effect file formats supported by different platforms:
* * * * * *
mp3mp4aacm4a3gpwavoggtswma
AndroidYYYYYYY
iOS/macOSYYYYYY
WindowsYYYYYYYY
* Online audio effect file formats supported by different platforms.
* * * * * *
mp3mp4aacm4a3gpwavoggtswma
AndroidYYYYY
iOS/macOSYYYY
WindowsYYYYYYY
* @param config See {@link MediaPlayerConfig MediaPlayerConfig}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * */ open(filePath: string, config: MediaPlayerConfig): number; /** {en} * @detail api * @brief Start playing the audio. Call this API when you call {@link open open} and set `AutoPlay=False`. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - For audio file in PCM format, see {@link openWithCustomSource openWithCustomSource}. `openWithCustomSource` and this API are mutually exclusive. * - After calling this API, call {@link stop stop} to stop playing the audio file. * */ start(): number; /** {en} * @detail api * @brief Enable audio mixing with audio raw data.
* To open the audio file, see {@link open open}. `open` and this API are mutually exclusive. * @param source See {@link MediaPlayerCustomSource MediaPlayerCustomSource}. * @param config See {@link MediaPlayerConfig MediaPlayerConfig}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - After calling this API, you must call {@link pushExternalAudioFrame pushExternalAudioFrame} to push audio data and start the audio mixing. * - To stop the raw data audio mixing, call {@link stop stop}. * */ openWithCustomSource(source: MediaPlayerCustomSource, config: MediaPlayerConfig): number; /** {en} * @detail api * @brief After calling {@link open open}, {@link start start}, or {@link openWithCustomSource openWithCustomSource} to start audio mixing, call this method to stop audio mixing. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * */ stop(): number; /** {en} * @detail api * @brief After calling {@link open open}, or {@link start start} to start audio mixing, call this API to pause audio mixing. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - After calling this API to pause audio mixing, call {@link resume resume} to resume audio mixing. * - The API is valid for audio file, not PCM data. * */ pause(): number; /** {en} * @detail api * @brief After calling {@link pause pause} to pause audio mixing, call this API to resume audio mixing. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note The API is valid for audio file, not PCM data. * */ resume(): number; /** {en} * @detail api * @brief Adjusts the volume of the specified audio mixing, including media file mixing and PCM mixing. * @param volume The ratio of the volume to the original volume in \% with overflow protection. The range is `[0, 400]` and the recommended range is `[0, 100]`. 
* @param type See {@link AudioMixingType AudioMixingType}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note Call this API only when audio is mixing. * */ setVolume(volume: number, type: AudioMixingType): number; /** {en} * @detail api * @brief Gets the current volume. * @param type See {@link AudioMixingType AudioMixingType}. * @return * - >0: Success, the current volume. * - < 0: Failed. * @note Call this API only when audio is mixing, including media file mixing and PCM mixing. * */ getVolume(type: AudioMixingType): number; /** {en} * @detail api * @brief Gets the duration of the media file. * @return * - >0: Success, the duration of the media file in milliseconds. * - < 0: Failed. * @note * - Call this API only when audio is mixing. * - The API is valid for audio file, not PCM data. * */ getTotalDuration(): number; /** {en} * @detail api * @brief Gets the actual playback duration of the mixed media file, in milliseconds. * @return * - >0: Success, the actual playback time. * - < 0: Failed. * @note * - The actual playback time refers to the playback time of the song without being affected by stop, jump, double speed, and freeze. For example, if the song stops playing for 30 seconds at 1:30 or skips to 2:00, and then continues to play normally for 2 minutes, the actual playing time is 3 minutes and 30 seconds. * - Call this API only when audio is mixing and the interval set by {@link setProgressInterval setProgressInterval} is above `0`. * - The API is valid for audio file, not PCM data. * */ getPlaybackDuration(): number; /** {en} * @detail api * @brief Gets the playback progress of the media file. * @return * - >0: Success, the playback progress of media file in ms. * - < 0: Failed. * @note * - Call this API only when audio is mixing. * - The API is valid for audio file, not PCM data. * */ getPosition(): number; /** {en} * @detail api * @brief Set the pitch of the local audio file mixing. 
Usually used in karaoke scenes. * @param pitch The increase or decrease value compared with the original pitch of the music file. The range is `[-12, 12]`. The default value is 0. The pitch distance between two adjacent values is half a step. A positive value indicates a rising pitch, and a negative value indicates a falling pitch. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - Call this API only when audio is mixing. * - Support audio file only and not PCM data. * */ setAudioPitch(pitch: number): number; /** {en} * @detail api * @brief Sets the starting playback position of the media file. * @param position The starting position of the media file in milliseconds.
* You can get the total duration of the media file through {@link getTotalDuration getTotalDuration}. The value of position should be less than the total duration of the media file. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - The API is valid for audio file, not PCM data. * - When playing online files, calling this API may cause playback delay. * */ setPosition(position: number): number; /** {en} * @detail api * @brief Sets the channel mode of the mixing of the media file. * @param mode The mode of channel. The default channel mode is the same as the source file. See {@link AudioMixingDualMonoMode AudioMixingDualMonoMode}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - Call this API only when audio is mixing. * - Audio file is supported, but not PCM data. * */ setAudioDualMonoMode(mode: AudioMixingDualMonoMode): number; /** {en} * @detail api * @brief Gets the track count of the current media file. * @return + >= 0:Success. Return the track count of the current media file. * - < 0:Failed. * @note * - Call this API only when audio is mixing. * - This API is valid for audio file, not PCM data. * */ getAudioTrackCount(): number; /** {en} * @detail api * @brief Specifies the playback track of the current media file. * @param index The specified playback audio track, starting from 0, and the range is `[0, getAudioTrackCount()-1]`. The value must be less than the return value of {@link getAudioTrackCount getAudioTrackCount}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - Call this API only when audio is mixing. * - This API is valid for audio file, not PCM data. * */ selectAudioTrack(index: number): number; /** {en} * @detail api * @brief Set the playback speed of the audio file. 
* @param speed The ratio of the actual playback speed than that of the original speed of the audio file in \%. The range is `[50,200]`. The default value is 100. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - Call this API only when audio is mixing. * - The API is valid for audio file and not PCM data. * */ setPlaybackSpeed(speed: number): number; /** {en} * @detail api * @brief Set the interval of the periodic callback {@link onMediaPlayerPlayingProgress onMediaPlayerPlayingProgress} during audio mixing. * @param interval interval in ms.
* - interval > 0: The callback is enabled. The actual interval is `10*(mod(10)+1)`. * - interval <= 0: The callback is disabled. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - Call this API only when audio is mixing. * - This API is valid for audio file, not PCM data. * */ setProgressInterval(interval: number): number; /** {en} * @detail api * @brief To call {@link enableVocalInstrumentBalance enableVocalInstrumentBalance} to adjust the volume of the mixed media file or the PCM audio data, you must pass in its original loudness through this API. * @param loudness Original loudness in lufs. The range is `[-70.0, 0.0]`.
* When the value is less than -70.0lufs, it will be adjusted to -70.0lufs by default, and if it is more than 0.0lufs, the loudness will not be equalized. The default value is 1.0lufs, which means no processing. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - Call this API only when audio is mixing. * - The API is valid for audio file and PCM data. * */ setLoudness(loudness: number): number; /** {en} * @detail api * @brief Register an observer to receive related callbacks when the local media file is mixing. * @param observer See {@link IMediaPlayerAudioFrameObserver IMediaPlayerAudioFrameObserver}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note The API is valid for audio file, not PCM data. * */ registerAudioFrameObserver(observer: IMediaPlayerAudioFrameObserver): number; /** {en} * @detail api * @brief Push PCM audio frame data for mixing. * @param audioFrame See {@link AudioFrame AudioFrame}.
* - The audio sampling format must be S16. The data format within the audio buffer must be PCM, and its capacity size should be `audioFrame.samples × audioFrame.channel × 2`. * - A specific sample rate and the number of channels must be specified. Setting them to automatic is not supported. * @return * - 0: Success. * - < 0: Failed. * @note * - Before calling this method, the raw audio data mixing must be enabled through {@link openWithCustomSource openWithCustomSource}. * - Suggestions: Before pushing data for the first time, please cache a certain amount of data (like 200 ms) on the application side, and then push it at once. Schedule subsequent push operation every 10 ms with audio data of 10 ms. * - To pause the playback, just pause the push. * */ pushExternalAudioFrame(audioFrame: AudioFrame): number; /** {en} * @detail api * @brief Set the event handler. * @param handler See {@link IMediaPlayerEventHandler IMediaPlayerEventHandler}. * @return * - 0: Success. * - < 0: Failed. * */ setEventHandler(handler: IMediaPlayerEventHandler): number; } export declare class IAudioEffectPlayer { protected _instance: any; protected __init(...args: any[]): void; protected __new_instance(...args: any[]): any; /** {en} * @detail api * @brief Starts to play the audio effect file.
* This API can be called multiple times with different IDs and filepaths for multiple effects at the same time. * @param effectId Audio effect ID. Used for identifying the audio effect, please ensure that the audio effect ID is unique.
* If this API is called repeatedly with the same ID, the previous effect will stop and the next effect will start, and you'll receive {@link onAudioEffectPlayerStateChanged onAudioEffectPlayerStateChanged}. * @param filePath Audio effect file paths.
* URL of online file, URI of local file, full path to local file, or path to local file starting with `/assets/` are supported. For URL of online file, only the https protocol is supported.
* Recommended sample rate for audio effect files: 8KHz、16KHz、22.05KHz、44.1KHz、48KHz.
* Local audio effect file formats supported by different platforms:
* * * * * *
mp3mp4aacm4a3gpwavoggtswma
AndroidYYYYYYY
iOS/macOSYYYYYY
WindowsYYYYYYYY
* Online audio effect file formats supported by different platforms.
* * * * * *
mp3mp4aacm4a3gpwavoggtswma
AndroidYYYYY
iOS/macOSYYYY
WindowsYYYYYYY
* @param config See {@link AudioEffectPlayerConfig AudioEffectPlayerConfig}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - If the file has been loaded into memory via {@link preload preload}, make sure that the ID here is the same as the ID set by {@link preload preload}. * - After starting to play an audio effect file, you can call the {@link stop stop} API to stop playing the audio effect file. * */ start(effectId: number, filePath: string, config: AudioEffectPlayerConfig): number; /** {en} * @detail api * @brief Stops the playback of audio effect files. * @param effectId Audio effect ID * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - After calling the {@link start start} API to start playing the audio effect file, you can call this API to stop playing the audio effect file. * - After calling this API to stop playing an audio effect file, the audio effect file will be unloaded automatically. * */ stop(effectId: number): number; /** {en} * @detail api * @brief Stops playback of all audio effect files. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - After calling {@link start start} to start playing audio effect files, you can call this API to stop playing all audio effect files. * - After calling this API to stop playing all audio effect files, the audio effect files will be unloaded automatically. * */ stopAll(): number; /** {en} * @detail api * @brief Preloads specified music files into memory to avoid repeated loading when playing the same file frequently and reduce CPU usage. * @param effectId Audio effect ID。Used for identifying the audio effect. Please ensure that the audio effect ID is unique.
* If this API is called repeatedly with the same ID, the later one will overwrite the previous one.
* If you call {@link start start} first and then call this API with the same ID, the SDK will stop the previous effect and then load the next one, and you will receive {@link onAudioEffectPlayerStateChanged onAudioEffectPlayerStateChanged}.
* After calling this API to preload A.mp3, if you need to call {@link start start} to play B.mp3 with the same ID, please call {@link unload unload} to unload A.mp3 first, otherwise SDK will report an error AUDIO_MIXING_ERROR_LOAD_CONFLICT. * @param filePath The filepath of effect file. URI of local file, full path to local file, or path to local files starting with `/assets/` are supported.
* The length of the pre-loaded file must not exceed 20s.
* Audio effect file formats supported are the same as {@link start start}. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - This API just preloads the specified audio effect file, and only calls the {@link start start} API to start playing the specified audio effect file. * - The specified audio effect file preloaded by calling this API can be unloaded by {@link unload unload}. * */ preload(effectId: number, filePath: string): number; /** {en} * @detail api * @brief Unloads the specified audio effect file. * @param effectId Audio effect ID * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note Call this API after {@link start start} or {@link preload preload}. * */ unload(effectId: number): number; /** {en} * @detail api * @brief Unloads all audio effect files. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * */ unloadAll(): number; /** {en} * @detail api * @brief Pauses the playback of audio effect files. * @param effectId Audio effect ID * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - After calling the {@link start start} API to start playing the audio effect file, you can pause the audio effect file by calling this API. * - After calling this API to pause the audio effect file, you can call the {@link resume resume} API to resume playback. * */ pause(effectId: number): number; /** {en} * @detail api * @brief Pauses the Playback of all audio effect files. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - After calling the {@link start start} API to start playing audio effect files, you can pause playing all audio effect files by calling this API. 
* - After calling this API to pause the playback of all audio effect files, you can call the {@link resumeAll resumeAll} API to resume all playback. * */ pauseAll(): number; /** {en} * @detail api * @brief Resumes the playback of audio effect files. * @param effectId Audio effect ID * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note After calling the {@link pause pause} API to pause the audio effect file, you can resume playback by calling this API. * */ resume(effectId: number): number; /** {en} * @detail api * @brief Resumes the playback of all audio effect files. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note After calling the {@link pauseAll pauseAll} API to pause all the audio effect files being played, you can resume playback by calling this API. * */ resumeAll(): number; /** {en} * @detail api * @brief Sets the start position of the audio effect file. * @param effectId Audio effect ID * @param position The starting playback position of the audio effect file in milliseconds.
* You can get the total duration of the audio effect file by calling {@link getDuration getDuration}, the value of position should be less than the total duration of the audio effect file. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note * - When playing online files, calling this API may cause a delay in playback. * - Call this API after {@link start start}. * */ setPosition(effectId: number, position: number): number; /** {en} * @detail api * @brief Gets the current position of audio effect file playback. * @param effectId Audio effect ID * @return * - >0: Success, the current progress of audio effect file playback in milliseconds. * - < 0: Failure. * @note * - When playing online files, calling this API may cause a delay in playback. * - Call this API after {@link start start}. * */ getPosition(effectId: number): number; /** {en} * @detail api * @brief Adjusts the volume level of a specified audio effect, including audio effect file and PCM effect. * @param effectId Audio effect ID * @param volume The ratio of the volume to the original volume in \% with overflow protection. The range is `[0, 400]` and the recommended range is `[0, 100]`. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note Call this API after {@link start start}. * */ setVolume(effectId: number, volume: number): number; /** {en} * @detail api * @brief Sets the volume of all audio effect, including audio effect files and PCM effects. * @param volume The ratio of the volume to the original volume in \% with overflow protection. The range is `[0, 400]` and the recommended range is `[0, 100]`. * @return * - 0: Success. * - < 0 : Fail. See {@link ReturnStatus ReturnStatus} for more details. * @note This API has a lower priority than {@link setVolume setVolume}, i.e. the volume of the audio effect set by {@link setVolume setVolume} is not affected by this API. 
* */ setVolumeAll(volume: number): number; /** {en} * @detail api * @brief Gets the current volume. * @param effectId Audio effect ID * @return * - >0: Success, the current volume value. * - < 0: Failed. * @note Call this API after {@link start start}. * */ getVolume(effectId: number): number; /** {en} * @detail api * @brief Get the duration of the audio effect file. * @param effectId Audio effect ID * @return * - >0: Success, the duration of the audio effect file in milliseconds. * - < 0: failed. * @note Call this API after {@link start start}. * */ getDuration(effectId: number): number; /** {en} * @detail api * @brief Set the event handler. * @param handler See {@link IAudioEffectPlayerEventHandler IAudioEffectPlayerEventHandler}. * @return * - 0: Success. * - < 0: Failed. * */ setEventHandler(handler: IAudioEffectPlayerEventHandler): number; }