import EnhancedEventEmitter from "../EnhancedEventEmitter";
import { AudioSourceState } from "../types";

/** Shared WebAudio context used by AudioManager (instantiated in the implementation file). */
export declare const audioContext: AudioContext;
/** FFT size for the AnalyserNode (power of two, per the Web Audio API). */
export declare const FFT_SIZE = 2048;
/** Buffer size for the ScriptProcessorNode (power of two, per the Web Audio API). */
export declare const BUFFER_SIZE = 4096;
export declare const BUFFER_SAMPLE_RATE: number;

/**
 * Manages a WebAudio processing graph (source → gain → analyser/script
 * processor → destination) over one of three input kinds:
 * MediaStream, AudioBuffer, or an HTMLAudioElement.
 *
 * Emits events via EnhancedEventEmitter (event names are defined in the
 * implementation, not visible here).
 */
export declare class AudioManager extends EnhancedEventEmitter {
    /** Current input node, or null when no source has been set. */
    audioSource: MediaStreamAudioSourceNode | AudioBufferSourceNode | MediaElementAudioSourceNode | null;
    analyserNode: AnalyserNode;
    /**
     * Destination node whose `.stream` exposes the WebAudio-processed
     * output as a MediaStream. (Original comment had typo "MediaSteam".)
     */
    audioStream: MediaStreamAudioDestinationNode;
    gainNode: GainNode;
    /**
     * NOTE(review): ScriptProcessorNode is deprecated in the Web Audio API;
     * consider migrating to AudioWorklet when the implementation is revised.
     */
    scriptNode: ScriptProcessorNode;
    /**
     * Optional callback that receives PCM data (AudioBuffer chunks),
     * presumably invoked from the script processor's audioprocess handler
     * — confirm against the implementation.
     */
    audioBufferCallback?: (buffer: AudioBuffer) => any;
    audioBufferSize?: number;
    /** Public view of the source state; see private `_audioSourceState` backing field. */
    audioSourceState: AudioSourceState;
    /**
     * If the input source is an AudioBufferSourceNode, stores the original
     * AudioBuffer (needed to rebuild the one-shot source node on replay).
     */
    private audioSourceBuffer?;
    /**
     * If the input source is a media element (comment in the original said
     * "MediaElementSourceBuffer", likely meaning MediaElementAudioSourceNode),
     * stores the original HTMLAudioElement.
     */
    private audioSourceElement?;
    /** Loop flag applied to buffer/element playback; see setAudioSourceLoop(). */
    private audioSourceLoop?;
    private _audioSourceState;
    /** Tracks elapsed duration for the buffer source — semantics defined in the implementation. */
    private bufferSourceDuration;
    constructor();
    initAudioContext(): void;
    /** Use a live MediaStream as the input source. */
    setMediaStreamSource(originMediaStream: MediaStream): void;
    /** Switch input mode to AudioBuffer playback; the buffer itself is supplied via setAudioBuffer(). */
    setAudioBufferSource(): void;
    /** Use an HTMLAudioElement as the input source. */
    setMediaElementSource(mediaElement: HTMLAudioElement): void;
    setAudioSourceLoop(isLoop: boolean): void;
    setAudioBuffer(data: AudioBuffer): void;
    /** Start playback; `offset` is presumably a start position in seconds — confirm in implementation. */
    playAudioSource(offset?: number): void;
    resumeAudioSource(): void;
    pauseAudioSource(): void;
    /** Stop playback; `useToSeek` appears to distinguish a stop performed as part of seeking — confirm. */
    stopAudioSource(useToSeek?: boolean): void;
    /** Current playback position (seconds, per Web Audio convention — confirm). */
    getAudioSourceCurrentTime(): number;
    setAudioSourceCurrentTime(val: number): void;
    getAudioSourceDuration(): number;
    /** Tear down the graph and free resources. */
    release(): void;
    private connect;
    private handleAudioBuffer;
    private onAudioBuffer;
    private handleMediaElementEvents;
    private resetBufferSourceDuration;
}