///
import { AudioCache } from './AudioCache.js';
/**
 * Returns the shared {@link AudioCache} instance used alongside the audio
 * context manager. Declared ambiently here; implemented elsewhere.
 */
declare function getAudioCacheInstance(): AudioCache;
/**
* Global Web Audio Context manager
* Manages a single AudioContext instance for efficient audio playback
*
* ## iOS Safari Audio Workaround
*
* iOS Safari has a known issue where Web Audio API playback may not produce
* sound on real devices (works fine in simulator). The workaround routes audio
* through a MediaStreamDestination connected to an HTML5 Audio element:
*
* ```
* AudioBufferSourceNode → GainNode → MediaStreamDestination → HTML5 → Speakers
* ```
*
* This tricks iOS Safari into treating the audio as a "live stream" (like a video
* conference), which it allows to play reliably. The HTML5 audio element stays
* playing continuously, acting as a bridge between Web Audio and the actual speakers.
*
* ## Why This Works
*
* - iOS Safari blocks standard Web Audio destination output (silently fails)
* - MediaStreams are treated differently (needed for WebRTC/conferencing)
* - By routing through MediaStream, iOS allows the audio to reach speakers
* - Web Audio API still runs off main thread (smooth UI rendering)
*
* ## References
*
* - Community workaround: https://www.reddit.com/r/webdev/comments/1ldjqa1/comment/mymw7v3/
* - Used for background audio, PWAs, and continuous scanning scenarios
*/
declare class WebAudioContextManager {
    /** Lazily-created AudioContext; null until unlock() runs during a user gesture. */
    private audioContext;
    /** True once a scratch buffer has successfully played (audio is unlocked). */
    private isUnlocked;
    /** In-flight unlock promise, so concurrent unlock() calls share one attempt. */
    private unlockPromise;
    /** Number of unlock attempts made so far. */
    private unlockAttempts;
    /** Cap on unlock retries before giving up. */
    private readonly MAX_UNLOCK_ATTEMPTS;
    /** Sample rate observed on the current context; used to detect mid-session rate changes. */
    private lastKnownSampleRate;
    /** iOS-specific: MediaStreamDestination for routing audio */
    private mediaStreamDestination;
    /** iOS-specific: HTML5 audio element that plays the MediaStream */
    private streamAudioElement;
    /** Bound 'error' listener for the stream audio element — presumably retained so it can be removed in dispose(); confirm against implementation. */
    private handlerStreamAudioElementError;
    /** Bound 'pause' listener for the stream audio element — presumably retained so it can be removed in dispose(); confirm against implementation. */
    private handlerStreamAudioElementPause;
    /**
     * Get the AudioContext (must be created via unlock first on iOS)
     *
     * ## Sample Rate: Native Hardware Rate
     *
     * We let the browser use its native sample rate to prevent pitch shifting:
     *
     * **The Problem with Forcing 44.1kHz:**
     * - iOS may ignore the sampleRate parameter in AudioContext constructor
     * - This creates a mismatch between requested (44.1kHz) and actual sample rate
     * - When AudioContext uses different rate than what we requested, audio plays at wrong pitch
     * - This happens especially on iOS with dynamic audio routing
     *
     * **The Solution:**
     * - Let the browser choose its native sample rate (no sampleRate parameter)
     * - Detect the actual sample rate and remember it
     * - If sample rate changes during session, recreate context to match
     * - Web Audio API automatically resamples audio buffers to match
     * - Sound quality remains identical (transparent resampling)
     *
     * **Why This Works:**
     * - AudioContext will use the hardware's preferred rate
     * - No mismatch between requested and actual rates
     * - Prevents pitch shifting caused by rate mismatches
     * - More stable across different iOS devices and configurations
     */
    getContext(): AudioContext;
    /**
     * Get the audio destination node.
     *
     * On iOS: Returns MediaStreamDestination routed through HTML5 Audio element
     * On other platforms: Returns the standard AudioContext destination
     *
     * ## iOS MediaStreamDestination Workaround
     *
     * Why we need this:
     * - iOS Safari silently blocks Web Audio API output on real devices
     * - The AudioContext reports "running" and source.start() succeeds
     * - But no sound reaches the speakers (works fine in iOS Simulator)
     *
     * The solution:
     * 1. Create a MediaStreamDestination node from the AudioContext
     * 2. Create an HTML5 `<audio>` element
     * 3. Connect the audio element to the MediaStream via srcObject
     * 4. Start playing the audio element (it plays continuously as a live stream)
     * 5. Route all audio through this destination instead of context.destination
     *
     * Result:
     * - iOS Safari treats this as a "live stream" (like WebRTC)
     * - Live streams are allowed to play reliably on iOS
     * - The element acts as a bridge to the speakers
     * - Individual sounds play through the stream as they're triggered
     *
     * ## Lifecycle & Resource Management
     *
     * The stream audio element plays continuously for the page lifetime. This is
     * intentional because:
     * - Recreating it requires a user gesture on iOS (can't do on-demand)
     * - Resource usage is minimal when no audio is playing through it
     * - Stopping and restarting could fail due to iOS autoplay restrictions
     *
     * The element can be cleaned up by calling dispose() if needed, but this will
     * require a new user gesture to re-enable audio on iOS.
     *
     * Error handling: If the stream stops or errors unexpectedly, it automatically
     * attempts to restart to maintain audio functionality.
     *
     * @returns AudioDestinationNode (desktop) or MediaStreamAudioDestinationNode (iOS)
     */
    getDestination(): AudioDestinationNode | MediaStreamAudioDestinationNode;
    /** Handles an unexpected 'error' event from the stream audio element (attempts recovery). */
    onStreamAudioElementError(): void;
    /** Handles an unexpected 'pause' event from the stream audio element (attempts recovery). */
    onStreamAudioElementPause(): void;
    /**
     * Create AudioContext (must be called synchronously during a user gesture on iOS)
     */
    private createContext;
    /**
     * Unlock audio playback on iOS/mobile during user gesture.
     *
     * ## iOS Safari Requirements
     *
     * iOS Safari requires explicit user interaction before audio can play:
     *
     * 1. **AudioContext must be created during a user gesture**
     *    - Creating it later (async) will result in silent playback
     *    - Must happen in synchronous event handler (touchstart, click, etc.)
     *
     * 2. **AudioContext.resume() must be called during a user gesture**
     *    - Even if state is "running", calling resume() during gesture is required
     *    - Must be initiated synchronously (can complete asynchronously)
     *
     * 3. **Actual audio playback must occur during the unlock**
     *    - Just creating and resuming context isn't enough
     *    - Must play a buffer (even silent) to truly unlock audio
     *
     * ## Implementation Flow
     *
     * 1. User taps screen (event handler runs synchronously)
     * 2. Create AudioContext synchronously
     * 3. Call context.resume() synchronously (completes async)
     * 4. This async method plays a silent scratch buffer
     * 5. On iOS, this creates MediaStreamDestination on first call
     * 6. Mark as unlocked after successful playback
     *
     * @returns Promise that resolves when unlock is complete
     */
    unlock(): Promise<void>;
    /**
     * Get the unlock status
     */
    get unlocked(): boolean;
}
// Public module surface: the context manager class and the cache accessor.
export { WebAudioContextManager, getAudioCacheInstance };