import { EFFramegen } from "../EF_FRAMEGEN.js";
import { EFSourceMixinInterface } from "./EFSourceMixin.js";
import { TemporalMixinInterface } from "./EFTemporal.js";
import { FetchMixinInterface } from "./FetchMixin.js";
import { ControllableInterface } from "../gui/Controllable.js";
import { UrlGenerator } from "../transcoding/utils/UrlGenerator.js";
import { MediaEngine } from "./EFMedia/MediaEngine.js";
import { AudioSpan } from "../transcoding/types/index.js";
import * as _$lit from "lit";
import { LitElement, PropertyValueMap } from "lit";

//#region src/elements/EFMedia.d.ts
declare global {
	var EF_FRAMEGEN: EFFramegen;
}
/**
 * Simple async value wrapper that mimics the Lit Task interface.
 * Used for backwards compatibility with code expecting task-like objects.
 */
declare class AsyncValue<T> {
	#private;
	get value(): T | undefined;
	get error(): Error | undefined;
	get status(): number;
	get taskComplete(): Promise<T>;
	/**
	 * Set the value (marks status as complete)
	 */
	setValue(value: T): void;
	/**
	 * Set an error (marks status as error)
	 */
	setError(error: Error): void;
	/**
	 * Start a new async operation
	 */
	startPending(): void;
	/**
	 * Run an async function and update status accordingly
	 */
	run(fn: () => Promise<T>): Promise<T>;
}
declare const EFMedia_base: (new (...args: any[]) => EFSourceMixinInterface) & (new (...args: any[]) => TemporalMixinInterface) & (new (...args: any[]) => FetchMixinInterface) & typeof LitElement;
declare class EFMedia extends EFMedia_base {
	#private;
	get efContext(): ControllableInterface | null;
	shouldAutoReady(): boolean;
	static readonly VIDEO_SAMPLE_BUFFER_SIZE = 90;
	static readonly AUDIO_SAMPLE_BUFFER_SIZE = 120;
	/**
	 * Which tracks this media element requires.
	 * Subclasses can override to specify their needs:
	 * - "audio" - Only needs the audio track (e.g., EFAudio)
	 * - "video" - Only needs the video track
	 * - "both" - Needs both tracks (default, for backwards compatibility)
	 *
	 * This is used during media engine creation to skip validation of
	 * tracks that won't be used, avoiding unnecessary network requests.
	 */
	get requiredTracks(): "audio" | "video" | "both";
	static get observedAttributes(): string[];
	static styles: _$lit.CSSResult[];
	attributeChangedCallback(name: string, oldValue: string | null, newValue: string | null): void;
	/**
	 * Duration in milliseconds for audio buffering ahead of the current time
	 * @domAttribute "audio-buffer-duration"
	 */
	audioBufferDurationMs: number;
	/**
	 * Maximum number of concurrent audio segment fetches for buffering
	 * @domAttribute "max-audio-buffer-fetches"
	 */
	maxAudioBufferFetches: number;
	/**
	 * Enable/disable the audio buffering system
	 * @domAttribute "enable-audio-buffering"
	 */
	enableAudioBuffering: boolean;
	/**
	 * Mute/unmute the media element
	 * @domAttribute "mute"
	 */
	mute: boolean;
	/**
	 * FFT size for frequency analysis
	 * @domAttribute "fft-size"
	 */
	fftSize: number;
	/**
	 * FFT decay rate for frequency analysis
	 * @domAttribute "fft-decay"
	 */
	fftDecay: number;
	/**
	 * FFT gain for frequency analysis
	 * @domAttribute "fft-gain"
	 */
	fftGain: number;
	/**
	 * Enable/disable frequency interpolation
	 * @domAttribute "interpolate-frequencies"
	 */
	interpolateFrequencies: boolean;
	getFreqWeights(): Float32Array;
	getShouldInterpolateFrequencies(): boolean;
	getUrlGenerator(): UrlGenerator;
	/**
	 * Async wrapper that mimics the Task interface for backwards compatibility.
	 * Code expecting mediaEngineTask.value, .taskComplete, .error, .status will still work.
	 */
	mediaEngineTask: AsyncValue<MediaEngine>;
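	// A minimal consumer sketch of the Task-like AsyncValue surface (hypothetical:
	// the "ef-media" tag name and the assumption that status codes mirror Lit's
	// TaskStatus are illustrative, not part of these declarations):
	//
	//   const media = document.querySelector("ef-media") as EFMedia;
	//   const engine = await media.getMediaEngine();
	//   await media.mediaEngineTask.taskComplete;   // Task-style awaiting also works
	//   if (media.mediaEngineTask.error) {
	//     console.error(media.mediaEngineTask.error);
	//   } else {
	//     console.log(media.mediaEngineTask.value === engine); // true once complete
	//   }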
	/**
	 * Get or create the MediaEngine for this element.
	 * Uses caching based on src/fileId to avoid redundant fetches.
	 */
	getMediaEngine(signal?: AbortSignal): Promise<MediaEngine>;
	/**
	 * Async wrapper for frequency data - mimics the Task interface for EFWaveform compatibility
	 */
	frequencyDataTask: AsyncValue<Uint8Array | null>;
	/**
	 * Async wrapper for time domain data - mimics the Task interface for EFWaveform compatibility
	 */
	byteTimeDomainTask: AsyncValue<Uint8Array | null>;
	/**
	 * Get frequency data for audio visualization at a given time.
	 */
	getFrequencyData(timeMs: number, signal?: AbortSignal): Promise<Uint8Array | null>;
	/**
	 * Get time domain data for audio visualization at a given time.
	 */
	getTimeDomainData(timeMs: number, signal?: AbortSignal): Promise<Uint8Array | null>;
	audioSegmentIdTask: AsyncValue<unknown>;
	audioInitSegmentFetchTask: AsyncValue<unknown>;
	audioSegmentFetchTask: AsyncValue<unknown>;
	audioInputTask: AsyncValue<unknown>;
	audioSeekTask: AsyncValue<unknown>;
	audioBufferTask: AsyncValue<unknown>;
	/**
	 * The unique identifier for the media file.
	 * This property can be set programmatically or via the "file-id" attribute.
	 * The "asset-id" attribute is also supported for backwards compatibility.
	 * @domAttribute "file-id"
	 */
	fileId: string | null;
	/** @deprecated Use fileId instead */
	get assetId(): string | null;
	set assetId(value: string | null);
	get intrinsicDurationMs(): number | undefined;
	/**
	 * Set a duration discovered via a lightweight index probe, without creating a
	 * full MediaEngine. This allows the sequence to compute startTimeMs for all
	 * clips before any rendering begins.
	 * @internal
	 */
	setDurationOnly(durationMs: number): void;
	/**
	 * Lightweight duration probe: fetches only the fragment index JSON (one HTTP
	 * request) and extracts durationMs without allocating SegmentTransport,
	 * TimingModel, or CachedFetcher. Much cheaper than getMediaEngine() when
	 * only the duration is needed (e.g. getRenderInfo).
	 *
	 * Falls back to getMediaEngine() for src-based elements (manifest mode), where
	 * there is no standalone index endpoint. File-id elements (the common case in
	 * the cloud render pipeline) always use the lightweight path.
	 * @internal
	 */
	getDurationOnly(signal?: AbortSignal): Promise<number>;
	protected updated(changedProperties: PropertyValueMap<any> | Map<PropertyKey, unknown>): void;
	get hasOwnDuration(): boolean;
	private _desiredSeekTimeMs;
	get desiredSeekTimeMs(): number;
	set desiredSeekTimeMs(value: number);
	protected executeSeek(seekToMs: number): Promise<void>;
	/**
	 * Main integration method for EFTimegroup audio playback.
	 * Now powered by clean, testable utility functions.
	 * Returns undefined if no audio rendition is available.
	 */
	fetchAudioSpanningTime(fromMs: number, toMs: number, signal?: AbortSignal): Promise<AudioSpan | undefined>;
	/**
	 * Wait for the media engine to load and determine duration.
	 * Ensures media is ready for playback.
	 */
	waitForMediaDurations(signal?: AbortSignal): Promise<void>;
	/**
	 * Returns media elements for playback audio rendering.
	 * For standalone media, returns [this]; for timegroups, returns all descendants.
	 * Used by PlaybackController for audio-driven playback.
	 */
	getMediaElements(): EFMedia[];
	/**
	 * Render an audio buffer for playback.
	 * Called by PlaybackController during live playback.
	 * Delegates to the shared renderTemporalAudio utility for consistent behavior.
	 */
	renderAudio(fromMs: number, toMs: number): Promise<void>;
}
//#endregion
export { AsyncValue, EFMedia };
//# sourceMappingURL=EFMedia.d.ts.map
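// A usage sketch of the duration probe and audio/visualization APIs (hypothetical:
// the element lookup, tag name, and time values are assumptions, not part of these
// declarations):
//
//   const media = document.querySelector("ef-media") as EFMedia;
//   const durationMs = await media.getDurationOnly();        // one HTTP request for file-id media
//   const freq = await media.getFrequencyData(1_000);        // Uint8Array | null at t = 1s
//   const span = await media.fetchAudioSpanningTime(0, 5_000);
//   if (span === undefined) console.warn("no audio rendition available");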