/**
 * Build the headers object sent with each dispatch request from the user-supplied
 * header settings (a static map, a factory function, or nothing).
 */
export declare const createHeaders: (headerSettings: AdditionalHeaders | undefined) => Record<string, string>;
/**
 * Additional headers to be sent with the request.
 * Default is `Content-Type: text/plain`. This can be overridden.
 * If a function is provided, it will be called before each request.
 */
// NOTE(review): the original declaration had bare `Record` (missing type args, a
// compile error). Restored as string -> string, the conventional header-map shape
// implied by the examples below — confirm against the implementation file.
export type AdditionalHeaders = Record<string, string> | (() => Record<string, string>);
export type RequestPriority = 'high' | 'low' | 'auto';
export type RequestCredentials = 'include' | 'same-origin' | 'omit';
/**
 * These are the options that can be passed to the fetch dispatcher.
 * They more/less map to the Fetch RequestInit type.
 */
interface DispatchFetchConfig {
    /**
     * Request credentials configuration
     *
     * @see https://developer.mozilla.org/en-US/docs/Web/API/Request/credentials
     */
    credentials?: RequestCredentials;
    /**
     * This is useful for ensuring that an event is sent even if the user navigates away from the page.
     * However, it may increase the likelihood of events being lost, as there is a 64kb limit for *all* fetch requests
     * (not just ones to segment) with keepalive (which is why it's disabled by default). So, if you're sending a lot
     * of data, this will likely cause events to be dropped.
     * @default false
     */
    keepalive?: boolean;
    /**
     * Additional headers to be sent with the request.
     * Default is `Content-Type: text/plain`. This can be overridden.
     * If a function is provided, it will be called before each request.
     * @example { 'Content-Type': 'application/json' } or () => { 'Content-Type': 'application/json' }
     */
    headers?: AdditionalHeaders;
    /**
     * 'Request Priority' of the request
     * @see https://developer.mozilla.org/en-US/docs/Web/API/RequestInit#priority
     */
    priority?: RequestPriority;
}
export interface BatchingDispatchConfig extends DispatchFetchConfig {
    /**
     * If strategy = 'batching', the maximum number of events to send in a single request.
     * If the batch reaches this size, a request will automatically be sent.
     *
     * @default 10
     */
    size?: number;
    /**
     * If strategy = 'batching', the maximum time, in milliseconds, to wait before sending a request.
     * This won't always be relevant, as the request will be sent when the size is reached.
     * However, if the size is never reached, the request will be sent after this time.
     * When it comes to retries, if there is a rate limit timeout header, that will be respected over the value here.
     *
     * @default 5000
     */
    timeout?: number;
    /**
     * If strategy = 'batching', the maximum number of retries to attempt before giving up.
     * @default 10
     */
    maxRetries?: number;
}
export interface StandardDispatcherConfig extends DispatchFetchConfig {
}
export type DeliveryStrategy = {
    strategy?: 'standard';
    config: StandardDispatcherConfig;
} | {
    strategy: 'batching';
    config?: BatchingDispatchConfig;
};
export interface RateLimitConfig {
    /**
     * Kept for cross-SDK config parity (mobile/server).
     * Browser SDK already had rate-limit handling before this config and currently keeps existing behavior.
     * @default true
     */
    enabled?: boolean;
    /** Max retry attempts for rate-limited requests. @default 10 */
    maxRetryCount?: number;
    /** Max Retry-After interval the SDK will respect, in seconds. @default 300 */
    maxRetryInterval?: number;
    /** Max total time (seconds) rate-limited retries can continue before dropping. @default 43200 (12 hours) */
    maxRateLimitDuration?: number;
}
export interface BackoffConfig {
    /**
     * Kept for cross-SDK config parity (mobile/server).
     * Browser SDK already had backoff behavior before this config and currently keeps existing behavior.
     * @default true
     */
    enabled?: boolean;
    /** Max retry attempts per batch. @default 10 */
    maxRetryCount?: number;
    /** Initial backoff interval in seconds. @default 0.5 */
    baseBackoffInterval?: number;
    /** Max backoff interval in seconds. @default 60 */
    maxBackoffInterval?: number;
    /** Max total time (seconds) a batch can remain in retry before being dropped. @default 43200 (12 hours) */
    maxTotalBackoffDuration?: number;
    /** Jitter percentage (0-100) added to backoff calculations to prevent thundering herd. @default 10 */
    jitterPercent?: number;
    /** Default behavior for 4xx responses. @default "drop" */
    default4xxBehavior?: 'drop' | 'retry';
    /** Default behavior for 5xx responses. @default "retry" */
    default5xxBehavior?: 'drop' | 'retry';
    /** Per-status-code behavior overrides. Keys are HTTP status codes as strings. */
    // NOTE(review): bare `Record` in the original (compile error); type args restored from
    // this field's own doc comment (string status-code keys) and the drop/retry union used
    // by the sibling default4xxBehavior/default5xxBehavior fields.
    statusCodeOverrides?: Record<string, 'drop' | 'retry'>;
}
export interface HttpConfig {
    rateLimitConfig?: RateLimitConfig;
    backoffConfig?: BackoffConfig;
}
/** RateLimitConfig with all defaults applied (see resolveHttpConfig). */
export interface ResolvedRateLimitConfig {
    maxRetryCount: number;
    maxRetryInterval: number;
    maxRateLimitDuration: number;
}
/** BackoffConfig with all defaults applied (see resolveHttpConfig). */
export interface ResolvedBackoffConfig {
    maxRetryCount: number;
    baseBackoffInterval: number;
    maxBackoffInterval: number;
    maxTotalBackoffDuration: number;
    jitterPercent: number;
    default4xxBehavior: 'drop' | 'retry';
    default5xxBehavior: 'drop' | 'retry';
    // NOTE(review): bare `Record` restored — same reasoning as BackoffConfig.statusCodeOverrides.
    statusCodeOverrides: Record<string, 'drop' | 'retry'>;
}
export interface ResolvedHttpConfig {
    rateLimitConfig: ResolvedRateLimitConfig;
    backoffConfig: ResolvedBackoffConfig;
}
/**
 * Parse the Retry-After header from a response, if present and applicable.
 * Returns `{ retryAfterMs, fromHeader }` when a valid delay is found, or `null` otherwise.
 */
export declare function parseRetryAfter(res: {
    status: number;
    headers?: {
        get(name: string): string | null;
    };
}, rateLimitConfig: ResolvedRateLimitConfig): {
    retryAfterMs: number;
    fromHeader: boolean;
} | null;
/**
 * Determine whether a given HTTP status code should cause a retry or a drop,
 * based on the resolved backoff configuration.
 */
export declare function getStatusBehavior(status: number, backoffConfig: ResolvedBackoffConfig): 'drop' | 'retry';
/**
 * Compute an exponential backoff delay in milliseconds for the given attempt.
 * Attempt is 1-based (first retry = 1).
 */
export declare function computeBackoff(attempt: number, config: ResolvedBackoffConfig): number;
/**
 * Resolve an optional HttpConfig from CDN/user settings into a fully-populated
 * config object with defaults applied and values clamped to safe ranges.
 */
export declare function resolveHttpConfig(config?: HttpConfig, cdnConfig?: HttpConfig): ResolvedHttpConfig;
export {};
//# sourceMappingURL=shared-dispatcher.d.ts.map