/*!
 * Copyright (c) Microsoft Corporation and contributors. All rights reserved.
 * Licensed under the MIT License.
 */
import type { IBatchMessage } from "@fluidframework/container-definitions/internal";
import type { ITelemetryBaseLogger } from "@fluidframework/core-interfaces";
import type { ISequencedDocumentMessage } from "@fluidframework/driver-definitions/internal";
import type { IChunkedOp, OutboundBatchMessage, OutboundSingletonBatch } from "./definitions.js";
/**
 * Returns whether the given sequenced message is a chunked op.
 */
export declare function isChunkedMessage(message: ISequencedDocumentMessage): boolean;
/**
 * Responsible for creating and reconstructing chunked messages.
 */
export declare class OpSplitter {
    private readonly submitBatchFn;
    readonly chunkSizeInBytes: number;
    private readonly maxBatchSizeInBytes;
    private readonly chunkMap;
    private readonly logger;
    constructor(chunks: [string, string[]][], submitBatchFn: ((batch: IBatchMessage[], referenceSequenceNumber?: number) => number) | undefined, chunkSizeInBytes: number, maxBatchSizeInBytes: number, logger: ITelemetryBaseLogger);
    get isBatchChunkingEnabled(): boolean;
    // FIX: `ReadonlyMap` is generic and requires two type arguments; bare `ReadonlyMap`
    // does not compile. The element type follows from the constructor's
    // `chunks: [string, string[]][]` parameter (clientId -> accumulated chunk contents).
    get chunks(): ReadonlyMap<string, string[]>;
    clearPartialChunks(clientId: string): void;
    private addChunk;
    /**
     * Takes a singleton batch, and splits the interior message into chunks, sending the chunks separately and
     * returning a new singleton batch containing the last chunk.
     *
     * A compressed batch is formed by one large op at the first position.
     *
     * If the op is too large, it can be chunked (split into smaller op) which can be sent individually over the wire
     * and accumulate at ingestion, until the last op in the chunk is processed, when the original op is unrolled.
     *
     * This method will send the first N - 1 chunks separately and use the last chunk as the first message in the result batch.
     * This will ensure that the batch semantics of the original (non-compressed) batch are preserved, as the original chunked op
     * will be unrolled by the runtime when the first message in the batch is processed (as it is the last chunk).
     *
     * To illustrate the current functionality, if the input is `[largeOp]`, `largeOp` will be split into `[chunk1, chunk2, chunk3, chunk4]`.
     * `chunk1`, `chunk2` and `chunk3` will be sent individually and `[chunk4]` will be returned.
     *
     * @remarks A side effect here is that 1 or more chunks are queued immediately for sending in next JS turn.
     *
     * @privateRemarks
     * This maintains support for splitting a compressed batch with multiple messages (empty placeholders after the first),
     * but this is only used for Unit Tests so the typing has been updated to preclude that.
     * That code should be moved out of this function into a test helper.
     *
     * @param batch - the compressed batch which needs to be split into chunks before being sent over the wire
     * @returns A batch with the last chunk in place of the original complete compressed content
     */
    splitSingletonBatchMessage(batch: OutboundSingletonBatch): OutboundSingletonBatch;
    processChunk(message: ISequencedDocumentMessage): ProcessChunkResult;
}
/**
 * Result of {@link OpSplitter.processChunk}: either an intermediate chunk was
 * accumulated, or the final chunk arrived and the reassembled message is returned.
 */
type ProcessChunkResult = {
    readonly isFinalChunk: false;
} | {
    readonly isFinalChunk: true;
    readonly message: ISequencedDocumentMessage;
};
/**
 * Splits an op into smaller ops (chunks), based on the size of the op and the `chunkSizeInBytes` parameter.
 *
 * The last op of the result will be bundled with empty ops in the same batch. There is a risk of the batch payload
 * exceeding the 1MB limit due to the overhead from the empty ops. If the last op is large, the risk is even higher.
 * To minimize the odds, an extra empty op can be added to the result using the `extraOp` parameter.
 *
 * @param op - the op to be split
 * @param chunkSizeInBytes - how large should the chunks be
 * @param extraOp - should an extra empty op be added to the result
 * @returns an array of chunked ops
 */
export declare const splitOp: (op: OutboundBatchMessage, chunkSizeInBytes: number, extraOp?: boolean) => IChunkedOp[];
export {};
//# sourceMappingURL=opSplitter.d.ts.map