// NOTE(review): This declaration file had been whitespace-collapsed and many generic
// type arguments were stripped (bare `Promise`, `Set`, `Record` — all compile errors).
// Arguments restored below are grounded in this file's own JSDoc where possible;
// the less certain ones carry inline NOTE(review) markers — confirm against the
// implementation (observational-memory.ts) before relying on them.
import type { MastraDBMessage, MessageList } from '@mastra/core/agent';
import type { ObservabilityContext } from '@mastra/core/observability';
import type { ProcessorStreamWriter } from '@mastra/core/processors';
import { MessageHistory } from '@mastra/core/processors';
import type { RequestContext } from '@mastra/core/request-context';
import type { MemoryStorage, ObservationalMemoryRecord, ObservationalMemoryHistoryOptions } from '@mastra/core/storage';
import { BufferingCoordinator } from './buffering-coordinator.js';
import { ObservationTurn } from './observation-turn/index.js';
import type { ObservationTurnHooks } from './observation-turn/types.js';
import { ObserverRunner } from './observer-runner.js';
import type { CompressionLevel } from './reflector-agent.js';
import { ReflectorRunner } from './reflector-runner.js';
import { TokenCounter } from './token-counter.js';
import type { ObservationDebugEvent, ObservationalMemoryConfig, ObserveHookUsage, ObserveHooks, ResolvedObservationConfig, ResolvedReflectionConfig, ThresholdRange, ObservationModelContext } from './types.js';
export { didProviderChange } from './model-context.js';
/**
 * Returns the parts from the latest step of a message (after the last step-start marker).
 * If no step-start marker exists, returns all parts.
 */
export declare function getLatestStepParts(parts: MastraDBMessage['content']['parts']): MastraDBMessage['content']['parts'];
/**
 * Build a messageRange string from the first and last messages that have visible
 * content. Falls back to the full array boundaries when every message is data-only.
 */
export declare function buildMessageRange(messages: MastraDBMessage[]): string;
/**
 * Returns the unix-ms timestamp of the last non-data part in the last assistant
 * message, representing when the last visible LLM response completed. Used as the
 * last activity time for activateAfterIdle checks.
 */
export declare function getLastActivityFromMessages(messages?: MastraDBMessage[]): number | undefined;
export declare function getLastModelFromMessages(messages?: MastraDBMessage[]): string | undefined;
export declare function getCurrentModel(model?: {
    provider?: string;
    modelId?: string;
}): string | undefined;
/**
 * ObservationalMemory - A three-agent memory system for long conversations.
 *
 * This processor:
 * 1. On input: Injects observations into context, filters out observed messages
 * 2. On output: Tracks new messages, triggers Observer/Reflector when thresholds hit
 *
 * The Actor (main agent) sees:
 * - Observations (compressed history)
 * - Suggested continuation message
 * - Recent unobserved messages
 *
 * @example
 * ```ts
 * import { ObservationalMemory } from '@mastra/memory/processors';
 *
 * // Minimal configuration
 * const om = new ObservationalMemory({ storage });
 *
 * // Full configuration
 * const om = new ObservationalMemory({
 *   storage,
 *   model: 'google/gemini-2.5-flash', // shared model for both agents
 *   shareTokenBudget: true,
 *   observation: {
 *     messageTokens: 30_000,
 *     modelSettings: { temperature: 0.3 },
 *   },
 *   reflection: {
 *     observationTokens: 40_000,
 *   },
 * });
 *
 * const agent = new Agent({
 *   inputProcessors: [om],
 *   outputProcessors: [om],
 * });
 * ```
 */
export declare class ObservationalMemory {
    private storage;
    private tokenCounter;
    readonly scope: 'resource' | 'thread';
    /** Whether retrieval-mode observation groups are enabled. */
    readonly retrieval: boolean;
    private observationConfig;
    private reflectionConfig;
    private onDebugEvent?;
    /** Fire-and-forget indexing hook; no documented return value, so `void` is assumed. */
    readonly onIndexObservations?: (observation: {
        text: string;
        groupId: string;
        range: string;
        threadId: string;
        resourceId: string;
        observedAt?: Date;
    }) => Promise<void>;
    /** Observer agent runner — handles LLM calls for extracting observations. */
    readonly observer: ObserverRunner;
    /** Reflector agent runner — handles LLM calls for compressing observations. */
    readonly reflector: ReflectorRunner;
    /** Buffering state coordinator — manages static maps and buffering lifecycle. */
    readonly buffering: BufferingCoordinator;
    private shouldObscureThreadIds;
    private hasher;
    /**
     * Track message IDs observed during this instance's lifetime.
     * Prevents re-observing messages when per-thread lastObservedAt cursors
     * haven't fully advanced past messages observed in a prior cycle.
     * @internal Used by observation strategies. Do not call directly.
     */
    observedMessageIds: Set<string>;
    /** Internal MessageHistory for message persistence */
    private messageHistory;
    /**
     * In-memory mutex for serializing observation/reflection cycles per resource/thread.
     * Prevents race conditions where two concurrent cycles could both read isObserving=false
     * before either sets it to true, leading to lost work.
     *
     * Key format: "resource:{resourceId}" or "thread:{threadId}"
     * Value: Promise that resolves when the lock is released
     *
     * NOTE: This mutex only works within a single Node.js process. For distributed
     * deployments, external locking (Redis, database locks) would be needed, or
     * accept eventual consistency (acceptable for v1).
     */
    private locks;
    /**
     * Acquire a lock for the given key, execute the callback, then release.
     * If a lock is already held, waits for it to be released before acquiring.
     */
    private withLock;
    constructor(config: ObservationalMemoryConfig);
    /**
     * Get the current configuration for this OM instance.
     * Used by the server to expose config to the UI when OM is added via processors.
     */
    get config(): {
        scope: 'resource' | 'thread';
        retrieval: boolean;
        observation: {
            messageTokens: number | ThresholdRange;
            previousObserverTokens: number | false | undefined;
        };
        reflection: {
            observationTokens: number | ThresholdRange;
        };
    };
    /**
     * Wait for any in-flight async buffering operations for the given thread/resource.
     * Used by server endpoints to block until buffering completes so the UI can get final state.
     */
    // NOTE(review): `void` assumed — could plausibly be `boolean` (timed out?); confirm.
    waitForBuffering(threadId: string | null | undefined, resourceId: string | null | undefined, timeoutMs?: number): Promise<void>;
    private getConcreteModel;
    private getModelToResolve;
    private formatModelName;
    private resolveObservationModel;
    private resolveReflectionModel;
    private resolveTieredModel;
    private resolveModelRouting;
    private resolveModelContext;
    /**
     * Get the default compression start level based on model behavior.
     * gemini-2.5-flash is a faithful transcriber that needs explicit pressure to compress effectively.
     */
    getCompressionStartLevel(requestContext?: RequestContext): Promise<CompressionLevel>;
    /**
     * Get the full config including resolved model names.
     * This is async because it needs to resolve the model configs.
     */
    getResolvedConfig(requestContext?: RequestContext): Promise<{
        scope: 'resource' | 'thread';
        observation: {
            messageTokens: number | ThresholdRange;
            model: string;
            previousObserverTokens: number | false | undefined;
            routing?: Array<{
                upTo: number;
                model: string;
            }>;
        };
        reflection: {
            observationTokens: number | ThresholdRange;
            model: string;
            routing?: Array<{
                upTo: number;
                model: string;
            }>;
        };
    }>;
    /**
     * Emit a debug event if the callback is configured.
     * @internal Used by observation strategies. Do not call directly.
     */
    emitDebugEvent(event: ObservationDebugEvent): void;
    /**
     * Validate buffer configuration on first use.
     * Ensures bufferTokens is less than the threshold and bufferActivation is valid.
     */
    private validateBufferConfig;
    /**
     * Resolve the effective messageTokens for a record.
     * Only explicit per-record overrides (stored under `_overrides`) win;
     * the initial config snapshot written by getOrCreateRecord() is ignored
     * so that later instance-level changes still take effect.
     *
     * Overrides that fall below the instance-level buffering floor
     * (bufferTokens / absolute bufferActivation) are clamped to the
     * instance threshold to preserve buffering invariants.
     */
    private getEffectiveMessageTokens;
    /**
     * Resolve the effective reflection observationTokens for a record.
     * Only explicit per-record overrides (stored under `_overrides`) win;
     * the initial config snapshot is ignored so instance-level changes
     * still take effect for existing records.
     */
    private getEffectiveReflectionTokens;
    /**
     * Check whether the unobserved message tokens meet the observation threshold.
     */
    private meetsObservationThreshold;
    /**
     * Get thread/resource IDs for storage lookup
     */
    private getStorageIds;
    /**
     * Get or create the observational memory record.
     * Returns the existing record if one exists, otherwise initializes a new one.
     */
    getOrCreateRecord(threadId: string, resourceId?: string): Promise<ObservationalMemoryRecord>;
    /**
     * Get current config snapshot for observation markers.
     */
    private getObservationMarkerConfig;
    /**
     * Persist a data-om-* marker part on the last assistant message in messageList
     * AND save the updated message to the DB so it survives page reload.
     * (data-* parts are filtered out before sending to the LLM, so they don't affect model calls.)
     * @internal Used by ReflectorRunner. Do not call directly.
     */
    persistMarkerToMessage(marker: {
        type: string;
        data: unknown;
    }, messageList: MessageList | undefined, threadId: string, resourceId?: string): Promise<void>;
    /**
     * Persist a marker to the last assistant message in storage.
     * Unlike persistMarkerToMessage, this fetches messages directly from the DB
     * so it works even when no MessageList is available (e.g. async buffering ops).
     * @internal Used by observation strategies. Do not call directly.
     */
    persistMarkerToStorage(marker: {
        type: string;
        data: unknown;
    }, threadId: string, resourceId?: string): Promise<void>;
    /**
     * Find the last completed observation boundary in a message's parts.
     * A completed observation is a start marker followed by an end marker.
     *
     * Returns the index of the END marker (which is the observation boundary),
     * or -1 if no completed observation is found.
     */
    /**
     * Check if a message has an in-progress observation (start without end).
     */
    private hasInProgressObservation;
    /**
     * Seal messages to prevent new parts from being merged into them.
     * This is used when starting buffering to capture the current content state.
     *
     * Sealing works by:
     * 1. Setting `message.content.metadata.mastra.sealed = true` (message-level flag)
     * 2. Adding `metadata.mastra.sealedAt` to the last part (boundary marker)
     *
     * When MessageList.add() receives a message with the same ID as a sealed message,
     * it creates a new message with only the parts beyond the seal boundary.
     *
     * The messages are mutated in place - since they're references to the same objects
     * in the MessageList, the seal will be recognized immediately.
     *
     * @param messages - Messages to seal (mutated in place)
     */
    /** @internal Used by ObservationStep. */
    sealMessagesForBuffering(messages: MastraDBMessage[]): void;
    /**
     * Insert an observation marker into a message.
     * The marker is appended directly to the message's parts array (mutating in place).
     * Also persists the change to storage so markers survive page refresh.
     *
     * For end/failed markers, the message is also "sealed" to prevent future content
     * from being merged into it. This ensures observation markers are preserved.
     */
    /**
     * Insert an observation marker into a message.
     * For start markers, this pushes the part directly.
     * For end/failed markers, this should be called AFTER writer.custom() has added the part,
     * so we just find the part and add sealing metadata.
     */
    /**
     * Create a virtual message containing only the unobserved parts.
     * This is used for token counting and observation.
     */
    private createUnobservedMessage;
    /**
     * Get unobserved messages with part-level filtering.
     *
     * This method uses data-om-observation-end markers to filter at the part level:
     * 1. For messages WITH a completed observation: only return parts AFTER the end marker
     * 2. For messages WITHOUT completed observation: check timestamp against lastObservedAt
     *
     * This handles the case where a single message accumulates many parts
     * (like tool calls) during an agentic loop - we only observe the new parts.
     */
    /** @internal Used by ObservationStep. */
    getUnobservedMessages(allMessages: MastraDBMessage[], record: ObservationalMemoryRecord, opts?: {
        excludeBuffered?: boolean;
    }): MastraDBMessage[];
    /**
     * Prepare optimized observer context by applying truncation and buffered-reflection inclusion.
     *
     * Returns the (possibly optimized) observations string to pass as "Previous Observations"
     * to the observer prompt. When no optimization options are set, returns the input unchanged.
     */
    prepareObserverContext(existingObservations: string | undefined, record?: ObservationalMemoryRecord | null): {
        context: string | undefined;
        wasTruncated: boolean;
    };
    /**
     * Truncate observations to fit within a token budget.
     *
     * Strategy:
     * 1. Keep a raw tail of recent observations (end of block).
     * 2. Add a truncation marker: [X observations truncated here], placed at the hidden gap.
     * 3. Try to preserve important observations (🔴) from older context, newest-first.
     * 4. Enforce that at least 50% of kept observations remain raw tail observations.
     */
    private truncateObservationsToTokenBudget;
    /**
     * Format observations for injection into context.
     * Applies token optimization before presenting to the Actor.
     *
     * In resource scope mode, filters continuity messages to only show
     * the message for the current thread.
     */
    private formatObservationsForContext;
    private splitObservationContextChunks;
    /**
     * Create a message boundary delimiter with an ISO 8601 date.
     * The date should be the lastObservedAt timestamp — the latest message
     * timestamp that was observed to produce the observations following this boundary.
     */
    static createMessageBoundary(date: Date): string;
    /**
     * Get threadId and resourceId from either RequestContext or MessageList
     */
    getThreadContext(requestContext: RequestContext | undefined, messageList: MessageList): {
        threadId: string;
        resourceId?: string;
    } | null;
    /**
     * Save messages to storage, skipping messages that were already persisted by
     * async buffering. Uses the message-level sealed flag (metadata.mastra.sealed)
     * to detect already-persisted messages, avoiding redundant DB operations.
     *
     * Messages with observation markers are always saved (upserted) even if sealed,
     * because the markers need to be persisted to storage.
     */
    persistMessages(messagesToSave: MastraDBMessage[], threadId: string, resourceId: string | undefined): Promise<void>;
    /**
     * Load messages from storage that haven't been observed yet.
     * Uses cursor-based query with lastObservedAt timestamp for efficiency.
     *
     * In resource scope mode, loads messages for the entire resource (all threads).
     * In thread scope mode, loads messages for just the current thread.
     */
    private loadMessagesFromStorage;
    /**
     * Format unobserved messages from other threads as blocks.
     * These are injected into the Actor's context so it has awareness of activity
     * in other threads for the same resource.
     */
    private formatUnobservedContextBlocks;
    private representThreadIDInContext;
    /**
     * Get the maximum createdAt timestamp from a list of messages.
     * Used to set lastObservedAt to the most recent message timestamp instead of current time.
     * This ensures historical data (like LongMemEval fixtures) works correctly.
     */
    private getMaxMessageTimestamp;
    /**
     * Wrap observations in a thread attribution tag.
     * Used in resource scope to track which thread observations came from.
     * @internal Used by observation strategies. Do not call directly.
     */
    wrapWithThreadTag(threadId: string, observations: string, messageRange?: string): Promise<string>;
    /**
     * Append or merge new thread sections.
     * If the new section has the same thread ID and date as an existing section,
     * merge the observations into that section to reduce token usage.
     * Otherwise, append as a new section.
     */
    private replaceOrAppendThreadSection;
    /**
     * @internal Used by observation strategies. Do not call directly.
     */
    wrapObservations(rawObservations: string, existingObservations: string, threadId: string, lastObservedAt?: Date, messageRange?: string): Promise<string> | string;
    /**
     * Start an async background observation that stores results to bufferedObservations.
     * This is a fire-and-forget operation that runs in the background.
     * The results will be swapped to active when the main threshold is reached.
     *
     * If another buffering operation is already in progress for this scope, this will
     * wait for it to complete before starting a new one (mutex behavior).
     *
     * @param record - Current OM record
     * @param threadId - Thread ID
     * @param unobservedMessages - All unobserved messages (will be filtered for already-buffered)
     * @param lockKey - Lock key for this scope
     * @param writer - Optional stream writer for emitting buffering markers
     */
    private startAsyncBufferedObservation;
    /**
     * Internal method that waits for existing buffering operation and then runs new buffering.
     * This implements the mutex-wait behavior.
     */
    private runAsyncBufferedObservation;
    /**
     * Trigger async buffered observation if the token count has crossed a new interval.
     *
     * Encapsulates the shouldTrigger check + startAsyncBufferedObservation call.
     * Returns whether buffering was actually triggered.
     */
    triggerAsyncBuffering(opts: {
        threadId: string;
        resourceId?: string;
        record: ObservationalMemoryRecord;
        pendingTokens: number;
        unbufferedPendingTokens: number;
        unobservedMessages: MastraDBMessage[];
        threshold: number;
        writer?: ProcessorStreamWriter;
        requestContext?: RequestContext;
        observabilityContext?: ObservabilityContext;
    }): Promise<boolean>;
    private isMessageList;
    private removeIdsFromArray;
    /**
     * Mutate partially observed messages in place and return the fully observed
     * message IDs that should be removed from the live context.
     *
     * This is the shared activation-cleanup primitive used by both the processor
     * and AI SDK integrations: callers pass the current live messages, OM trims
     * any partially observed messages down to their unobserved parts, and OM
     * returns only the IDs that are safe to remove entirely.
     */
    getObservedMessageIdsForCleanup(opts: {
        threadId: string;
        resourceId?: string;
        messages: MastraDBMessage[];
        observedMessageIds?: string[];
        retentionFloor?: number;
    }): Promise<string[]>;
    /**
     * Clean up observed content from either a live MessageList or a plain message array.
     *
     * - MessageList input: mutates the live container in place and returns the remaining messages
     * - Array input: mutates the array in place and returns it
     *
     * This is the shared cleanup primitive intended for both processor and non-processor
     * integrations. The processor may still pass sealedIds/state so marker/fallback cleanup
     * can persist messages safely, but callers that do not need that bookkeeping can omit it.
     */
    /** @internal Used by ObservationStep. */
    cleanupMessages(opts: {
        threadId: string;
        resourceId?: string;
        messages: MessageList | MastraDBMessage[];
        observedMessageIds?: string[];
        retentionFloor?: number;
    }): Promise<MastraDBMessage[]>;
    /**
     * Clean up the message context after a successful observation.
     *
     * Handles both activation-based cleanup (using observedMessageIds) and
     * marker-based cleanup (using observation boundary markers). Respects
     * retention floors to prevent removing too many messages.
     */
    cleanupObservedContext(opts: {
        messageList: MessageList;
        threadId: string;
        resourceId?: string;
        observedMessageIds?: string[];
        retentionFloor?: number;
    }): Promise<void>;
    /**
     * Reset buffering state after a successful observation activation.
     *
     * Clears the lastBufferedBoundary, buffering flag, and optionally cleans up
     * static maps for activated message IDs.
     */
    /** @internal Used by ObservationStep. */
    resetBufferingState(opts: {
        threadId: string;
        resourceId?: string;
        recordId: string;
        activatedMessageIds?: string[];
    }): Promise<void>;
    /**
     * Build the observation system message string for injection into an LLM prompt.
     *
     * Loads thread metadata (currentTask, suggestedResponse), formats observations
     * with context prompts and instructions, and returns the fully-formed string.
     * Returns undefined if no observations exist.
     *
     * This is the public entry point for context formatting — used by both
     * Memory.getContext() (standalone) and the processor (via injectObservationsIntoMessages).
     *
     * @example
     * ```ts
     * const systemMsg = await om.buildContextSystemMessage({ threadId: 'thread-1' });
     * if (systemMsg) {
     *   const result = await generateText({ system: systemMsg, messages });
     * }
     * ```
     */
    buildContextSystemMessage(opts: {
        threadId: string;
        resourceId?: string;
        record?: ObservationalMemoryRecord;
        unobservedContextBlocks?: string;
        currentDate?: Date;
    }): Promise<string | undefined>;
    /**
     * Build observation context as an array of system message chunks.
     * Each chunk is a separate system message for better LLM cache hit rates.
     * Used by the processor to inject multiple system messages.
     * @internal
     */
    buildContextSystemMessages(opts: {
        threadId: string;
        resourceId?: string;
        record?: ObservationalMemoryRecord;
        unobservedContextBlocks?: string;
        currentDate?: Date;
    }): Promise<string[]>;
    /**
     * Get unobserved messages from other threads for resource-scoped observation.
     *
     * Lists all threads for the resource, filters to unobserved messages,
     * and formats them as context blocks.
     */
    /** @internal Used by ObservationTurn. */
    // NOTE(review): `string | undefined` assumed — consumers take `unobservedContextBlocks?: string`; confirm.
    getOtherThreadsContext(resourceId: string, currentThreadId: string): Promise<string | undefined>;
    /**
     * Emit debug event and stream progress for UI feedback.
     */
    emitProgress(opts: {
        record: ObservationalMemoryRecord;
        pendingTokens: number;
        threshold: number;
        effectiveObservationTokensThreshold: number;
        currentObservationTokens: number;
        writer?: ProcessorStreamWriter;
        stepNumber: number;
        threadId: string;
        resourceId?: string;
    }): Promise<void>;
    /**
     * Get the current observation status for a thread/resource.
     *
     * Loads unobserved messages from storage, counts tokens, and checks against
     * configured thresholds. Returns a comprehensive status object that tells the
     * caller what actions are needed.
     *
     * This is a pure read operation with no side effects.
     *
     * @example
     * ```ts
     * const status = await om.getStatus({ threadId });
     * if (status.shouldObserve) {
     *   await om.observe({ threadId });
     * } else if (status.shouldBuffer) {
     *   await om.buffer({ threadId });
     * }
     * if (status.shouldReflect) {
     *   await om.reflect(threadId);
     * }
     * ```
     */
    getStatus(opts: {
        threadId: string;
        resourceId?: string;
        messages?: MastraDBMessage[];
    }): Promise<{
        record: ObservationalMemoryRecord;
        pendingTokens: number;
        threshold: number;
        effectiveObservationTokensThreshold: number;
        unbufferedPendingTokens: number;
        shouldObserve: boolean;
        shouldBuffer: boolean;
        shouldReflect: boolean;
        bufferedChunkCount: number;
        bufferedChunkTokens: number;
        canActivate: boolean;
        asyncObservationEnabled: boolean;
        asyncReflectionEnabled: boolean;
        scope: 'resource' | 'thread';
    }>;
    /**
     * Finalize the observation lifecycle: activate any remaining buffered chunks,
     * then observe if the threshold is crossed.
     *
     * Call this at the end of a conversation, session, or turn sequence to ensure
     * no buffered observations are left orphaned and the observation cursor is
     * advanced. Produces a clean terminal state (no pending chunks, cursor up to date).
     *
     * @example
     * ```ts
     * // After all turns are complete
     * const result = await om.finalize({ threadId });
     * // result.activated: true if buffered chunks were promoted
     * // result.observed: true if a full observation pass ran
     * ```
     */
    finalize(opts: {
        threadId: string;
        resourceId?: string;
        messages?: MastraDBMessage[];
    }): Promise<{
        activated: boolean;
        observed: boolean;
        reflected: boolean;
        record: ObservationalMemoryRecord;
    }>;
    /**
     * Return only the messages that haven't been fully observed yet.
     *
     * Use this to prune observed messages from an in-memory message array,
     * preventing unbounded context growth across steps in a multi-step loop.
     * This is the array-based equivalent of the processor's `cleanupObservedContext()`.
     *
     * @example
     * ```ts
     * // In a prepareStep hook, prune before sending to the model
     * messages = await om.pruneObserved({ threadId, messages });
     * ```
     */
    pruneObserved(opts: {
        threadId: string;
        resourceId?: string;
        messages: MastraDBMessage[];
    }): Promise<MastraDBMessage[]>;
    /**
     * Load unobserved messages from storage for a thread/resource.
     *
     * Fetches the OM record, queries storage for messages after the
     * lastObservedAt cursor, then applies part-level filtering so
     * partially-observed messages only include their unobserved parts.
     *
     * Use this when you need to load stored conversation history that
     * hasn't been observed yet (e.g. in a stateless gateway proxy that
     * only receives the latest message from the HTTP request).
     */
    loadUnobservedMessages(opts: {
        threadId: string;
        resourceId?: string;
    }): Promise<MastraDBMessage[]>;
    /**
     * Create a buffered observation chunk without merging into active observations.
     *
     * Loads unobserved messages from storage (filtered by the buffer cursor to avoid
     * re-buffering), calls the observer LLM, and stores the result as a pending
     * buffered chunk in the DB. The chunk can later be merged into active observations
     * via `activate()`.
     *
     * This is a synchronous (awaited) operation — the caller decides whether to
     * `await` it or fire-and-forget. All state lives in storage; no in-process
     * coordination is needed.
     *
     * @example
     * ```ts
     * const status = await om.getStatus({ threadId });
     * if (status.shouldBuffer) {
     *   await om.buffer({ threadId });
     * }
     * ```
     */
    /** @internal Used by ObservationStep. */
    buffer(opts: {
        threadId: string;
        resourceId?: string;
        messages?: MastraDBMessage[];
        /** The freshly-counted pending token count from the caller. If not provided,
         * falls back to record.pendingMessageTokens (which may be stale). */
        pendingTokens?: number;
        /** Pre-loaded record to skip the initial getOrCreateRecord() fetch.
         * When called fire-and-forget, passing the record avoids an async gap
         * before lastBufferedBoundary is set.
         */
        record?: ObservationalMemoryRecord;
        writer?: ProcessorStreamWriter;
        requestContext?: RequestContext;
        observabilityContext?: ObservabilityContext;
        /** Called with the final candidate messages after cursor filtering, before the observer runs.
         * Use this to seal messages in a live MessageList and persist them to storage. */
        beforeBuffer?: (candidates: MastraDBMessage[]) => Promise<void>;
    }): Promise<{
        buffered: boolean;
        record: ObservationalMemoryRecord;
    }>;
    /**
     * Activate buffered observation chunks by merging them into active observations.
     *
     * This is a pure storage operation — no LLM call. It reads buffered chunks from
     * the DB and swaps them into active observations via `storage.swapBufferedToActive()`.
     *
     * Call this after `buffer()` has created chunks, typically at the start of a new
     * turn or when `getStatus().canActivate` is true.
     *
     * @example
     * ```ts
     * const status = await om.getStatus({ threadId });
     * if (status.canActivate) {
     *   const result = await om.activate({ threadId });
     *   if (result.activated) {
     *     console.log('Activated', result.activatedMessageIds?.length, 'message observations');
     *   }
     * }
     * ```
     */
    /** @internal Used by ObservationStep. */
    activate(opts: {
        threadId: string;
        resourceId?: string;
        /** When true, skip activation if pending tokens are below the observation threshold. */
        checkThreshold?: boolean;
        /** Messages to use for threshold check (in-memory). If omitted, loads from storage. */
        messages?: MastraDBMessage[];
        /** Current actor model for provider-change activation checks. */
        currentModel?: ObservationModelContext;
        /** Stream writer for emitting activation markers to the UI. */
        writer?: ProcessorStreamWriter;
        /** MessageList for persisting activation markers on the last assistant message. */
        messageList?: MessageList;
    }): Promise<{
        activated: boolean;
        record: ObservationalMemoryRecord;
        activatedMessageIds?: string[];
    }>;
    /**
     * Manually trigger observation.
     *
     * When `messages` is provided, those are used directly (filtered for unobserved)
     * instead of reading from storage. This allows external systems (e.g., opencode)
     * to pass conversation messages without duplicating them into Mastra's DB.
     *
     * Returns a result indicating whether observation and/or reflection occurred,
     * along with the updated record.
     */
    observe(opts: {
        threadId: string;
        resourceId?: string;
        messages?: MastraDBMessage[];
        hooks?: ObserveHooks;
        requestContext?: RequestContext;
        writer?: ProcessorStreamWriter;
        observabilityContext?: ObservabilityContext;
    }): Promise<{
        observed: boolean;
        reflected: boolean;
        record: ObservationalMemoryRecord;
    }>;
    /**
     * Manually trigger reflection with optional guidance prompt.
     *
     * @example
     * ```ts
     * // Trigger reflection with specific focus
     * await om.reflect(threadId, resourceId,
     *   "focus on the authentication implementation, only keep minimal details about UI styling"
     * );
     * ```
     */
    reflect(threadId: string, resourceId?: string, prompt?: string, requestContext?: RequestContext, observabilityContext?: ObservabilityContext): Promise<{
        reflected: boolean;
        record: ObservationalMemoryRecord;
        usage?: ObserveHookUsage;
    }>;
    /**
     * Get current observations for a thread/resource
     */
    // NOTE(review): observations are handled as strings throughout this API; confirm `string | undefined`.
    getObservations(threadId: string, resourceId?: string): Promise<string | undefined>;
    /**
     * Get current record for a thread/resource
     */
    // NOTE(review): nullable assumed (contrast getOrCreateRecord); confirm `null` vs `undefined`.
    getRecord(threadId: string, resourceId?: string): Promise<ObservationalMemoryRecord | null>;
    /**
     * Update per-record config overrides for observation and/or reflection thresholds.
     * The provided config is deep-merged into the record's `_overrides` key,
     * so you only need to specify the fields you want to change.
     *
     * Overrides that violate buffering invariants (e.g. messageTokens below
     * bufferTokens) are silently ignored at read time — the helpers fall back
     * to the instance-level config.
     *
     * @example
     * ```ts
     * await om.updateRecordConfig('thread-1', undefined, {
     *   observation: { messageTokens: 2000 },
     *   reflection: { observationTokens: 8000 },
     * });
     * ```
     */
    updateRecordConfig(threadId: string, resourceId: string | undefined, config: Record<string, unknown>): Promise<void>;
    /**
     * Get observation history (previous generations)
     */
    // NOTE(review): element type assumed to be the record type; confirm against implementation.
    getHistory(threadId: string, resourceId?: string, limit?: number, options?: ObservationalMemoryHistoryOptions): Promise<ObservationalMemoryRecord[]>;
    /**
     * Clear all memory for a specific thread/resource
     */
    clear(threadId: string, resourceId?: string): Promise<void>;
    /**
     * Get the underlying storage adapter
     */
    getStorage(): MemoryStorage;
    /**
     * Get the token counter
     */
    getTokenCounter(): TokenCounter;
    /**
     * Get current observation configuration
     */
    getObservationConfig(): ResolvedObservationConfig;
    /**
     * Get current reflection configuration
     */
    getReflectionConfig(): ResolvedReflectionConfig;
    /**
     * Get the message history instance for marker persistence.
     */
    getMessageHistory(): MessageHistory;
    /**
     * Get whether thread IDs should be obscured in observations.
     */
    getObscureThreadIds(): boolean;
    /**
     * Begin a new observation turn — the high-level API for managing the
     * observe/buffer/activate/reflect lifecycle across agentic loop steps.
     *
     * @example
     * ```ts
     * const turn = om.beginTurn({ threadId, resourceId, messageList });
     * await turn.start(memory);
     *
     * const step0 = turn.step(0);
     * const ctx = await step0.prepare();
     * // ... agent generates ...
     *
     * await turn.end();
     * ```
     */
    beginTurn(opts: {
        threadId: string;
        resourceId?: string;
        messageList: MessageList;
        observabilityContext?: ObservabilityContext;
        hooks?: ObservationTurnHooks;
    }): ObservationTurn;
}
//# sourceMappingURL=observational-memory.d.ts.map