import { Optional } from 'utility-types';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';

/**
 * This class helps organize groups of tokenized text along with removing items when the window is full.
 */
declare class TokenWindow {
    /** Token window size */
    size: number;
    /** Token groups */
    groups: TokenWindowGroup[];
    /** Create a new group */
    createGroup(id: string): TokenWindowGroup;
    /** Get a group */
    group(id: string): TokenWindowGroup | undefined;
    /** Counts tokens in the specified text */
    static countTokensInText(text: string): number;
    /** Calculate current tokens in all groups */
    countTokens(): number;
    /** Remove overflow from all groups. */
    removeOverflow(): void;
    /** Remove one overflow item. Returns null if no items were able to be removed. */
    private removeOneItem;
}

/** A token group. */
declare class TokenWindowGroup {
    /** Group ID */
    id: string;
    /** List of items */
    items: TokenWindowGroupItem[];
    /**
     * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
     * groups have a weight of 1, each group remove items equally if full. If one has a weight of 2 while the rest are 1,
     * that group will be allowed to keep double the amount of items.
     */
    weight: number;
    /** Current total token count, computed automatically. Don't update this value manually. */
    tokenCount: number;
    /** Group item separator. This text is added in between each item in the token window. */
    separator: string;
    /** Token count padding added to each item. */
    private itemPadding;
    /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
    setItemPadding(padding: number): this;
    /** Sort function */
    private sortFunction;
    /** Set sort function */
    sortBy(sortFunction: (a: TokenWindowGroupItem, b: TokenWindowGroupItem) => number): this;
    /** Set separator. This text is added in between each item in the token window. */
    setSeparator(separator: string): this;
    /**
     * Set weight. Weight controls how many items from this group should be kept
     * in relation to the entire window. For example if all groups have a weight
     * of 1, each group remove items equally if full. If one has a weight of 2
     * while the rest are 1, that group will be allowed to keep double the
     * amount of items.
     */
    setWeight(weight: number): this;
    /** Recalculate all tokens. Note this may take a while. */
    recalculateTokens(): void;
    /** Add an item to the group */
    add(item: string | TokenWindowGroupItemParams): TokenWindowGroupItem;
    /** Manually remove an item */
    remove(itemId: string): boolean;
    /** Get all items as a string */
    getAllAsString(): string;
    /** Get all items. Doesn't return disabled items. */
    getAll(): TokenWindowGroupItem[];
    /** Remove all items from this group */
    empty(): void;
}

/** Token group item section types */
declare enum TokenWindowGroupItemSectionType {
    /** Text items represent plain text. */
    Text = "text",
    /** Tool call items represent a tool call requested by the AI. */
    ToolCall = "tool_call",
    /** Tool result items represent the result of a tool call. */
    ToolResult = "tool_result",
    /** Thinking section */
    Thinking = "thinking",
    /** Other item types */
    Other = "other"
}

/**
 * Token group item.
 *
 * NOTE(review): the `DataType` parameter was reconstructed — the body references `DataType`
 * but the declaration's generic parameter list was stripped by a formatting accident. Confirm
 * against the original source.
 */
interface TokenWindowGroupItem<DataType = any> {
    /** Each item must have a unique ID. */
    id: string;
    /** True if this item should never be removed */
    cannotRemove?: boolean;
    /** Sorting order. If not specified, uses dateAdded instead. */
    sortOrder: number;
    /** Date this item was added */
    dateAdded: number;
    /** Token count in the content */
    tokenCount: number;
    /** This is the actual item that gets sent to the APIs. It will be in whatever format is required for the associated API. */
    data?: DataType;
    /** If disabled, this item will not be included and will not add to the token count. */
    disabled?: boolean;
    /** Message source, ie was this message created by the user, or by the AI? */
    source: 'user' | 'assistant';
    /**
     * The string content of the item, or a summary of it. This is an autogenerated field, updated when the item is added/updated in the token window group.
     * If `data` is a string, this will be the same as `data`. If `data` is more complex, this will be a text representation of all items in the `sections` array.
     *
     * Note: When the response contains text and tool calls, this will add in a summary of what's happening. For better displaying, use the `sections` array.
     */
    text?: string;
    /** Message sections */
    sections?: TokenWindowGroupItemSection[];
    /** If this message was generated by the AI, this contains the token usage for this message. */
    usage?: {
        /** Number of tokens consumed from the data passed to the AI */
        inputTokens: number;
        /** Number of input tokens that were used in token caching */
        cachedInputTokens: number;
        /** Number of tokens consumed by the AI generating output */
        outputTokens: number;
        /** Total token usage */
        totalTokens: number;
    };
    /** True if this item is still being streamed */
    streamingInProgress?: boolean;
}

/** A section of a message returned by the AI */
interface TokenWindowGroupItemSection {
    /** Section type */
    type: TokenWindowGroupItemSectionType;
    /** Text content when this section represents text or thinking */
    text?: string;
    /** The raw tool name the AI requested to be called. */
    toolName?: string;
    /** The ID of the KB action this tool call maps to, if any */
    toolKbID?: string;
    /** The name of the KB action this tool call maps to, if any */
    toolKbName?: string;
    /** The parameters the AI requested to be sent to the tool. Only available if type == 'tool_call' */
    toolParameters?: any;
    /** Successful response of the tool call. Will be null if toolErrorResponse is set. */
    toolSuccessResponse?: any;
    /** Error response of the tool call. Will be null if toolSuccessResponse is set. */
    toolErrorResponse?: string;
    /** Tool call ID. This can be used to match a tool call request with it's result. */
    toolCallInstanceID?: string;
    /** True if this tool call should be hidden in the UI */
    toolCallHiddenInUI?: 'always' | 'after-complete';
}

/**
 * Token window group item input, without the autogenerated fields.
 *
 * NOTE(review): the stripped original read `Omit, 'id' | … | 'sections'>, 'tokenCount'>`;
 * the inner `Omit<TokenWindowGroupItem<DataType>, …>` and an outer two-argument wrapper taking
 * `'tokenCount'` were lost. `Optional` (from 'utility-types', imported above and otherwise unused)
 * is the only two-argument wrapper in scope, so it is reconstructed here — confirm against the
 * original source.
 */
type TokenWindowGroupItemParams<DataType = any> = Optional<Omit<TokenWindowGroupItem<DataType>, 'id' | 'dateAdded' | 'sortOrder' | 'text' | 'source' | 'sections'>, 'tokenCount'>;

/**
 * Allows an MCP server to be used as a knowledge source for IntelliWeave.
 */
declare class MCPKnowledgeClient {
    /** MCP client */
    client?: Client;
    /** All tools discovered on the MCP server. Only available after connect() has completed. */
    tools: Awaited<ReturnType<Client['listTools']>>['tools'];
    /** All tools discovered, mapped to IntelliWeave knowledge base actions */
    iwActions: KnowledgeBaseItem[];
    /** Statistics */
    stats: {
        toolsCalled: number;
    };
    /** Configuration */
    config: {
        /** Source ID */
        id?: string;
        /** URL to the MCP server endpoint */
        baseURL?: string;
        /** Custom connection function. If specified, baseURL is optional.
         * NOTE(review): return type reconstructed as Promise<Client> — confirm. */
        connect?: () => Promise<Client>;
        /**
         * The name of the tool which provides knowledge searching. If specified, the search() will exclude this function and instead
         * call it and show returned results. If not specified, the search() will just return all tools.
         */
        searchToolName?: string;
        /** Keep search function available for the AI to use. */
        searchToolVisible?: boolean;
        /** Use the IntelliWeave proxy */
        proxy?: {
            /** If true, will send requests via the IntelliWeave MCP proxy */
            enabled?: boolean;
            /** The URL of the proxy server, defaults to the standard IntelliWeave proxy */
            url?: string;
            /** IntelliWeave API key */
            apiKey?: string;
        };
        /** Pass extra headers to the MCP server */
        headers?: Record<string, string>;
    };
    /** Constructor */
    constructor(config: MCPKnowledgeClient['config']);
    /** In-progress connection attempt */
    private connectionPromise?;
    /** Connect to the client.
     * NOTE(review): `Promise>` residue implies a nested generic; reconstructed from the
     * `tools` field type above — confirm. */
    connect(): Promise<Awaited<ReturnType<Client['listTools']>>>;
    connectInternal(): Promise<Awaited<ReturnType<Client['listTools']>>>;
    /** Disconnect from server */
    disconnect(): Promise<void>;
    /** Fetch list of tools from the MCP server */
    private fetchTools;
    /** Cache last search result */
    lastSearchQuery: string;
    lastSearchResults: KnowledgeBaseItem[];
    /** Perform a search query.
     * NOTE(review): result type reconstructed from `lastSearchResults` — confirm. */
    search(query: string): Promise<KnowledgeBaseItem[]>;
    /** Perform search using the configured search function */
    private performSearchCall;
    /** Perform tool call. */
    private performToolCall;
}

// ==================================================================================================
// JSON Schema Draft 07
// ==================================================================================================
// https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
// --------------------------------------------------------------------------------------------------

/**
 * Primitive type
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
 */
type JSONSchema7TypeName =
    | "string" //
    | "number"
    | "integer"
    | "boolean"
    | "object"
    | "array"
    | "null";

/**
 * Primitive type
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
 */
type JSONSchema7Type =
    | string //
    | number
    | boolean
    | JSONSchema7Object
    | JSONSchema7Array
    | null;

// Workaround for infinite type recursion
interface JSONSchema7Object {
    [key: string]: JSONSchema7Type;
}

// Workaround for infinite type recursion
// https://github.com/Microsoft/TypeScript/issues/3496#issuecomment-128553540
interface JSONSchema7Array extends Array<JSONSchema7Type> {}

/**
 * Meta schema
 *
 * Recommended values:
 * - 'http://json-schema.org/schema#'
 * - 'http://json-schema.org/hyper-schema#'
 * - 'http://json-schema.org/draft-07/schema#'
 * - 'http://json-schema.org/draft-07/hyper-schema#'
 *
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-5
 */
type JSONSchema7Version = string;

/**
 * JSON Schema v7
 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
 */
type JSONSchema7Definition = JSONSchema7 | boolean;
interface JSONSchema7 {
    $id?: string | undefined;
    $ref?: string | undefined;
    $schema?: JSONSchema7Version | undefined;
    $comment?: string | undefined;
    /**
     * @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-00#section-8.2.4
     * @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#appendix-A
     */
    $defs?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1
     */
    type?: JSONSchema7TypeName | JSONSchema7TypeName[] | undefined;
    enum?: JSONSchema7Type[] | undefined;
    const?: JSONSchema7Type | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.2
     */
    multipleOf?: number | undefined;
    maximum?: number | undefined;
    exclusiveMaximum?: number | undefined;
    minimum?: number | undefined;
    exclusiveMinimum?: number | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.3
     */
    maxLength?: number | undefined;
    minLength?: number | undefined;
    pattern?: string | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.4
     */
    items?: JSONSchema7Definition | JSONSchema7Definition[] | undefined;
    additionalItems?: JSONSchema7Definition | undefined;
    maxItems?: number | undefined;
    minItems?: number | undefined;
    uniqueItems?: boolean | undefined;
    contains?: JSONSchema7Definition | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.5
     */
    maxProperties?: number | undefined;
    minProperties?: number | undefined;
    required?: string[] | undefined;
    properties?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    patternProperties?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    additionalProperties?: JSONSchema7Definition | undefined;
    dependencies?: {
        [key: string]: JSONSchema7Definition | string[];
    } | undefined;
    propertyNames?: JSONSchema7Definition | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.6
     */
    if?: JSONSchema7Definition | undefined;
    then?: JSONSchema7Definition | undefined;
    else?: JSONSchema7Definition | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.7
     */
    allOf?: JSONSchema7Definition[] | undefined;
    anyOf?: JSONSchema7Definition[] | undefined;
    oneOf?: JSONSchema7Definition[] | undefined;
    not?: JSONSchema7Definition | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-7
     */
    format?: string | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-8
     */
    contentMediaType?: string | undefined;
    contentEncoding?: string | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-9
     */
    definitions?: {
        [key: string]: JSONSchema7Definition;
    } | undefined;
    /**
     * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-10
     */
    title?: string | undefined;
    description?: string | undefined;
    default?: JSONSchema7Type | undefined;
    readOnly?: boolean | undefined;
    writeOnly?: boolean | undefined;
    examples?: JSONSchema7Type | undefined;
}

/**
 * Register knowledge base sources and perform searches.
*/ declare class KnowledgeBase { /** Reference to the AI */ ai?: IntelliWeave; /** Knowledge base sources */ _sources: KnowledgeBaseSource[]; /** List of sources returned from the last window event */ _windowSources: KnowledgeBaseSource[]; /** List of last search results */ lastResults: KnowledgeBaseItem[]; /** Individual knowledge base entries added manually by the application */ manualEntries: KnowledgeBaseItem[]; /** If true, allows using globally defined sources from the browser window events */ allowWindowSources: boolean; /** If true, allows using knowledge specified in the global configuration object */ allowGlobalConfigSources: boolean; /** If true, allows the AI to search the knowledge base. If false, essentially disables RAG lookup. */ allowRagSearch: boolean; /** Constructor */ constructor(ai: IntelliWeave); /** Ensures the internal knowledge is set correctly */ ensureInternalKnowledge(): void; /** Clears all knowledge back to the default */ reset(): void; /** * Register a new knowledge base source. You can pass either just a query function, or an ID and a query function. * * @param idOrQuery The ID of the source or a function that performs the query if no ID is provided * @param query The function that performs the query. Can be undefined if the first param is a function. */ registerSource(idOrQuery: string | KnowledgeBaseSource['query'], query?: KnowledgeBaseSource['query']): string; /** Remove a knowledge base source */ removeSource(idOrQuery: string | KnowledgeBaseSource['query']): void; /** Add a knowledge base item. */ addEntry(item: KnowledgeBaseItem): void; /** Remove a knowledge base item. */ removeEntry(id: string): void; /** Get all knowledge base sources */ get sources(): KnowledgeBaseSource[]; /** Search the knowledge base */ search(query: string): Promise; /** Get the KB entry with the specified ID. Requires the item to have been fetched in the last knowledge base query. 
*/ getCachedEntry(id: string): KnowledgeBaseItem | undefined; /** Create and register an external knowledge base source from a URL */ registerSourceFromURL(url: string, id?: string): void; /** Clone this instance */ clone(newIW: IntelliWeave): KnowledgeBase; /** Registers an MCP server as a knowledge base source */ registerMCPSource(config: MCPKnowledgeClient['config']): MCPKnowledgeClient; } /** Knowledge fetcher */ type KnowledgeFetcher = (query: string) => (KnowledgeBaseItem[] | Promise); /** Knowledge base source */ interface KnowledgeBaseSource { /** Source ID */ id?: string; /** Source name */ name?: string; /** Optional description */ description?: string; /** Optional icon URL */ icon?: string; /** If true, this source will not be queried. */ disabled?: boolean; /** Source query function. This function should return a list of knowledge base entries that optionally match the query. */ query?: KnowledgeFetcher; /** URL query for remote sources */ url?: string; /** Pre-packaged knowledge base entries */ entries?: KnowledgeBaseItem[]; /** Remote knowledge server type (default is 'iw') */ backendType?: 'mcp' | 'iw'; /** If using MCP, this is the name of the tool to use to search for knowledge */ mcpSearchToolName?: string; } /** Knowledge base item */ interface KnowledgeBaseItem { /** Item ID */ id?: string; /** OpenAI-compatible internal ID. This is set automatically and should not be used. */ _functionID?: string; /** Item type. */ type: 'info' | 'action' | 'tour' | 'input-event' | 'output-event'; /** Item name */ name: string; /** Item tags. Helps with search optimization. */ tags?: string; /** Item content */ content: string; /** If true, this item will always be returned from all search results. */ isContext?: boolean; /** If true, this item will not be visible to the AI. */ disabled?: boolean | ((ai: IntelliWeave) => boolean); /** List of parameters for an action function. Can either use IW's format, or a JSON Schema object. 
*/ parameters?: KnowledgeBaseActionParameterSchema; /** * Item action. The parameters are defined in `parameters`. The response is stringified and sent to the AI. * You can return any JSON-serializable object. You can also return a string describing to the AI the action * that was performed. If an error is thrown, the AI will respond appropriately to the user. */ action?: (input: any, ai: IntelliWeave) => (any | Promise); /** If specified, will hide this action from the default UI after the AI finishes running it, or always hide it */ hideActionInUI?: 'always' | 'after-complete'; /** Attachments such as images, etc */ attachments?: KnowledgeBaseItemAttachment[]; } /** Knowledge base item attachment, such as an image, file, etc. */ interface KnowledgeBaseItemAttachment { /** UUID */ uuid: string; /** Attachment mime type */ mimeType: string; /** File name */ name: string; /** Full URL to access the file. This is required for the AI to be able to see the attachment. */ url?: string; /** UNIX timestamp (milliseconds since epoch) when the file was added */ dateAdded?: number; /** Internal path to where the file is stored */ path?: string; /** File size */ size?: number; } /** Parameter definition used by IntelliWeave */ interface IntelliWeaveParameterDefinition { name: string; type: 'string' | 'boolean' | 'number'; description: string; } /** Tool call input schema. 
Can either use IW's format, or a JSON Schema object */ type KnowledgeBaseActionParameterSchema = JSONSchema7 | IntelliWeaveParameterDefinition[]; /** * Speech output * * - event `speechfilter` - Allows modification or cancellation of speech * - event `speechstart` - When the speech starts * - event `speechend` - When the speech ends */ declare class WebWeaverSpeechOutput extends EventTarget { /** Reference to the AI */ private ai?; /** Automatically speak output from the AI */ autoSpeak: boolean; /** If enabled, connections will be pre-emptively opened to speed up text-to-speech response times, if possible */ preemptiveConnection: boolean; /** Constructor */ constructor(ai: IntelliWeave); /** Message IDs we've processed */ private processedMessages; /** Called when the AI responds */ onOutputFromAI(e: CustomEvent): void; /** Current player vars */ private currentPlayerVolume?; private currentPlayer?; /** The audio analyser node */ private analyserNode?; /** The audio analyser buffer */ private analyserBuffer?; /** @private Maximum volume heard this session */ private maxVolumeHeard; /** Get current (realtime) audio output volume level, from 0 to 1 */ get volumeLevel(): number; /** Queued messages to speak next */ private _queuedText; /** Speak the text */ speak(text: string): Promise; private _queueActive; _runQueue(): Promise; /** ElevenLabs connection pre-cache */ private _elevenLabsPrecachedConnection?; private _getElevenLabsConnection; private _speakWithLock; /** True if currently playing audio */ get isSpeaking(): boolean; /** Interrupt the previously playing audio */ interrupt(): Promise; /** Called when the speech output ends */ onSpeechEnd(): void; } /** * An AudioWorklet module that records data from input and sends it to the host. * * - event `data` - Fired when data is available to be read. 
*/ declare class PCMReceiverNode extends AudioWorkletNode { /** @type {'int16' | 'float32'} The output data format */ format: string; /** Register worklet with an audio context */ static registerModule(context: AudioContext): Promise; /** * Creates a new PCMRecorderNode ready to receive PCM data. * * @param context - The audio context to use. * @param sampleRate - The sample rate of the output data stream. * @param format - The format of the output data stream. * @param bufferSize - The size of the output buffer in elements (Int16Array or Float32Array items, depending on `format`). */ constructor(context: AudioContext, sampleRate: number, format: 'int16' | 'int64' | 'float32', bufferSize: number); /** @private Called when a message is received from the worklet */ onWorkletMessage(e: MessageEvent): void; /** Called when data is received */ onData(buffer: Float32Array | Int16Array | BigInt64Array): void; } /** * An AudioNode which sends events for when speech is detected * * - event `speechstart` - Fired when speech is detected * - event `speechend` - Fired when speech ends */ declare class VoiceDetectionNode extends PCMReceiverNode { /** True if voice is currently being detected */ isVoiceActive: boolean; /** True if voice is active but may be ending soon */ get isVoicePossiblyEnding(): boolean; /** Last date that voice was detected */ lastVoiceActiveDate: number; /** Amount of time to wait after voice detection to detect that it has ended */ voiceEndTimeout: number; /** Detection sensitivity, if the detection model outputs a number bigger than this it will be considered voice */ sensitivity: number; /** Sensitivity threshold to end speaking */ sentivityEnd: number; /** VAD model */ static vadModelURL: string; /** Loaded VAD model */ private vad?; /** Sample rate */ get sampleRate(): 16000 | 8000; /** Number of samples */ get numberOfSamples(): number; /** Number of sample chunks */ get numberOfSampleChunks(): number; /** Output buffer size */ get 
outputBufferSize(): number; /** True if the VAD model has been loaded */ get isModelLoaded(): boolean; /** The time when to next reset the VAD model */ nextVadReset: number; /** The current probability of active voice */ currentProbability: number; /** Constructor */ constructor(audioContext: AudioContext); /** Start loading */ loadModel(): Promise; private _lastVoiceActive; /** Called when data is received */ onData(buffer: Float32Array): Promise; /** Called when speech is detected */ onSpeechStart(): void; /** Called when speech ends */ onSpeechEnd(): void; } /** * An AudioNode which isolates speech and outputs the audio data. Since we are reusing the VAD model node, * output data is in 8000Hz Float32 format. * * - event `voicedata` - Fired when a chunk of voice is detected. `data` contains the recorded chunk of voice in a Float32Array. * - event `voicedataend` - Fired when this chunk of voice ends. `data` contains an array of Float32Array containing the entirety of the recorded voice. */ declare class VoiceChunkOutputNode extends VoiceDetectionNode { /** Stored buffers */ buffers: Float32Array[]; /** Recorded audio chunks with voice in it */ recordedBuffers: Float32Array[]; /** Last active state */ _voiceRecording: boolean; /** Amount of audio data in the buffer, in seconds */ get bufferDuration(): number; /** Amount of data to keep from before the user started speaking */ backBufferDurationSeconds: number; /** Called when data is received */ onData(buffer: Float32Array): Promise; /** Called when a chunk of voice is recorded */ onVoiceChunk(buffer: Float32Array): void; /** Called when the voice recording ends */ onVoiceEnd(buffers: Float32Array[]): void; } /** * This AudioNode uses OpenAI's Whisper model to transcribe spoken speech to text. * * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text. 
*/ declare class OpenAITranscriptionNode extends VoiceChunkOutputNode { /** OpenAI API key */ apiKey: string; /** Pending buffers */ private pendingBuffers; /** Last request */ private lastRequestAbortController?; /** True if currently transcribing */ isTranscribing: boolean; /** Constructor */ constructor(audioContext: AudioContext, apiKey: string); /** Called when the voice recording ends */ onVoiceEnd(buffers: Float32Array[]): Promise; /** Called when a transcription is ready */ onVoiceTranscription(text: string): void; } /** * This AudioNode uses IntelliWeave's servers to transcribe spoken speech to text. * * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text. */ declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode { /** Debug: Export each recording as a wav file for download */ static debugExportWav: boolean; /** Server address for transcription */ apiAddress: string; /** IntelliWeave API key */ apiKey: string; /** WebSocket connection */ private ws?; /** True if currently transcribing */ isTranscribing: boolean; /** WebSocket shutdown timer */ private shutdownTimer?; /** Constructor */ constructor(audioContext: AudioContext, apiKey: string); /** Called when a voice chunk is received */ onVoiceChunk(buffer: Float32Array): Promise; /** Called when the voice recording ends */ onVoiceEnd(buffers: Float32Array[]): Promise; /** Called when a transcription is ready */ onVoiceTranscription(text: string): void; /** Called when the WebSocket is closed */ onSocketClose(): void; } /** * This AudioNode uses ElevenLabs to transcribe spoken speech to text. * * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text. 
*/ declare class ElevenLabsTranscriptionNode extends VoiceChunkOutputNode { /** ElevenLabs API key */ apiKey: string; /** ElevenLabs stream connection */ private connection?; /** True if currently transcribing */ isTranscribing: boolean; /** WebSocket shutdown timer */ private shutdownTimer?; /** Constructor */ constructor(audioContext: AudioContext, apiKey: string); /** Called when a voice chunk is received */ onVoiceChunk(buffer: Float32Array): Promise; /** Start reading the stream */ private startReading; /** Called when the voice recording ends */ onVoiceEnd(buffers: Float32Array[]): Promise; /** Called when a transcription is ready */ onVoiceTranscription(text: string): void; } /** * Handles speech recognition from the microphone * * - event `speechstart` - We have detected the user started speaking * - event `speechend` - We have detected the user stopped speaking * - event `speech` - Speech recognition result * - event `start` - Speech recognition started * - event `end` - Speech recognition ended */ declare class WebWeaverSpeechRecognition extends EventTarget { /** Reference to the AI */ ai?: IntelliWeave; /** True if recognition is running */ isRunning: boolean; /** The audio analyser node */ private analyserNode?; /** The audio analyser buffer */ private analyserBuffer?; /** The microphone stream */ micStream?: MediaStream; /** Recording start time for tracking duration */ private recordingStartTime?; /** Returns true if speech recognition is supported by this persona and browser */ get isSupported(): boolean; /** Currently active voice detection node */ voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode | ElevenLabsTranscriptionNode; /** Constructor */ constructor(ai: IntelliWeave); private _skipEvents; /** Start recognition */ start(): Promise; /** Stop recognition */ stop(): void; /** @private Maximum volume heard this session */ maxVolumeHeard: number; /** Get current (realtime) microphone volume level, from 0 to 1 */ get 
volumeLevel(): number; /** True if currently detecting words being spoken */ get wordsCurrentlyBeingSpoken(): boolean; /** True if currently transcribing voice to text */ get isTranscribing(): boolean; /** Called when speech has been recorded */ onTranscription(e: CustomEvent): void; /** Called to reset the speech recognizer */ reset(): Promise; } /** Handles creating and managing the AudioContext */ declare class AudioSystem { /** Reference to the AI */ private ai?; /** The speech recognition module. */ speechRecognition: WebWeaverSpeechRecognition; /** The speech output module. */ speechOutput: WebWeaverSpeechOutput; /** The audio context */ context?: AudioContext; /** List of active named locks */ locks: string[]; /** Returns true if speech recognition and output is supported by this persona and browser */ static get isSupported(): boolean; /** Constructor */ constructor(ai: IntelliWeave); /** Register worklets */ static registerModules(context: AudioContext): Promise; /** Create a named lock to enable the audio system */ beginAccess(namedLock: string): Promise; /** Stop accessing the audio system */ endAccess(namedLock: string): void; } /** * This class allows you to use the AI as a logic engine, data extractor, etc. */ declare class AILogic { /** Reference to the AI */ private ai?; /** Constructor */ constructor(ai: IntelliWeave); /** Ask the AI a yes/no question associated with the specified data. Data must be JSON-serializable, or a string of any kind of data. */ boolean(config: IntelliWeaveInstructConfig): Promise; /** Ask the AI to select a choice from a list of options. */ choose(config: IntelliWeaveInstructConfig & { /** List of choices the AI can pick from. */ options: string[]; }): Promise; /** * Ask the AI to extract data from input data. The AI will return the extracted data. Possibly an array of multiple extractions. */ extract(config: IntelliWeaveInstructConfig & { /** Allow multiple items to be returned. 
If true, returns an array instead of an object. */ allowMultiple?: boolean; /** Fields to extract in each object. */ extractions: { /** Field name */ name: string; /** Field data type */ type: 'string' | 'number' | 'boolean' | 'date' | 'email' | 'phone' | 'address'; /** Describe to the AI what data to put in this field. */ description?: string; }[]; }): Promise; /** * Generate a Markdown document based on the data from the user. * * @param config Instruct config. * @returns A markdown document. */ generateMarkdown(config: Omit): Promise; /** * Perform an instruction. * * @param config Instruct config. * @returns The final response from the AI. */ instruct(config: IntelliWeaveInstructConfig): Promise; } /** Config for any instruct call */ interface IntelliWeaveInstructConfig { /** Instruction */ instruction: string; /** Input data or query to process */ data: any; /** Whether to allow the AI to use the knowledge base or not. If false, the AI will not use the knowledge base. */ allowKB?: boolean; /** Callback that will be called when streaming the response. Each call will contain the full text that has been generated so far. */ callback?: (txt: string) => void; } /** Chat config options */ interface ChatBaseConfig { /** API key */ apiKey: string; /** Provider ID */ providerID?: string; /** Endpoint URL if using a custom URL */ endpoint: string; /** LLM model to use */ model: string; /** System message to describe to the AI how to behave. */ systemMessage: string; /** User ID used to uniquely identify users in ChatGPT's API */ userID: string; /** If true, streams the text responses from the API */ stream: boolean; /** Amount of estimated tokens to keep when trimming */ maxTokens: number; /** Callback before the AI sends info to the LLM */ onBeforeMessageProcessing?: () => void; /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. 
*/ onAIMessage?: (output: TokenWindowGroupItemParams[], isPartial: boolean) => void; /** Callback when the AI starts performing an action */ onAIToolStart?: (toolName: string, input: any) => void; } /** Chat tool config */ interface ChatBaseToolConfig { /** Name of the tool, eg "perform_search" */ name: string; /** Description of the tool */ description: string; /** Parameters for the tool */ params: JSONSchema7; /** Callback function to process the tool */ callback: (params: any) => any; /** If true, this item can be removed if there's not enough context available. */ canRemove?: boolean; /** Knowledge base item this tool use represents */ kbItem?: KnowledgeBaseItem; } /** * API for interacting with chat APIs. */ declare class ChatBase< /** Format for messages in the token window */ DataType = any, /** Optional extended config */ ConfigFormat extends ChatBaseConfig = ChatBaseConfig> { /** ID */ id: string; /** Metadata */ metadata: any; /** Config */ config: ConfigFormat; /** The maximum tool calls in sequence the AI can make before an error is thrown. */ maxToolCallsPerMessage: number; /** Statistics */ stats: { /** Total tokens used this session */ tokensUsed: number; }; /** Token window management */ tokenWindow: TokenWindow; /** Token window group used for the context message */ get contextGroup(): TokenWindowGroup; /** Token window group used for tools / actions */ get toolGroup(): TokenWindowGroup; /** Token window group used for messages */ get messageGroup(): TokenWindowGroup; /** Get the API base after stripping out exact endpoints, or undefined for the default */ getBaseURL(): string | undefined; /** Constructor */ constructor(config: ConfigFormat); /** Send a message, and get the response as a string. 
*/ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams[]) => void): Promise[]>; /** Add a user message to the message history */ addUserMessage(message: string): void; /** Add an assistant message to the message history */ addAssistantMessage(message: string): void; /** Helper to add a plain text item */ protected addTextMessage(text: string, source: 'user' | 'assistant', data: DataType): void; /** Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */ onBeforeIncomingMessage(message: DataType): void; /** Reset the conversation */ resetConversation(): void; /** Trim message list */ trimMessages(): Promise; /** Register a tool. */ registerTool(tool: ChatBaseToolConfig): TokenWindowGroupItem; /** Find a tool based on the AI-safe name */ protected findToolBySafeName(toolSafeName: string): ChatBaseToolConfig | undefined; /** Execute the specified tool. Throws an error if the tool is undefined. */ protected executeTool(tool: ChatBaseToolConfig | undefined, input: any): Promise; } /** Parses the response from `IntelliWeave.sendMessage()` or a collection of message items. */ declare class IntelliWeaveMessageParser { /** New messages produced after sendMessage() was called */ messages: TokenWindowGroupItemParams[]; /** Constructor */ constructor(items: TokenWindowGroupItemParams[]); /** Plain text output from the AI */ text(): string; /** Total token usage */ tokenUsage(): { cachedInputTokens: number; inputTokens: number; outputTokens: number; totalTokens: number; }; /** Component sections for display */ sections(): TokenWindowGroupItemParams['sections']; /** List all tool calls that took place */ toolCalls(): TokenWindowGroupItemSection[]; /** Find the response for a tool call */ toolResult(toolCallInstanceID: string): TokenWindowGroupItemSection | null; } /** Handles subagents. This allows your Persona to use other Personas as tools. 
*/ declare class SubAgents { /** Reference to the main IntelliWeave instance */ ai: IntelliWeave; /** Constructor */ constructor(ai: IntelliWeave); /** Subagents */ subagents: SubAgentConfig[]; /** Cached subagents */ cachedSubagents: Record; /** Register a sub-agent */ register(config: SubAgentConfig): void; /** Unregister subagent */ remove(id: string): void; /** Run the subagent */ runQuery(config: SubAgentConfig, query: string): Promise; } /** Sub-agent config */ interface SubAgentConfig { /** ID of the sub-agent */ id: string; /** API key for the persona. If not specified, uses the same api key as the main agent. */ apiKey?: string; /** Name of the sub-agent */ name?: string; /** Instructions for the main agent to use this sub agent */ usageInstructions?: string; /** If true, will remove all Persona knowledge entries */ clearExistingKnowledge?: boolean; /** Disable RAG search for subagents. If true, only KB entries with isContext=true will be used. */ disableRagSearch?: boolean; /** Extra knowledge base sources for the sub-agent */ knowledge?: KnowledgeFetcher; /** Optional extra configuration for the subagent instance */ config?: Partial; /** Called when the subagent is loaded */ onAgentLoaded?: (agent: IntelliWeave) => Promise | void; } /** Built-in action flags for the persona */ interface BuiltInActionFlags { /** Allows the AI to display follow-up suggestions */ allowSuggestionButtons?: boolean; /** Allows the AI to open a new tab pointing to any URL */ allowOpenNewTab?: boolean; /** Allows the AI to perform an arbitrary HTTP request */ allowHttpRequests?: boolean; /** Allows the AI to set the path of the current page */ allowChangeRoute?: boolean; /** Allows the AI to craft an email and use mailto: link */ allowSendEmail?: boolean; /** Makes the AI only use information from knowledge bases, not general knowledge */ onlyUseKnowledgeBase?: boolean; } /** Persona config received from the hub */ interface WebWeaverGPTConfig { /** ID */ id: string; /** Chat 
API config */ model: ChatBaseConfig; /** If true, message history will be sent to the IntelliWeave hub for analysis */ analytics?: boolean; /** Persona name */ name?: string; /** Instructions to the AI */ instructions?: string; /** Introduction message, used in the automatic UI */ introductionMessage?: string; /** URL to the logo image to display in the chat UI */ logo?: string; /** Background color or gradient or image for the chat UI */ background?: string; /** Text color for the chat UI */ textColor?: string; /** Display mode: 'closed' (default - starts minimized) or 'open' (always open) */ displayMode?: 'closed' | 'open'; /** Layout preset: 'widget' (default) or 'fullscreen' */ layout?: 'widget' | 'fullscreen'; /** Positioning mode: 'fixed' (default - floats on page) or 'container' (fills parent container) */ positioningMode?: 'fixed' | 'container'; /** Horizontal position: 'left' or 'right' (default: 'right') - only used when positioningMode is 'fixed' */ positionX?: 'left' | 'right'; /** Vertical position: 'top' or 'bottom' (default: 'bottom') - only used when positioningMode is 'fixed' */ positionY?: 'top' | 'bottom'; /** Horizontal offset from edge in pixels (default: 20) - only used when positioningMode is 'fixed' */ offsetX?: number; /** Vertical offset from edge in pixels (default: 20) - only used when positioningMode is 'fixed' */ offsetY?: number; /** Identifier of an external app or service which manages this persona, if any. (eg. 
"chatterly") */ managedBy?: string; /** Voice information */ voice?: { /** Provider ID */ providerID: string; /** API key for the provider */ apiKey: string; /** Voice ID within the provider */ voiceID: string; }; /** Transcription API information */ transcription?: { /** Provider ID */ providerID: string; /** API key for the provider */ apiKey: string; /** Optional URL for the transcription service */ url?: string; }; /** Knowledge base sources */ knowledge?: KnowledgeBaseSource[]; /** MCP servers */ mcpServers?: MCPKnowledgeClient['config'][]; /** Built-in action flags that are currently enabled */ flags?: BuiltInActionFlags; /** Allow custom chat provider */ onCreateProvider?: (config: ChatBaseConfig) => ChatBase; /** Subagents */ subagents?: SubAgentConfig[]; } /** Configuration for the IntelliWeave initialization */ interface IntelliWeaveConfig extends Partial { /** Optionally allows you to specify a custom hub API URL. */ hubAPI?: string; /** A unique ID to identify this user. Defaults to a generated userID if not specified. */ userID?: string; } /** * IntelliWeave interface, loads a Persona from the hub and allows you to interact with it. This is the main entry point into the IntelliWeave * SDK when not using the built-in UI. * * - event `load` - Fired when the AI is loaded with a new configuration. * - event `error` - Fired when an error occurs during loading. * - event `webweaver_loaded` - Fired when the AI is loaded with a new configuration. This is a global event that is fired on the window object. * - event `webweaver_error` - Fired when an error occurs during loading. This is a global event that is fired on the window object. * - event `input` - Fired when the user sends a message to the AI. * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isPartial` is true, the message is incomplete and will be followed by more events. * - event `toolstart` - Fired when the AI starts performing an action. 
* - event `tool` - Fired when the AI finishes performing an action. */ declare class IntelliWeave extends EventTarget { /** App version */ static version: string; /** Built-in actions version - increment this when adding new actions */ static builtInActionsVersion: string; /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */ onAIMessage?: (messages: TokenWindowGroupItemParams[], isPartial: boolean) => void; /** Callback when the AI starts performing an action */ onAIToolStart?: ChatBaseConfig['onAIToolStart']; /** Current conversation ID */ conversationID: string; /** Knowledge database interface */ knowledgeBase: KnowledgeBase; /** Subagent interface */ subAgents: SubAgents; /** Last KB search that was performed */ private _lastKBsearch; /** If set, the next time a request is made this is the KB result items that will be used, once-off. */ private _nextRequestUseKBitems?; /** Timestamp when the last message processing started (for response time tracking) */ private _messageStartTime?; /** Config loaded from the API */ config?: WebWeaverGPTConfig; /** Available LLMs */ models: { id: string; config: ChatBaseConfig; priority?: number; }[]; /** Current LLM */ currentModel?: ChatBase; /** The audio system. 
Set this to a new instance of AudioSystem to enable audio support */ audio: AudioSystem | null; /** Silero VAD model blob */ vadModel?: Blob; /** True if the AI has loaded */ get loaded(): boolean; /** Built-in action flags from the config */ get flags(): BuiltInActionFlags; /** Get information about available built-in actions */ getBuiltInActionsInfo(): { version: string; availableActions: string[]; enabledFlags: BuiltInActionFlags; }; /** If loading fails, this stores the last error during load() */ error?: Error; /** IntelliWeave API key */ apiKey: string; /** Tracker for the current voice interaction */ _voiceTracker?: (text: string) => void; /** Logic engine */ logic: AILogic; /** A unique ID to identify this user. Defaults to a value stored in localStorage, or random. */ userID: string; /** URL of the IntelliWeave Hub API */ hubAPI: string; /** Set model and load data from an API key */ load(apiKey: string, config?: IntelliWeaveConfig): Promise; /** Set the current model */ setModel(id: string): void; private _lastSystemMsg; /** Get the system message prefix, before the KB entries are added */ getContextPrefix(): Promise; /** KB items added in the last run */ private lastKBItems; /** Get system message to send to the AI */ onBeforeMessageProcessing(): Promise; /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */ processIncomingMessage(messages: TokenWindowGroupItemParams[], isPartial?: boolean): void; /** True if currently processing a message */ isProcessing: boolean; /** Send a message, and get the response */ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams[]) => void): Promise; /** @private Called when the AI wants to run a KB action */ toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise; /** Submit an analytics event asynchronously. These events are for use in the Conversation Analytics code. For anonymous statistic analysis, use track() instead. 
*/ private activeAnalyticsPromises; submitAnalyticsEvent(data: any): void; /** Wait for all analytics events to finish */ waitForAnalytics(): Promise; /** Reset the conversation */ resetConversation(): void; /** Insert a message as if the assistant has written it */ insertAssistantMessage(message: string): void; /** Export conversation state to a JSON object */ exportState(): { type: string; conversationID: string; messages: TokenWindowGroupItem[] | undefined; }; /** Import conversation state from JSON */ importState(state: any): void; /** Clone this instance without any message history */ clone(): IntelliWeave; /** Get all messages in the conversation history */ get messages(): TokenWindowGroupItem[]; } /** * Base class for custom Web Components, with some utility functions on it. * * Version 1.1 * Created by: jjv360 */ declare class BaseComponent extends HTMLElement { /** Attributes to monitor */ static observedAttributes: string[]; /** Component tag name */ static tagName: string; /** True if this component is already registered */ static _isRegistered: boolean; /** Contains the shadow DOM */ _shadow?: ShadowRoot; /** Get the root node for this element's shadow DOM */ get root(): ShadowRoot | undefined; /** Register the component */ static register(): void; /** Create a new element of this type */ static create(attrs?: any, content?: string): BaseComponent; /** Add the html for a component */ static add(attrs?: any, content?: string): string; /** Create an element of this kind. Same as document.createElement(), but also ensures the component is registered. 
*/ static createElement(): HTMLElement; /** Called when the element is added to the DOM */ connectedCallback(): void; /** Called when the element is removed */ disconnectedCallback(): void; /** @abstract Get the HTML layout */ html(): string; /** @abstract Called before the HTML is created for the first time */ onBeforeCreate(): void; /** @abstract Called when the component is created */ onCreate(): void; /** @abstract Called when the UI should be updated with the current values */ onUpdate(): void; /** @abstract Called when the component is created */ onDestroy(): void; /** @abstract Called when an observed attribute changes */ attributeChangedCallback(name: string, oldValue: any, newValue: any): void; /** Internal attribute proxy */ private _attrProxy?; /** Helper for getting attributes */ get attr(): any; /** Internal for state */ private _stateProxy?; private _state; /** Helper for state variables. When anything inside here changes, onUpdate is called. */ get state(): any; /** Check if a child with the ID exists */ hasChild(id: string): boolean; /** Get a child element by it's ID */ child(id: string): BaseComponent; } /** Main embed web component */ declare class WebWeaverEmbed extends BaseComponent { /** Element tag name */ static tagName: string; /** Attributes we monitor for changes */ static observedAttributes: string[]; /** ChatGPT reference */ ai: IntelliWeave; /** Currently loaded configuration */ config: Partial; /** Responses the AI has suggested for the next user message */ suggestions: string[]; /** Previous open state for tracking UI open/close events */ private _previousOpenState; /** Session start time for tracking UI session duration */ private _uiSessionStartTime?; /** Constructor */ constructor(); /** Content */ html: () => string; /** On create */ onCreate(): void; private _lastLogo?; private _lastBackground?; private _lastTextColor?; private _lastDisplayMode?; private _lastLayout?; private _lastPersonaName?; private _lastHeaderLogo?; private 
_lastPositioningMode?; private _lastPositionX?; private _lastPositionY?; private _lastOffsetX?; private _lastOffsetY?; /** Apply persona-based color variants as CSS variables */ private applyPersonaColorVariants; /** Parse a color string to RGB (supports hex and rgb/rgba) */ private parseColorToRGB; /** Apply UI styles from config and attributes, prioritizing attributes */ private applyConfigStylesAndAttributes; /** Called on update */ onUpdate(): void; /** Called when the component is created */ onDestroy(): void; /** Called when the container is clicked */ onContainerClick(e: Event): void; /** Called when the logo is clicked */ onLogoClick(e: Event): void; /** Open the interaction panel */ open(): void; /** Close the interaction panel */ close(): void; /** Reset conversation UI */ resetConversation(): void; /** True if busy processing */ private _isProcessing; /** The element which will receive the current message from the AI */ currentOutputElement?: HTMLElement; /** Process input text from the user */ processInput(inputText: string): Promise; /** Called when the AI responds with some text */ onAIMessage(messages: TokenWindowGroupItemParams[], isPartial: boolean): Promise; /** Updates a text element */ updateTextElement(elementID: string, text: string): void; /** Updates an info block element */ updateInfoElement(elementID: string, text: string, iconType: string): void; /** Called when a suggestion button is clicked */ onSuggestionClick(e: Event, suggestion: string): void; /** Called when an LLM model is selected */ onLLMModelSelect(e: CustomEvent): void; /** Called when the content area is scrolled */ onContentScroll(e: Event): void; /** Displays whitelabeled branding for known apps */ private updateBrandingFor; } /** * This is used when someone does `import "web-weaver-embedded/component"`. This gives them a Web Component * that they can use in their HTML. */ export { WebWeaverEmbed as default };