import * as onnxruntime_web from 'onnxruntime-web';
import { InferenceSession, Tensor } from 'onnxruntime-web';
import { Optional } from 'utility-types';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import OpenAI from 'openai';
import Anthropic from '@anthropic-ai/sdk';
// NOTE(review): throughout this file generic type arguments appear to have been
// stripped (bare `Promise`, `Record`, fragments like `Omit, '…'>`), most likely by
// an HTML-tag-stripping step eating every `<...>`. Bare `Promise`/`Record` do not
// compile under tsc — confirm against the original .d.ts before regenerating.
/**
 * This is a utility class for dealing with the ONNX runtime and model loading.
 */
declare class ONNXModel {
    /** ONNX runtime loader. Example: `ONNXModel.lib = () => import('onnxruntime-web')` */
    static lib?: () => Promise;
    /** Loaded ONNX runtime */
    static onnx?: typeof onnxruntime_web;
    /** True if this device supports the ONNX runtime */
    static isSupported(): boolean;
    /** The loaded model */
    session: InferenceSession;
    /** Tensors used for state, ie they are passed from the output to the next input */
    stateTensors: {
        [key: string]: {
            tensor: Tensor;
            outputName: string;
        };
    };
    /** Constant tensors that will always be passed as-is to the input */
    constantTensors: ONNXTensors;
    /** Load a model */
    static load(modelURL: string): Promise;
    /** Constructor */
    constructor(session: InferenceSession);
    /** Create a new tensor zeroed from the specified definition */
    makeTensor(type: BufferType, dims: number[], fillWith?: number): onnxruntime_web.TypedTensor<"float32"> | onnxruntime_web.TypedTensor<"int8"> | onnxruntime_web.TypedTensor<"int16"> | onnxruntime_web.TypedTensor<"int32"> | onnxruntime_web.TypedTensor<"int64"> | onnxruntime_web.TypedTensor<"uint8"> | onnxruntime_web.TypedTensor<"uint16"> | onnxruntime_web.TypedTensor<"uint32"> | onnxruntime_web.TypedTensor<"uint64">;
    /** Register a static tensor, ie one that will be passed to the input on every call */
    registerConstant(name: string, tensor: Tensor): Tensor;
    /** Create and register a static tensor, ie one that will be passed to the input on every call */
    makeConstant(name: string, type: BufferType, dims: number[], fillWith?: number): Tensor;
    /** Register a state tensor, ie one that will be passed from the output to the input */
    registerState(inputName: string, outputName: string, tensor: Tensor): Tensor;
    /** Create and register a state tensor, ie one that will be passed from the output to the input */
    makeState(inputName: string, outputName: string, type: BufferType, dims: number[], fillWith?: number): Tensor;
    /** True if a previous run is still active */
    private _runActive;
    /** If true, will ignore a request to run if a previous run is still active. Otherwise, will throw an error */
    ignoreIfBusy: boolean;
    /** Run the model */
    run(inputs?: ONNXTensors): Promise;
    /** Reset state tensors to zero */
    resetState(): void;
}
/** A collection of named tensors */
type ONNXTensors = {
    [key: string]: Tensor;
};
/** Buffer types */
type BufferType = 'float32' | 'int8' | 'int16' | 'int32' | 'int64' | 'uint8' | 'uint16' | 'uint32' | 'uint64';
/** Convert an array of Float32Array audio buffers to a WAV file */
declare function audioToWav(sampleRate: number, buffers: Float32Array[]): File;
/** WebSocket that lets you send packets before connection is opened */
declare class BufferedWebSocket extends WebSocket {
    /** Pending data to send */
    pendingData: any[];
    /** Constructor */
    constructor(url: string);
    /** Send data */
    send(data: any): void;
    /** Called when the connection opens */
    private _onOpen;
}
/** Supported classes */
type SupportedArrayBuffers = Int8Array | Uint8Array | Int16Array | Uint16Array | Int32Array | Uint32Array | Float32Array | Float64Array | BigInt64Array;
/**
 * This class accepts an input of a variable amount of data elements, and outputs a fixed amount of data elements.
 */
declare class FixedBufferStream {
    // NOTE(review): `ArrayType` is referenced below but never declared in this file —
    // presumably a stripped generic parameter (e.g. `FixedBufferStream<ArrayType>`);
    // confirm against the original declaration.
    /** The class to use when creating buffers, ie Int16Array, Float32Array, etc */
    ArrayClass?: Function;
    /** Number of elements in the output buffers */
    outputBufferSize: number;
    /** Partial buffers */
    partialBuffers: ArrayType[];
    /** The amount of bytes already consumed from the first buffer in the `buffers` array */
    partialBufferOffset: number;
    /** Get number of queued elements */
    get queuedSize(): number;
    /**
     * Constructor
     *
     * @param outputBufferSize - Number of elements in the output buffers
     */
    constructor(ArrayClass: Function, outputBufferSize: number);
    /** Feed data in */
    feed(data: ArrayType): void;
    /** True if there's enough data to return a buffer from drain() */
    get canDrain(): boolean;
    /** Pull the next chunk of fixed data, or else returns null if no more data */
    drain(): ArrayType | null;
    /** Pad the buffer with zeroes to fill the remaining chunk */
    pad(): void;
}
/**
 * Audio sample-rate converter. Upsampling uses linear interpolation; downsampling
 * uses a multi-tap accumulator that averages every input sample (see multiTap()).
 * State (`lastWeight`, `lastOutput`, `tailExists`) carries over between resample()
 * calls so buffers can be streamed.
 *
 * @author Created by felix on 18-7-2.
 * @email 307253927@qq.com
 * @source https://github.com/felix307253927/resampler/blob/master/Resampler.js
 */
class Resampler {
    constructor(fromSampleRate, toSampleRate, channels, inputBufferSize) {
        if (!fromSampleRate || !toSampleRate || !channels) {
            throw(new Error("Invalid settings specified for the resampler."));
        }
        this.resampler = null;
        this.fromSampleRate = fromSampleRate;
        this.toSampleRate = toSampleRate;
        // NOTE(review): `|| 0` is dead — the guard above already rejected falsy channels.
        this.channels = channels || 0;
        this.inputBufferSize = inputBufferSize;
        this.initialize();
    }
    initialize() {
        if (this.fromSampleRate == this.toSampleRate) {
            // Setup resampler bypass - Resampler just returns what was passed through
            this.resampler = (buffer) => { return buffer };
            this.ratioWeight = 1;
        }
        else {
            if (this.fromSampleRate < this.toSampleRate) {
                // Use generic linear interpolation if upsampling,
                // as linear interpolation produces a gradient that we want
                // and works fine with two input sample points per output in this case.
                this.linearInterpolation();
                this.lastWeight = 1;
            }
            else {
                // Custom resampler I wrote that doesn't skip samples
                // like standard linear interpolation in high downsampling.
                // This is more accurate than linear interpolation on downsampling.
                this.multiTap();
                this.tailExists = false;
                this.lastWeight = 0;
            }
            // Initialize the internal buffer:
            this.initializeBuffers();
            this.ratioWeight = this.fromSampleRate / this.toSampleRate;
        }
    }
    // Return the first `sliceAmount` elements of the output buffer, falling back
    // through legacy buffer representations.
    bufferSlice(sliceAmount) {
        //Typed array and normal array buffer section referencing:
        try {
            return this.outputBuffer.subarray(0, sliceAmount);
        }
        catch (error) {
            try {
                //Regular array pass:
                this.outputBuffer.length = sliceAmount;
                return this.outputBuffer;
            }
            catch (error) {
                //Nightly Firefox 4 used to have the subarray function named as slice:
                return this.outputBuffer.slice(0, sliceAmount);
            }
        }
    }
    // Allocate output/state buffers sized for the configured conversion ratio.
    initializeBuffers() {
        // The 1.000000476837158203125 factor pads for float rounding when sizing the output.
        this.outputBufferSize = (Math.ceil(this.inputBufferSize * this.toSampleRate / this.fromSampleRate / this.channels * 1.000000476837158203125) + this.channels) + this.channels;
        try {
            this.outputBuffer = new Float32Array(this.outputBufferSize);
            this.lastOutput = new Float32Array(this.channels);
        }
        catch (error) {
            this.outputBuffer = [];
            this.lastOutput = [];
        }
    }
    // Install the upsampling (linear interpolation) resampler closure.
    linearInterpolation() {
        this.resampler = (buffer) => {
            let bufferLength = buffer.length,
                channels = this.channels,
                outLength,
                ratioWeight,
                weight,
                firstWeight,
                secondWeight,
                sourceOffset,
                outputOffset,
                outputBuffer,
                channel;
            if ((bufferLength % channels) !== 0) {
                throw(new Error("Buffer was of incorrect sample length."));
            }
            if (bufferLength <= 0) {
                return [];
            }
            outLength = this.outputBufferSize;
            ratioWeight = this.ratioWeight;
            weight = this.lastWeight;
            firstWeight = 0;
            secondWeight = 0;
            sourceOffset = 0;
            outputOffset = 0;
            outputBuffer = this.outputBuffer;
            // Interpolate between the tail of the previous buffer (lastOutput) and
            // the first frame of this buffer until weight crosses 1.
            for (; weight < 1; weight += ratioWeight) {
                secondWeight = weight % 1;
                firstWeight = 1 - secondWeight;
                this.lastWeight = weight % 1;
                for (channel = 0; channel < this.channels; ++channel) {
                    outputBuffer[outputOffset++] = (this.lastOutput[channel] * firstWeight) + (buffer[channel] * secondWeight);
                }
            }
            weight -= 1;
            // Main interpolation loop over interior sample pairs.
            for (bufferLength -= channels, sourceOffset = Math.floor(weight) * channels; outputOffset < outLength && sourceOffset < bufferLength;) {
                secondWeight = weight % 1;
                firstWeight = 1 - secondWeight;
                for (channel = 0; channel < this.channels; ++channel) {
                    outputBuffer[outputOffset++] = (buffer[sourceOffset + ((channel > 0) ? (channel) : 0)] * firstWeight) + (buffer[sourceOffset+(channels + channel)] * secondWeight);
                }
                weight += ratioWeight;
                sourceOffset = Math.floor(weight) * channels;
            }
            // Save the last consumed frame as the tail for the next call.
            for (channel = 0; channel < channels; ++channel) {
                this.lastOutput[channel] = buffer[sourceOffset++];
            }
            return this.bufferSlice(outputOffset);
        };
    }
    // Install the downsampling (multi-tap accumulating) resampler closure.
    multiTap() {
        this.resampler = (buffer) => {
            let bufferLength = buffer.length,
                outLength,
                output_variable_list,
                channels = this.channels,
                ratioWeight,
                weight,
                channel,
                actualPosition,
                amountToNext,
                alreadyProcessedTail,
                outputBuffer,
                outputOffset,
                currentPosition;
            if ((bufferLength % channels) !== 0) {
                throw(new Error("Buffer was of incorrect sample length."));
            }
            if (bufferLength <= 0) {
                return [];
            }
            outLength = this.outputBufferSize;
            output_variable_list = [];
            ratioWeight = this.ratioWeight;
            weight = 0;
            actualPosition = 0;
            amountToNext = 0;
            alreadyProcessedTail = !this.tailExists;
            this.tailExists = false;
            outputBuffer = this.outputBuffer;
            outputOffset = 0;
            currentPosition = 0;
            for (channel = 0; channel < channels; ++channel) {
                output_variable_list[channel] = 0;
            }
            do {
                if (alreadyProcessedTail) {
                    // Start a fresh output sample.
                    weight = ratioWeight;
                    for (channel = 0; channel < channels; ++channel) {
                        output_variable_list[channel] = 0;
                    }
                }
                else {
                    // Resume the partially-accumulated sample left over from the previous buffer.
                    weight = this.lastWeight;
                    for (channel = 0; channel < channels; ++channel) {
                        output_variable_list[channel] = this.lastOutput[channel];
                    }
                    alreadyProcessedTail = true;
                }
                // Accumulate `weight` input samples' worth of data into the current output sample.
                while (weight > 0 && actualPosition < bufferLength) {
                    amountToNext = 1 + actualPosition - currentPosition;
                    if (weight >= amountToNext) {
                        for (channel = 0; channel < channels; ++channel) {
                            output_variable_list[channel] += buffer[actualPosition++] * amountToNext;
                        }
                        currentPosition = actualPosition;
                        weight -= amountToNext;
                    }
                    else {
                        // Partial contribution from the current input sample.
                        for (channel = 0; channel < channels; ++channel) {
                            output_variable_list[channel] += buffer[actualPosition + ((channel > 0) ? channel : 0)] * weight;
                        }
                        currentPosition += weight;
                        weight = 0;
                        break;
                    }
                }
                if (weight === 0) {
                    // Output sample fully accumulated: emit the average.
                    for (channel = 0; channel < channels; ++channel) {
                        outputBuffer[outputOffset++] = output_variable_list[channel] / ratioWeight;
                    }
                }
                else {
                    // Input exhausted mid-sample: stash the partial accumulation for the next call.
                    this.lastWeight = weight;
                    for (channel = 0; channel < channels; ++channel) {
                        this.lastOutput[channel] = output_variable_list[channel];
                    }
                    this.tailExists = true;
                    break;
                }
            } while (actualPosition < bufferLength && outputOffset < outLength);
            return this.bufferSlice(outputOffset);
        };
    }
    // Resample one buffer. NOTE(review): this resets lastWeight/tailExists and
    // reallocates buffers on every call, discarding inter-buffer state — which the
    // closures above otherwise carry; confirm whether this reset is intentional.
    resample(buffer) {
        if (this.fromSampleRate == this.toSampleRate) {
            this.ratioWeight = 1;
        }
        else {
            if (this.fromSampleRate < this.toSampleRate) {
                this.lastWeight = 1;
            }
            else {
                this.tailExists = false;
                this.lastWeight = 0;
            }
            this.initializeBuffers();
            this.ratioWeight = this.fromSampleRate / this.toSampleRate;
        }
        return this.resampler(buffer)
    }
}
/**
 * Convert a Float32Array of [-1, 1] float samples into a signed Int16Array.
 * Samples are clamped to [-1, 1] first; negatives scale by 0x8000, positives by 0x7FFF.
 * @param {Float32Array} input float32 samples, expected in [-1, 1]
 * @return {Int16Array} signed 16-bit PCM
 */
function floatTo16BitPCM(input) {
    let i = input.length;
    let output = new Int16Array(i);
    while (i--) {
        let s = Math.max(-1, Math.min(1, input[i]));
        output[i] = (s < 0 ? s * 0x8000 : s * 0x7FFF);
    }
    return output;
}
/**
 * Convert a Float32Array of [-1, 1] float samples into a BigInt64Array.
 * NOTE(review): each sample is scaled to 16-bit range and then multiplied by
 * 0x100000000000n (2^44), so the result does NOT span the full int64 range
 * (max ≈ 2^59). The commented-out expression suggests full-scale conversion was
 * considered — confirm the intended scaling with downstream consumers before changing.
 * @param {Float32Array} input float32 samples, expected in [-1, 1]
 * @return {BigInt64Array} 64-bit PCM (see scaling note above)
 */
function floatTo64BitPCM(input) {
    let i = input.length;
    let output = new BigInt64Array(i);
    while (i--) {
        let s = Math.max(-1, Math.min(1, input[i]));
        output[i] = BigInt(Math.floor((s < 0 ? s * 0x8000 : s * 0x7FFF))) * 0x100000000000n;//(s < 0 ? s : s * 0x7FFFFFFFFFFFFFFFn);
    }
    return output;
}
/**
 * Convert an Int16Array of PCM samples back into a Float32Array in [-1, 1].
 * NOTE(review): the `int >= 0x8000` branch can only trigger if the input actually
 * holds unsigned 16-bit values (e.g. a Uint16Array passed in); true Int16Array
 * elements are already signed. Divisors are asymmetric (0x8000 vs 0x7FFF),
 * mirroring floatTo16BitPCM.
 * @param {Int16Array} input signed 16-bit PCM
 * @return {Float32Array} float32 samples in [-1, 1]
 */
function int16ToFloat32BitPCM(input) {
    let i = input.length;
    let output = new Float32Array(i);
    while (i--) {
        let int = input[i];
        output[i] = (int >= 0x8000) ? -(0x10000 - int) / 0x8000 : int / 0x7FFF;
    }
    return output;
}
/**
 * This class helps organize groups of tokenized text along with removing items when the window is full.
 */
declare class TokenWindow {
    /** Token window size */
    size: number;
    /** Token groups */
    groups: TokenWindowGroup[];
    /** Create a new group */
    createGroup(id: string): TokenWindowGroup;
    /** Get a group */
    group(id: string): TokenWindowGroup | undefined;
    /** Counts tokens in the specified text */
    static countTokensInText(text: string): number;
    /** Calculate current tokens in all groups */
    countTokens(): number;
    /** Remove overflow from all groups. */
    removeOverflow(): void;
    /** Remove one overflow item. Returns null if no items were able to be removed. */
    private removeOneItem;
}
/** A token group. */
declare class TokenWindowGroup {
    /** Group ID */
    id: string;
    /** List of items */
    items: TokenWindowGroupItem[];
    /**
     * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
     * groups have a weight of 1, each group removes items equally if full. If one has a weight of 2 while the rest are 1,
     * that group will be allowed to keep double the amount of items.
     */
    weight: number;
    /** Current total token count, computed automatically. Don't update this value manually. */
    tokenCount: number;
    /** Group item separator. This text is added in between each item in the token window. */
    separator: string;
    /** Token count padding added to each item. */
    private itemPadding;
    /**
     * Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host.
     */
    setItemPadding(padding: number): this;
    /** Sort function */
    private sortFunction;
    /** Set sort function */
    sortBy(sortFunction: (a: TokenWindowGroupItem, b: TokenWindowGroupItem) => number): this;
    /** Set separator. This text is added in between each item in the token window. */
    setSeparator(separator: string): this;
    /**
     * Set weight. Weight controls how many items from this group should be kept
     * in relation to the entire window. For example if all groups have a weight
     * of 1, each group removes items equally if full. If one has a weight of 2
     * while the rest are 1, that group will be allowed to keep double the
     * amount of items.
     */
    setWeight(weight: number): this;
    /** Recalculate all tokens. Note this may take a while. */
    recalculateTokens(): void;
    /** Add an item to the group */
    add(item: string | TokenWindowGroupItemParams): TokenWindowGroupItem;
    /** Manually remove an item */
    remove(itemId: string): boolean;
    /** Get all items as a string */
    getAllAsString(): string;
    /** Get all items. Doesn't return disabled items. */
    getAll(): TokenWindowGroupItem[];
    /** Remove all items from this group */
    empty(): void;
}
/** Token group item section types */
declare enum TokenWindowGroupItemSectionType {
    /** Text items represent plain text. */
    Text = "text",
    /** Tool call items represent a tool call requested by the AI. */
    ToolCall = "tool_call",
    /** Tool result items represent the result of a tool call. */
    ToolResult = "tool_result",
    /** Thinking section */
    Thinking = "thinking",
    /** Other item types */
    Other = "other"
}
/** Token group item */
interface TokenWindowGroupItem {
    /** Each item must have a unique ID. */
    id: string;
    /** True if this item should never be removed */
    cannotRemove?: boolean;
    /** Sorting order. If not specified, uses dateAdded instead. */
    sortOrder: number;
    /** Date this item was added */
    dateAdded: number;
    /** Token count in the content */
    tokenCount: number;
    /** This is the actual item that gets sent to the APIs.
     * It will be in whatever format is required for the associated API.
     * NOTE(review): `DataType` is undeclared here — presumably a stripped generic
     * parameter of this interface; confirm upstream. */
    data?: DataType;
    /** If disabled, this item will not be included and will not add to the token count. */
    disabled?: boolean;
    /** Message source, ie was this message created by the user, or by the AI? */
    source: 'user' | 'assistant';
    /**
     * The string content of the item, or a summary of it. This is an autogenerated field, updated when the item is added/updated in the token window group.
     * If `data` is a string, this will be the same as `data`. If `data` is more complex, this will be a text representation of all items in the `sections` array.
     *
     * Note: When the response contains text and tool calls, this will add in a summary of what's happening. For better displaying, use the `sections` array.
     */
    text?: string;
    /** Message sections */
    sections?: TokenWindowGroupItemSection[];
    /** If this message was generated by the AI, this contains the token usage for this message. */
    usage?: {
        /** Number of tokens consumed from the data passed to the AI */
        inputTokens: number;
        /** Number of input tokens that were used in token caching */
        cachedInputTokens: number;
        /** Number of tokens consumed by the AI generating output */
        outputTokens: number;
        /** Total token usage */
        totalTokens: number;
    };
    /** True if this item is still being streamed */
    streamingInProgress?: boolean;
}
/** A section of a message returned by the AI */
interface TokenWindowGroupItemSection {
    /** Section type */
    type: TokenWindowGroupItemSectionType;
    /** Text content when this section represents text or thinking */
    text?: string;
    /** The raw tool name the AI requested to be called. */
    toolName?: string;
    /** The ID of the KB action this tool call maps to, if any */
    toolKbID?: string;
    /** The name of the KB action this tool call maps to, if any */
    toolKbName?: string;
    /** The parameters the AI requested to be sent to the tool. Only available if type == 'tool_call' */
    toolParameters?: any;
    /** Successful response of the tool call. Will be null if toolErrorResponse is set. */
    toolSuccessResponse?: any;
    /** Error response of the tool call. Will be null if toolSuccessResponse is set. */
    toolErrorResponse?: string;
    /** Tool call ID. This can be used to match a tool call request with its result. */
    toolCallInstanceID?: string;
    /** True if this tool call should be hidden in the UI */
    toolCallHiddenInUI?: 'always' | 'after-complete';
}
/** Token window group item input, without the autogenerated fields.
 * NOTE(review): this alias is visibly mangled — `Omit,` has lost its type
 * arguments (likely originally nested `Omit<Optional<TokenWindowGroupItem…>>`);
 * reconstruct from the original source rather than guessing. */
type TokenWindowGroupItemParams = Omit, 'id' | 'dateAdded' | 'sortOrder' | 'text' | 'source' | 'sections'>, 'tokenCount'>;
/**
 * Speech output
 *
 * - event `speechfilter` - Allows modification or cancellation of speech
 * - event `speechstart` - When the speech starts
 * - event `speechend` - When the speech ends
 */
declare class WebWeaverSpeechOutput extends EventTarget {
    /** Reference to the AI */
    private ai?;
    /** Automatically speak output from the AI */
    autoSpeak: boolean;
    /** If enabled, connections will be pre-emptively opened to speed up text-to-speech response times, if possible */
    preemptiveConnection: boolean;
    /** Constructor */
    constructor(ai: IntelliWeave);
    /** Message IDs we've processed */
    private processedMessages;
    /** Called when the AI responds */
    onOutputFromAI(e: CustomEvent): void;
    /** Current player vars */
    private currentPlayerVolume?;
    private currentPlayer?;
    /** The audio analyser node */
    private analyserNode?;
    /** The audio analyser buffer */
    private analyserBuffer?;
    /** @private Maximum volume heard this session */
    private maxVolumeHeard;
    /** Get current (realtime) audio output volume level, from 0 to 1 */
    get volumeLevel(): number;
    /** Queued messages to speak next */
    private _queuedText;
    /** Speak the text */
    speak(text: string): Promise;
    /** True while the speak queue is being drained */
    private _queueActive;
    _runQueue(): Promise;
    /** ElevenLabs connection pre-cache */
    private _elevenLabsPrecachedConnection?;
    private _getElevenLabsConnection;
    private _speakWithLock;
    /** True if currently playing audio */
    get isSpeaking(): boolean;
    /** Interrupt the previously playing audio */
    interrupt(): Promise;
    /** Called when the speech output ends */
    onSpeechEnd(): void;
}
/**
 * An AudioWorklet module that records data from input and sends it to the host.
 *
 * - event `data` - Fired when data is available to be read.
 */
declare class PCMReceiverNode extends AudioWorkletNode {
    /** @type {'int16' | 'float32'} The output data format.
     * NOTE(review): the constructor also accepts 'int64' and onData can receive a
     * BigInt64Array — this doc type looks stale; confirm the supported formats. */
    format: string;
    /** Register worklet with an audio context */
    static registerModule(context: AudioContext): Promise;
    /**
     * Creates a new PCMRecorderNode ready to receive PCM data.
     *
     * @param context - The audio context to use.
     * @param sampleRate - The sample rate of the output data stream.
     * @param format - The format of the output data stream.
     * @param bufferSize - The size of the output buffer in elements (Int16Array or Float32Array items, depending on `format`).
     */
    constructor(context: AudioContext, sampleRate: number, format: 'int16' | 'int64' | 'float32', bufferSize: number);
    /** @private Called when a message is received from the worklet */
    onWorkletMessage(e: MessageEvent): void;
    /** Called when data is received */
    onData(buffer: Float32Array | Int16Array | BigInt64Array): void;
}
/**
 * An AudioNode which sends events for when speech is detected
 *
 * - event `speechstart` - Fired when speech is detected
 * - event `speechend` - Fired when speech ends
 */
declare class VoiceDetectionNode extends PCMReceiverNode {
    /** True if voice is currently being detected */
    isVoiceActive: boolean;
    /** True if voice is active but may be ending soon */
    get isVoicePossiblyEnding(): boolean;
    /** Last date that voice was detected */
    lastVoiceActiveDate: number;
    /** Amount of time to wait after voice detection to detect that it has ended */
    voiceEndTimeout: number;
    /** Detection sensitivity, if the detection model outputs a number bigger than this it will be considered voice */
    sensitivity: number;
    /** Sensitivity threshold to end speaking.
     * NOTE(review): property name "sentivityEnd" is a typo (sensitivityEnd), but it
     * is public API surface — renaming would break callers. */
    sentivityEnd: number;
    /** VAD model */
    static vadModelURL: string;
    /** Loaded VAD model */
    private vad?;
    /** Sample rate */
    get sampleRate(): 16000 | 8000;
    /** Number of samples */
    get numberOfSamples(): number;
    /** Number of sample chunks */
    get numberOfSampleChunks(): number;
    /** Output buffer size */
    get outputBufferSize(): number;
    /** True if the VAD model has been loaded */
    get isModelLoaded(): boolean;
    /** The time when to next reset the VAD model */
    nextVadReset: number;
    /** The current probability of active voice */
    currentProbability: number;
    /** Constructor */
    constructor(audioContext: AudioContext);
    /** Start loading */
    loadModel(): Promise;
    private _lastVoiceActive;
    /** Called when data is received */
    onData(buffer: Float32Array): Promise;
    /** Called when speech is detected */
    onSpeechStart(): void;
    /** Called when speech ends */
    onSpeechEnd(): void;
}
/**
 * An AudioNode which isolates speech and outputs the audio data. Since we are reusing the VAD model node,
 * output data is in 8000Hz Float32 format.
 *
 * - event `voicedata` - Fired when a chunk of voice is detected. `data` contains the recorded chunk of voice in a Float32Array.
 * - event `voicedataend` - Fired when this chunk of voice ends. `data` contains an array of Float32Array containing the entirety of the recorded voice.
 */
declare class VoiceChunkOutputNode extends VoiceDetectionNode {
    /** Stored buffers */
    buffers: Float32Array[];
    /** Recorded audio chunks with voice in it */
    recordedBuffers: Float32Array[];
    /** Last active state */
    _voiceRecording: boolean;
    /** Amount of audio data in the buffer, in seconds */
    get bufferDuration(): number;
    /** Amount of data to keep from before the user started speaking */
    backBufferDurationSeconds: number;
    /** Called when data is received */
    onData(buffer: Float32Array): Promise;
    /** Called when a chunk of voice is recorded */
    onVoiceChunk(buffer: Float32Array): void;
    /** Called when the voice recording ends */
    onVoiceEnd(buffers: Float32Array[]): void;
}
/**
 * This AudioNode uses OpenAI's Whisper model to transcribe spoken speech to text.
 *
 * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
 */
declare class OpenAITranscriptionNode extends VoiceChunkOutputNode {
    /** OpenAI API key */
    apiKey: string;
    /** Pending buffers */
    private pendingBuffers;
    /** Last request */
    private lastRequestAbortController?;
    /** True if currently transcribing */
    isTranscribing: boolean;
    /** Constructor */
    constructor(audioContext: AudioContext, apiKey: string);
    /** Called when the voice recording ends */
    onVoiceEnd(buffers: Float32Array[]): Promise;
    /** Called when a transcription is ready */
    onVoiceTranscription(text: string): void;
}
/**
 * This AudioNode uses IntelliWeave's servers to transcribe spoken speech to text.
 *
 * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
 */
declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
    /** Debug: Export each recording as a wav file for download */
    static debugExportWav: boolean;
    /** Server address for transcription */
    apiAddress: string;
    /** IntelliWeave API key */
    apiKey: string;
    /** WebSocket connection */
    private ws?;
    /** True if currently transcribing */
    isTranscribing: boolean;
    /** WebSocket shutdown timer */
    private shutdownTimer?;
    /** Constructor */
    constructor(audioContext: AudioContext, apiKey: string);
    /** Called when a voice chunk is received */
    onVoiceChunk(buffer: Float32Array): Promise;
    /** Called when the voice recording ends */
    onVoiceEnd(buffers: Float32Array[]): Promise;
    /** Called when a transcription is ready */
    onVoiceTranscription(text: string): void;
    /** Called when the WebSocket is closed */
    onSocketClose(): void;
}
/**
 * This AudioNode uses ElevenLabs to transcribe spoken speech to text.
 *
 * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
 */
declare class ElevenLabsTranscriptionNode extends VoiceChunkOutputNode {
    /** ElevenLabs API key */
    apiKey: string;
    /** ElevenLabs stream connection */
    private connection?;
    /** True if currently transcribing */
    isTranscribing: boolean;
    /** WebSocket shutdown timer */
    private shutdownTimer?;
    /** Constructor */
    constructor(audioContext: AudioContext, apiKey: string);
    /** Called when a voice chunk is received */
    onVoiceChunk(buffer: Float32Array): Promise;
    /** Start reading the stream */
    private startReading;
    /** Called when the voice recording ends */
    onVoiceEnd(buffers: Float32Array[]): Promise;
    /** Called when a transcription is ready */
    onVoiceTranscription(text: string): void;
}
/**
 * Handles speech recognition from the microphone
 *
 * - event `speechstart` - We have detected the user started speaking
 * - event `speechend` - We have detected the user stopped speaking
 * - event `speech` - Speech recognition result
 * - event `start` - Speech recognition started
 * - event `end` - Speech recognition ended
 */
declare class WebWeaverSpeechRecognition extends EventTarget {
    /** Reference to the AI */
    ai?: IntelliWeave;
    /** True if recognition is running */
    isRunning: boolean;
    /** The audio analyser node */
    private analyserNode?;
    /** The audio analyser buffer */
    private analyserBuffer?;
    /** The microphone stream */
    micStream?: MediaStream;
    /** Recording start time for tracking duration */
    private recordingStartTime?;
    /** Returns true if speech recognition is supported by this persona and browser */
    get isSupported(): boolean;
    /** Currently active voice detection node */
    voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode | ElevenLabsTranscriptionNode;
    /** Constructor */
    constructor(ai: IntelliWeave);
    private _skipEvents;
    /** Start recognition */
    start(): Promise;
    /** Stop recognition */
    stop(): void;
    /** @private Maximum volume heard this session */
    maxVolumeHeard: number;
    /** Get current (realtime) microphone volume level, from 0 to 1 */
    get volumeLevel(): number;
    /** True if currently detecting words being spoken */
    get wordsCurrentlyBeingSpoken(): boolean;
    /** True if currently transcribing voice to text */
    get isTranscribing(): boolean;
    /** Called when speech has been recorded */
    onTranscription(e: CustomEvent): void;
    /** Called to reset the speech recognizer */
    reset(): Promise;
}
/** Handles creating and managing the AudioContext */
declare class AudioSystem {
    /** Reference to the AI */
    private ai?;
    /** The speech recognition module. */
    speechRecognition: WebWeaverSpeechRecognition;
    /** The speech output module. */
    speechOutput: WebWeaverSpeechOutput;
    /** The audio context */
    context?: AudioContext;
    /** List of active named locks */
    locks: string[];
    /** Returns true if speech recognition and output is supported by this persona and browser */
    static get isSupported(): boolean;
    /** Constructor */
    constructor(ai: IntelliWeave);
    /** Register worklets */
    static registerModules(context: AudioContext): Promise;
    /** Create a named lock to enable the audio system */
    beginAccess(namedLock: string): Promise;
    /** Stop accessing the audio system */
    endAccess(namedLock: string): void;
}
/**
 * This class allows you to use the AI as a logic engine, data extractor, etc.
 */
declare class AILogic {
    /** Reference to the AI */
    private ai?;
    /** Constructor */
    constructor(ai: IntelliWeave);
    /** Ask the AI a yes/no question associated with the specified data. Data must be JSON-serializable, or a string of any kind of data. */
    boolean(config: IntelliWeaveInstructConfig): Promise;
    /** Ask the AI to select a choice from a list of options. */
    choose(config: IntelliWeaveInstructConfig & {
        /** List of choices the AI can pick from. */
        options: string[];
    }): Promise;
    /**
     * Ask the AI to extract data from input data. The AI will return the extracted data. Possibly an array of multiple extractions.
     */
    extract(config: IntelliWeaveInstructConfig & {
        /** Allow multiple items to be returned. If true, returns an array instead of an object. */
        allowMultiple?: boolean;
        /** Fields to extract in each object. */
        extractions: {
            /** Field name */
            name: string;
            /** Field data type */
            type: 'string' | 'number' | 'boolean' | 'date' | 'email' | 'phone' | 'address';
            /** Describe to the AI what data to put in this field. */
            description?: string;
        }[];
    }): Promise;
    /**
     * Generate a Markdown document based on the data from the user.
     *
     * NOTE(review): `Omit` below has lost its type arguments (stripped generics);
     * restore from the original source.
     *
     * @param config Instruct config.
     * @returns A markdown document.
     */
    generateMarkdown(config: Omit): Promise;
    /**
     * Perform an instruction.
     *
     * @param config Instruct config.
     * @returns The final response from the AI.
     */
    instruct(config: IntelliWeaveInstructConfig): Promise;
}
/** Config for any instruct call */
interface IntelliWeaveInstructConfig {
    /** Instruction */
    instruction: string;
    /** Input data or query to process */
    data: any;
    /** Whether to allow the AI to use the knowledge base or not. If false, the AI will not use the knowledge base. */
    allowKB?: boolean;
    /** Callback that will be called when streaming the response. Each call will contain the full text that has been generated so far. */
    callback?: (txt: string) => void;
}
/**
 * Allows an MCP server to be used as a knowledge source for IntelliWeave.
 */
declare class MCPKnowledgeClient {
    /** MCP client */
    client?: Client;
    /** All tools discovered on the MCP server. Only available after connect() has completed.
     * NOTE(review): `Awaited>` has lost its inner type argument (stripped generics). */
    tools: Awaited>['tools'];
    /** All tools discovered, mapped to IntelliWeave knowledge base actions */
    iwActions: KnowledgeBaseItem[];
    /** Statistics */
    stats: {
        toolsCalled: number;
    };
    /** Configuration */
    config: {
        /** Source ID */
        id?: string;
        /** URL to the MCP server endpoint */
        baseURL?: string;
        /** Custom connection function. If specified, baseURL is optional. */
        connect?: () => Promise;
        /**
         * The name of the tool which provides knowledge searching. If specified, the search() will exclude this function and instead
         * call it and show returned results. If not specified, the search() will just return all tools.
         */
        searchToolName?: string;
        /** Keep search function available for the AI to use. */
        searchToolVisible?: boolean;
        /** Use the IntelliWeave proxy */
        proxy?: {
            /** If true, will send requests via the IntelliWeave MCP proxy */
            enabled?: boolean;
            /** The URL of the proxy server, defaults to the standard IntelliWeave proxy */
            url?: string;
            /** IntelliWeave API key */
            apiKey?: string;
        };
        /** Pass extra headers to the MCP server.
         * NOTE(review): bare `Record` is missing its two type arguments (stripped generics). */
        headers?: Record;
    };
    /** Constructor */
    constructor(config: MCPKnowledgeClient['config']);
    /** In-progress connection attempt */
    private connectionPromise?;
    /** Connect to the client.
     * NOTE(review): `Promise>` here and below is mangled (stripped generics). */
    connect(): Promise>;
    connectInternal(): Promise>;
    /** Disconnect from server */
    disconnect(): Promise;
    /** Fetch list of tools from the MCP server */
    private fetchTools;
    /** Cache last search result */
    lastSearchQuery: string;
    lastSearchResults: KnowledgeBaseItem[];
    /** Perform a search query */
    search(query: string): Promise;
    /** Perform search using the configured search function */
    private performSearchCall;
    /** Perform tool call.
*/ private performToolCall; } // ================================================================================================== // JSON Schema Draft 07 // ================================================================================================== // https://tools.ietf.org/html/draft-handrews-json-schema-validation-01 // -------------------------------------------------------------------------------------------------- /** * Primitive type * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1 */ type JSONSchema7TypeName = | "string" // | "number" | "integer" | "boolean" | "object" | "array" | "null"; /** * Primitive type * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1 */ type JSONSchema7Type = | string // | number | boolean | JSONSchema7Object | JSONSchema7Array | null; // Workaround for infinite type recursion interface JSONSchema7Object { [key: string]: JSONSchema7Type; } // Workaround for infinite type recursion // https://github.com/Microsoft/TypeScript/issues/3496#issuecomment-128553540 interface JSONSchema7Array extends Array {} /** * Meta schema * * Recommended values: * - 'http://json-schema.org/schema#' * - 'http://json-schema.org/hyper-schema#' * - 'http://json-schema.org/draft-07/schema#' * - 'http://json-schema.org/draft-07/hyper-schema#' * * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-5 */ type JSONSchema7Version = string; /** * JSON Schema v7 * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01 */ type JSONSchema7Definition = JSONSchema7 | boolean; interface JSONSchema7 { $id?: string | undefined; $ref?: string | undefined; $schema?: JSONSchema7Version | undefined; $comment?: string | undefined; /** * @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-00#section-8.2.4 * @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#appendix-A */ $defs?: { [key: 
string]: JSONSchema7Definition; } | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1 */ type?: JSONSchema7TypeName | JSONSchema7TypeName[] | undefined; enum?: JSONSchema7Type[] | undefined; const?: JSONSchema7Type | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.2 */ multipleOf?: number | undefined; maximum?: number | undefined; exclusiveMaximum?: number | undefined; minimum?: number | undefined; exclusiveMinimum?: number | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.3 */ maxLength?: number | undefined; minLength?: number | undefined; pattern?: string | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.4 */ items?: JSONSchema7Definition | JSONSchema7Definition[] | undefined; additionalItems?: JSONSchema7Definition | undefined; maxItems?: number | undefined; minItems?: number | undefined; uniqueItems?: boolean | undefined; contains?: JSONSchema7Definition | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.5 */ maxProperties?: number | undefined; minProperties?: number | undefined; required?: string[] | undefined; properties?: { [key: string]: JSONSchema7Definition; } | undefined; patternProperties?: { [key: string]: JSONSchema7Definition; } | undefined; additionalProperties?: JSONSchema7Definition | undefined; dependencies?: { [key: string]: JSONSchema7Definition | string[]; } | undefined; propertyNames?: JSONSchema7Definition | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.6 */ if?: JSONSchema7Definition | undefined; then?: JSONSchema7Definition | undefined; else?: JSONSchema7Definition | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.7 */ allOf?: JSONSchema7Definition[] | undefined; anyOf?: 
JSONSchema7Definition[] | undefined; oneOf?: JSONSchema7Definition[] | undefined; not?: JSONSchema7Definition | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-7 */ format?: string | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-8 */ contentMediaType?: string | undefined; contentEncoding?: string | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-9 */ definitions?: { [key: string]: JSONSchema7Definition; } | undefined; /** * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-10 */ title?: string | undefined; description?: string | undefined; default?: JSONSchema7Type | undefined; readOnly?: boolean | undefined; writeOnly?: boolean | undefined; examples?: JSONSchema7Type | undefined; } /** Chat config options */ interface ChatBaseConfig { /** API key */ apiKey: string; /** Provider ID */ providerID?: string; /** Endpoint URL if using a custom URL */ endpoint: string; /** LLM model to use */ model: string; /** System message to describe to the AI how to behave. */ systemMessage: string; /** User ID used to uniquely identify users in ChatGPT's API */ userID: string; /** If true, streams the text responses from the API */ stream: boolean; /** Amount of estimated tokens to keep when trimming */ maxTokens: number; /** Callback before the AI sends info to the LLM */ onBeforeMessageProcessing?: () => void; /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. 
*/ onAIMessage?: (output: TokenWindowGroupItemParams[], isPartial: boolean) => void; /** Callback when the AI starts performing an action */ onAIToolStart?: (toolName: string, input: any) => void; } /** Chat tool config */ interface ChatBaseToolConfig { /** Name of the tool, eg "perform_search" */ name: string; /** Description of the tool */ description: string; /** Parameters for the tool */ params: JSONSchema7; /** Callback function to process the tool */ callback: (params: any) => any; /** If true, this item can be removed if there's not enough context available. */ canRemove?: boolean; /** Knowledge base item this tool use represents */ kbItem?: KnowledgeBaseItem; } /** * API for interacting with chat APIs. */ declare class ChatBase< /** Format for messages in the token window */ DataType = any, /** Optional extended config */ ConfigFormat extends ChatBaseConfig = ChatBaseConfig> { /** ID */ id: string; /** Metadata */ metadata: any; /** Config */ config: ConfigFormat; /** The maximum tool calls in sequence the AI can make before an error is thrown. */ maxToolCallsPerMessage: number; /** Statistics */ stats: { /** Total tokens used this session */ tokensUsed: number; }; /** Token window management */ tokenWindow: TokenWindow; /** Token window group used for the context message */ get contextGroup(): TokenWindowGroup; /** Token window group used for tools / actions */ get toolGroup(): TokenWindowGroup; /** Token window group used for messages */ get messageGroup(): TokenWindowGroup; /** Get the API base after stripping out exact endpoints, or undefined for the default */ getBaseURL(): string | undefined; /** Constructor */ constructor(config: ConfigFormat); /** Send a message, and get the response as a string. 
*/ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams[]) => void): Promise[]>; /** Add a user message to the message history */ addUserMessage(message: string): void; /** Add an assistant message to the message history */ addAssistantMessage(message: string): void; /** Helper to add a plain text item */ protected addTextMessage(text: string, source: 'user' | 'assistant', data: DataType): void; /** Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */ onBeforeIncomingMessage(message: DataType): void; /** Reset the conversation */ resetConversation(): void; /** Trim message list */ trimMessages(): Promise; /** Register a tool. */ registerTool(tool: ChatBaseToolConfig): TokenWindowGroupItem; /** Find a tool based on the AI-safe name */ protected findToolBySafeName(toolSafeName: string): ChatBaseToolConfig | undefined; /** Execute the specified tool. Throws an error if the tool is undefined. */ protected executeTool(tool: ChatBaseToolConfig | undefined, input: any): Promise; } /** Parses the response from `IntelliWeave.sendMessage()` or a collection of message items. */ declare class IntelliWeaveMessageParser { /** New messages produced after sendMessage() was called */ messages: TokenWindowGroupItemParams[]; /** Constructor */ constructor(items: TokenWindowGroupItemParams[]); /** Plain text output from the AI */ text(): string; /** Total token usage */ tokenUsage(): { cachedInputTokens: number; inputTokens: number; outputTokens: number; totalTokens: number; }; /** Component sections for display */ sections(): TokenWindowGroupItemParams['sections']; /** List all tool calls that took place */ toolCalls(): TokenWindowGroupItemSection[]; /** Find the response for a tool call */ toolResult(toolCallInstanceID: string): TokenWindowGroupItemSection | null; } /** Handles subagents. This allows your Persona to use other Personas as tools. 
*/ declare class SubAgents { /** Reference to the main IntelliWeave instance */ ai: IntelliWeave; /** Constructor */ constructor(ai: IntelliWeave); /** Subagents */ subagents: SubAgentConfig[]; /** Cached subagents */ cachedSubagents: Record; /** Register a sub-agent */ register(config: SubAgentConfig): void; /** Unregister subagent */ remove(id: string): void; /** Run the subagent */ runQuery(config: SubAgentConfig, query: string): Promise; } /** Sub-agent config */ interface SubAgentConfig { /** ID of the sub-agent */ id: string; /** API key for the persona. If not specified, uses the same api key as the main agent. */ apiKey?: string; /** Name of the sub-agent */ name?: string; /** Instructions for the main agent to use this sub agent */ usageInstructions?: string; /** If true, will remove all Persona knowledge entries */ clearExistingKnowledge?: boolean; /** Disable RAG search for subagents. If true, only KB entries with isContext=true will be used. */ disableRagSearch?: boolean; /** Extra knowledge base sources for the sub-agent */ knowledge?: KnowledgeFetcher; /** Optional extra configuration for the subagent instance */ config?: Partial; /** Called when the subagent is loaded */ onAgentLoaded?: (agent: IntelliWeave) => Promise | void; } /** Built-in action flags for the persona */ interface BuiltInActionFlags { /** Allows the AI to display follow-up suggestions */ allowSuggestionButtons?: boolean; /** Allows the AI to open a new tab pointing to any URL */ allowOpenNewTab?: boolean; /** Allows the AI to perform an arbitrary HTTP request */ allowHttpRequests?: boolean; /** Allows the AI to set the path of the current page */ allowChangeRoute?: boolean; /** Allows the AI to craft an email and use mailto: link */ allowSendEmail?: boolean; /** Makes the AI only use information from knowledge bases, not general knowledge */ onlyUseKnowledgeBase?: boolean; } /** Persona config received from the hub */ interface WebWeaverGPTConfig { /** ID */ id: string; /** Chat 
API config */ model: ChatBaseConfig; /** If true, message history will be sent to the IntelliWeave hub for analysis */ analytics?: boolean; /** Persona name */ name?: string; /** Instructions to the AI */ instructions?: string; /** Introduction message, used in the automatic UI */ introductionMessage?: string; /** URL to the logo image to display in the chat UI */ logo?: string; /** Background color or gradient or image for the chat UI */ background?: string; /** Text color for the chat UI */ textColor?: string; /** Display mode: 'closed' (default - starts minimized) or 'open' (always open) */ displayMode?: 'closed' | 'open'; /** Layout preset: 'widget' (default) or 'fullscreen' */ layout?: 'widget' | 'fullscreen'; /** Positioning mode: 'fixed' (default - floats on page) or 'container' (fills parent container) */ positioningMode?: 'fixed' | 'container'; /** Horizontal position: 'left' or 'right' (default: 'right') - only used when positioningMode is 'fixed' */ positionX?: 'left' | 'right'; /** Vertical position: 'top' or 'bottom' (default: 'bottom') - only used when positioningMode is 'fixed' */ positionY?: 'top' | 'bottom'; /** Horizontal offset from edge in pixels (default: 20) - only used when positioningMode is 'fixed' */ offsetX?: number; /** Vertical offset from edge in pixels (default: 20) - only used when positioningMode is 'fixed' */ offsetY?: number; /** Identifier of an external app or service which manages this persona, if any. (eg. 
"chatterly") */ managedBy?: string; /** Voice information */ voice?: { /** Provider ID */ providerID: string; /** API key for the provider */ apiKey: string; /** Voice ID within the provider */ voiceID: string; }; /** Transcription API information */ transcription?: { /** Provider ID */ providerID: string; /** API key for the provider */ apiKey: string; /** Optional URL for the transcription service */ url?: string; }; /** Knowledge base sources */ knowledge?: KnowledgeBaseSource[]; /** MCP servers */ mcpServers?: MCPKnowledgeClient['config'][]; /** Built-in action flags that are currently enabled */ flags?: BuiltInActionFlags; /** Allow custom chat provider */ onCreateProvider?: (config: ChatBaseConfig) => ChatBase; /** Subagents */ subagents?: SubAgentConfig[]; } /** Configuration for the IntelliWeave initialization */ interface IntelliWeaveConfig extends Partial { /** Optionally allows you to specify a custom hub API URL. */ hubAPI?: string; /** A unique ID to identify this user. Defaults to a generated userID if not specified. */ userID?: string; } /** * IntelliWeave interface, loads a Persona from the hub and allows you to interact with it. This is the main entry point into the IntelliWeave * SDK when not using the built-in UI. * * - event `load` - Fired when the AI is loaded with a new configuration. * - event `error` - Fired when an error occurs during loading. * - event `webweaver_loaded` - Fired when the AI is loaded with a new configuration. This is a global event that is fired on the window object. * - event `webweaver_error` - Fired when an error occurs during loading. This is a global event that is fired on the window object. * - event `input` - Fired when the user sends a message to the AI. * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isPartial` is true, the message is incomplete and will be followed by more events. * - event `toolstart` - Fired when the AI starts performing an action. 
* - event `tool` - Fired when the AI finishes performing an action. */ declare class IntelliWeave extends EventTarget { /** App version */ static version: string; /** Built-in actions version - increment this when adding new actions */ static builtInActionsVersion: string; /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */ onAIMessage?: (messages: TokenWindowGroupItemParams[], isPartial: boolean) => void; /** Callback when the AI starts performing an action */ onAIToolStart?: ChatBaseConfig['onAIToolStart']; /** Current conversation ID */ conversationID: string; /** Knowledge database interface */ knowledgeBase: KnowledgeBase; /** Subagent interface */ subAgents: SubAgents; /** Last KB search that was performed */ private _lastKBsearch; /** If set, the next time a request is made this is the KB result items that will be used, once-off. */ private _nextRequestUseKBitems?; /** Timestamp when the last message processing started (for response time tracking) */ private _messageStartTime?; /** Config loaded from the API */ config?: WebWeaverGPTConfig; /** Available LLMs */ models: { id: string; config: ChatBaseConfig; priority?: number; }[]; /** Current LLM */ currentModel?: ChatBase; /** The audio system. 
Set this to a new instance of AudioSystem to enable audio support */ audio: AudioSystem | null; /** Silero VAD model blob */ vadModel?: Blob; /** True if the AI has loaded */ get loaded(): boolean; /** Built-in action flags from the config */ get flags(): BuiltInActionFlags; /** Get information about available built-in actions */ getBuiltInActionsInfo(): { version: string; availableActions: string[]; enabledFlags: BuiltInActionFlags; }; /** If loading fails, this stores the last error during load() */ error?: Error; /** IntelliWeave API key */ apiKey: string; /** Tracker for the current voice interaction */ _voiceTracker?: (text: string) => void; /** Logic engine */ logic: AILogic; /** A unique ID to identify this user. Defaults to a value stored in localStorage, or random. */ userID: string; /** URL of the IntelliWeave Hub API */ hubAPI: string; /** Set model and load data from an API key */ load(apiKey: string, config?: IntelliWeaveConfig): Promise; /** Set the current model */ setModel(id: string): void; private _lastSystemMsg; /** Get the system message prefix, before the KB entries are added */ getContextPrefix(): Promise; /** KB items added in the last run */ private lastKBItems; /** Get system message to send to the AI */ onBeforeMessageProcessing(): Promise; /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */ processIncomingMessage(messages: TokenWindowGroupItemParams[], isPartial?: boolean): void; /** True if currently processing a message */ isProcessing: boolean; /** Send a message, and get the response */ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams[]) => void): Promise; /** @private Called when the AI wants to run a KB action */ toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise; /** Submit an analytics event asynchronously. These events are for use in the Conversation Analytics code. For anonymous statistic analysis, use track() instead. 
*/ private activeAnalyticsPromises; submitAnalyticsEvent(data: any): void; /** Wait for all analytics events to finish */ waitForAnalytics(): Promise; /** Reset the conversation */ resetConversation(): void; /** Insert a message as if the assistant has written it */ insertAssistantMessage(message: string): void; /** Export conversation state to a JSON object */ exportState(): { type: string; conversationID: string; messages: TokenWindowGroupItem[] | undefined; }; /** Import conversation state from JSON */ importState(state: any): void; /** Clone this instance without any message history */ clone(): IntelliWeave; /** Get all messages in the conversation history */ get messages(): TokenWindowGroupItem[]; } /** * Register knowledge base sources and perform searches. */ declare class KnowledgeBase { /** Reference to the AI */ ai?: IntelliWeave; /** Knowledge base sources */ _sources: KnowledgeBaseSource[]; /** List of sources returned from the last window event */ _windowSources: KnowledgeBaseSource[]; /** List of last search results */ lastResults: KnowledgeBaseItem[]; /** Individual knowledge base entries added manually by the application */ manualEntries: KnowledgeBaseItem[]; /** If true, allows using globally defined sources from the browser window events */ allowWindowSources: boolean; /** If true, allows using knowledge specified in the global configuration object */ allowGlobalConfigSources: boolean; /** If true, allows the AI to search the knowledge base. If false, essentially disables RAG lookup. */ allowRagSearch: boolean; /** Constructor */ constructor(ai: IntelliWeave); /** Ensures the internal knowledge is set correctly */ ensureInternalKnowledge(): void; /** Clears all knowledge back to the default */ reset(): void; /** * Register a new knowledge base source. You can pass either just a query function, or an ID and a query function. 
* * @param idOrQuery The ID of the source or a function that performs the query if no ID is provided * @param query The function that performs the query. Can be undefined if the first param is a function. */ registerSource(idOrQuery: string | KnowledgeBaseSource['query'], query?: KnowledgeBaseSource['query']): string; /** Remove a knowledge base source */ removeSource(idOrQuery: string | KnowledgeBaseSource['query']): void; /** Add a knowledge base item. */ addEntry(item: KnowledgeBaseItem): void; /** Remove a knowledge base item. */ removeEntry(id: string): void; /** Get all knowledge base sources */ get sources(): KnowledgeBaseSource[]; /** Search the knowledge base */ search(query: string): Promise; /** Get the KB entry with the specified ID. Requires the item to have been fetched in the last knowledge base query. */ getCachedEntry(id: string): KnowledgeBaseItem | undefined; /** Create and register an external knowledge base source from a URL */ registerSourceFromURL(url: string, id?: string): void; /** Clone this instance */ clone(newIW: IntelliWeave): KnowledgeBase; /** Registers an MCP server as a knowledge base source */ registerMCPSource(config: MCPKnowledgeClient['config']): MCPKnowledgeClient; } /** Knowledge fetcher */ type KnowledgeFetcher = (query: string) => (KnowledgeBaseItem[] | Promise); /** Knowledge base source */ interface KnowledgeBaseSource { /** Source ID */ id?: string; /** Source name */ name?: string; /** Optional description */ description?: string; /** Optional icon URL */ icon?: string; /** If true, this source will not be queried. */ disabled?: boolean; /** Source query function. This function should return a list of knowledge base entries that optionally match the query. 
*/ query?: KnowledgeFetcher; /** URL query for remote sources */ url?: string; /** Pre-packaged knowledge base entries */ entries?: KnowledgeBaseItem[]; /** Remote knowledge server type (default is 'iw') */ backendType?: 'mcp' | 'iw'; /** If using MCP, this is the name of the tool to use to search for knowledge */ mcpSearchToolName?: string; } /** Knowledge base item */ interface KnowledgeBaseItem { /** Item ID */ id?: string; /** OpenAI-compatible internal ID. This is set automatically and should not be used. */ _functionID?: string; /** Item type. */ type: 'info' | 'action' | 'tour' | 'input-event' | 'output-event'; /** Item name */ name: string; /** Item tags. Helps with search optimization. */ tags?: string; /** Item content */ content: string; /** If true, this item will always be returned from all search results. */ isContext?: boolean; /** If true, this item will not be visible to the AI. */ disabled?: boolean | ((ai: IntelliWeave) => boolean); /** List of parameters for an action function. Can either use IW's format, or a JSON Schema object. */ parameters?: KnowledgeBaseActionParameterSchema; /** * Item action. The parameters are defined in `parameters`. The response is stringified and sent to the AI. * You can return any JSON-serializable object. You can also return a string describing to the AI the action * that was performed. If an error is thrown, the AI will respond appropriately to the user. */ action?: (input: any, ai: IntelliWeave) => (any | Promise); /** If specified, will hide this action from the default UI after the AI finishes running it, or always hide it */ hideActionInUI?: 'always' | 'after-complete'; /** Attachments such as images, etc */ attachments?: KnowledgeBaseItemAttachment[]; } /** Knowledge base item attachment, such as an image, file, etc. */ interface KnowledgeBaseItemAttachment { /** UUID */ uuid: string; /** Attachment mime type */ mimeType: string; /** File name */ name: string; /** Full URL to access the file. 
This is required for the AI to be able to see the attachment. */ url?: string; /** UNIX timestamp (milliseconds since epoch) when the file was added */ dateAdded?: number; /** Internal path to where the file is stored */ path?: string; /** File size */ size?: number; } /** Parameter definition used by IntelliWeave */ interface IntelliWeaveParameterDefinition { name: string; type: 'string' | 'boolean' | 'number'; description: string; } /** Tool call input schema. Can either use IW's format, or a JSON Schema object */ type KnowledgeBaseActionParameterSchema = JSONSchema7 | IntelliWeaveParameterDefinition[]; /** Format for incoming KB webhook. Sent from IntelliWeave to your endpoint. */ interface KnowledgeBaseWebhookRequest { /** Type of the request. */ type: 'search' | 'action'; /** User ID of the calling user */ userID: string; /** When type=search, this is the search query */ query?: string; /** When type=action, this is the action ID */ actionID?: string; /** When type=action, this is the action parameters */ parameters?: any; } /** Format for the response to the webhook when type=search */ interface KnowledgeBaseWebhookSearchResponse { /** Name for your KB database */ name: string; /** Description of your KB database */ description?: string; /** Optional icon URL */ icon?: string; /** URL to display to open more information about your database */ infoURL?: string; /** Items */ items: KnowledgeBaseItem[]; /** * If true, the AI will rerun the search query on every request. * Only use this if you have context items that will change externally, as * this adds a delay to every request. */ noCache?: boolean; } /** Format for the response to the webhook when type=action */ interface KnowledgeBaseWebhookActionResponse { /** The response to the AI. Can be a text instruction, an error message, a JSON object, anything. */ response: any; /** An optional list of knowledge base items to update locally. 
*/ updateItems?: KnowledgeBaseItem[]; } /** * Base class for custom Web Components, with some utility functions on it. * * Version 1.1 * Created by: jjv360 */ declare class BaseComponent extends HTMLElement { /** Attributes to monitor */ static observedAttributes: string[]; /** Component tag name */ static tagName: string; /** True if this component is already registered */ static _isRegistered: boolean; /** Contains the shadow DOM */ _shadow?: ShadowRoot; /** Get the root node for this element's shadow DOM */ get root(): ShadowRoot | undefined; /** Register the component */ static register(): void; /** Create a new element of this type */ static create(attrs?: any, content?: string): BaseComponent; /** Add the html for a component */ static add(attrs?: any, content?: string): string; /** Create an element of this kind. Same as document.createElement(), but also ensures the component is registered. */ static createElement(): HTMLElement; /** Called when the element is added to the DOM */ connectedCallback(): void; /** Called when the element is removed */ disconnectedCallback(): void; /** @abstract Get the HTML layout */ html(): string; /** @abstract Called before the HTML is created for the first time */ onBeforeCreate(): void; /** @abstract Called when the component is created */ onCreate(): void; /** @abstract Called when the UI should be updated with the current values */ onUpdate(): void; /** @abstract Called when the component is destroyed */ onDestroy(): void; /** @abstract Called when an observed attribute changes */ attributeChangedCallback(name: string, oldValue: any, newValue: any): void; /** Internal attribute proxy */ private _attrProxy?; /** Helper for getting attributes */ get attr(): any; /** Internal for state */ private _stateProxy?; private _state; /** Helper for state variables. When anything inside here changes, onUpdate is called. 
*/ get state(): any; /** Check if a child with the ID exists */ hasChild(id: string): boolean; /** Get a child element by its ID */ child(id: string): BaseComponent; } /** Main embed web component */ declare class WebWeaverEmbed extends BaseComponent { /** Element tag name */ static tagName: string; /** Attributes we monitor for changes */ static observedAttributes: string[]; /** ChatGPT reference */ ai: IntelliWeave; /** Currently loaded configuration */ config: Partial; /** Responses the AI has suggested for the next user message */ suggestions: string[]; /** Previous open state for tracking UI open/close events */ private _previousOpenState; /** Session start time for tracking UI session duration */ private _uiSessionStartTime?; /** Constructor */ constructor(); /** Content */ html: () => string; /** On create */ onCreate(): void; private _lastLogo?; private _lastBackground?; private _lastTextColor?; private _lastDisplayMode?; private _lastLayout?; private _lastPersonaName?; private _lastHeaderLogo?; private _lastPositioningMode?; private _lastPositionX?; private _lastPositionY?; private _lastOffsetX?; private _lastOffsetY?; /** Apply persona-based color variants as CSS variables */ private applyPersonaColorVariants; /** Parse a color string to RGB (supports hex and rgb/rgba) */ private parseColorToRGB; /** Apply UI styles from config and attributes, prioritizing attributes */ private applyConfigStylesAndAttributes; /** Called on update */ onUpdate(): void; /** Called when the component is destroyed */ onDestroy(): void; /** Called when the container is clicked */ onContainerClick(e: Event): void; /** Called when the logo is clicked */ onLogoClick(e: Event): void; /** Open the interaction panel */ open(): void; /** Close the interaction panel */ close(): void; /** Reset conversation UI */ resetConversation(): void; /** True if busy processing */ private _isProcessing; /** The element which will receive the current message from the AI */ currentOutputElement?: 
HTMLElement; /** Process input text from the user */ processInput(inputText: string): Promise; /** Called when the AI responds with some text */ onAIMessage(messages: TokenWindowGroupItemParams[], isPartial: boolean): Promise; /** Updates a text element */ updateTextElement(elementID: string, text: string): void; /** Updates an info block element */ updateInfoElement(elementID: string, text: string, iconType: string): void; /** Called when a suggestion button is clicked */ onSuggestionClick(e: Event, suggestion: string): void; /** Called when an LLM model is selected */ onLLMModelSelect(e: CustomEvent): void; /** Called when the content area is scrolled */ onContentScroll(e: Event): void; /** Displays whitelabeled branding for known apps */ private updateBrandingFor; } /** Utility to trim whitespace from blocks of text */ declare function trimWhitespaceInText(text: string): string; /** Get the global object */ declare function intelliweaveGlobalThis(): any; /** * Global type declarations for the IntelliWeave library */ interface IntelliWeaveGlobalConfig { apiKey?: string; analytics?: boolean; debug?: boolean; context?: string; introductionMessage?: string; introductionSuggestions?: string[]; knowledgeBase?: KnowledgeBaseItem[]; sources?: any[]; offsetX?: number; offsetY?: number; userID?: string; hubAPI?: string; backgroundColor?: string; /** Display mode: 'closed' (default - starts minimized) or 'open' (always open) */ displayMode?: 'closed' | 'open'; /** Layout preset: 'widget' (default) or 'fullscreen' */ layout?: 'widget' | 'fullscreen'; /** Positioning mode: 'fixed' (default - floats on page) or 'container' (fills parent container) */ positioningMode?: 'fixed' | 'container'; /** Horizontal position: 'left' or 'right' (default: 'right') - only used when positioningMode is 'fixed' */ positionX?: 'left' | 'right'; /** Vertical position: 'top' or 'bottom' (default: 'bottom') - only used when positioningMode is 'fixed' */ positionY?: 'top' | 'bottom'; /** Extra 
data that will be passed to external knowledge base actions. */ extra: any; /** @deprecated Override the AI system context prefix. This should be controlled from the Hub now. */ pageSummary?: string | (() => string) | (() => Promise); /** List of knowledge base sources */ knowledgeBaseSources?: KnowledgeBaseSource[]; /** When the built-in UI is used, this contains the instance being rendered on the page */ embed?: WebWeaverEmbed; } /** Get the global IntelliWeave configuration object that has been created for this document, or create it if necessary. */ declare function intelliweaveConfig(): IntelliWeaveGlobalConfig; /** An async generator which yields events from an SSE fetch request body (a readable stream) */ declare function sseEvents(stream: ReadableStream): AsyncGenerator; /** Get or create a user ID for this device. */ declare function getDefaultUserID(): string; /** Convert an IntelliWeave parameter list to JSON schema. Does not modify if it's already a JSON schema. */ declare function convertParamsToJSONSchema(params: KnowledgeBaseActionParameterSchema): JSONSchema7; /** OpenRouter message extensions */ interface OpenRouterMessage extends OpenAI.Chat.ChatCompletionAssistantMessageParam { reasoning?: string; } /** OpenAI message format */ type DataType$1 = OpenRouterMessage | OpenAI.Chat.Completions.ChatCompletionMessageParam; /** * API for interacting with OpenAI-compatible APIs. */ declare class ChatGPT extends ChatBase { /** Add a user message to the message history */ addUserMessage(message: string): void; /** Add an assistant message to the message history */ addAssistantMessage(message: string): void; /** Create the OpenAI client */ protected createOpenAIClient(): OpenAI; /** Send a message, and get the response string. 
*/ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams[]) => void): Promise[]>; /** Parse a message block into our format */ protected parseMessageBlock(messageID: string, message: OpenRouterMessage, usage: OpenAI.Completions.CompletionUsage | undefined, isPartial: boolean): TokenWindowGroupItemParams; /** Trim message list */ trimMessages(): Promise; } /** Anthropic message format */ type DataType = Anthropic.Messages.MessageParam; /** * API for interacting with Anthropic APIs. */ declare class AnthropicChat extends ChatBase { /** Add a user message to the message history */ addUserMessage(message: string): void; /** Add an assistant message to the message history */ addAssistantMessage(message: string): void; /** Send a message, and get the response string. */ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams[]) => void): Promise[]>; /** Parse a message block into our format */ protected parseMessageBlock(message: Anthropic.Messages.Message, isPartial: boolean): TokenWindowGroupItemParams; /** Trim message list */ trimMessages(): Promise; } /** Class to help with logging */ declare class Logging { /** Current module */ module: string; /** Constructor */ constructor(moduleName: string); /** Enable debug logging */ static debug: boolean; /** Check if verbose logging is enabled */ get debugEnabled(): boolean | undefined; /** Log a message */ log(...args: any[]): void; /** Debug message */ debug(...args: any[]): void; /** Info message message */ info(...args: any[]): void; /** Warning message */ warn(...args: any[]): void; /** Error message */ error(...args: any[]): void; /** Create a timer task, returns a function to log events in this timer */ timer(name: string, ...args: any[]): (...args: any[]) => void; } export { AnthropicChat, type BufferType, BufferedWebSocket, type BuiltInActionFlags, ChatBase, type ChatBaseConfig, type ChatBaseToolConfig, ChatGPT, FixedBufferStream, IntelliWeave, type IntelliWeaveConfig, type 
IntelliWeaveGlobalConfig, IntelliWeaveMessageParser, type IntelliWeaveParameterDefinition, KnowledgeBase, type KnowledgeBaseActionParameterSchema, type KnowledgeBaseItem, type KnowledgeBaseItemAttachment, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, type KnowledgeFetcher, Logging, MCPKnowledgeClient, ONNXModel, type ONNXTensors, Resampler, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, type TokenWindowGroupItemParams, type TokenWindowGroupItemSection, TokenWindowGroupItemSectionType, type WebWeaverGPTConfig, audioToWav, convertParamsToJSONSchema, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, trimWhitespaceInText };