/**
 * Copyright 2025 Vybestack LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { type IContent } from '../../services/history/IContent.js';
import { type IProviderConfig } from '../types/IProviderConfig.js';
import { BaseProvider, type NormalizedGenerateChatOptions } from '../BaseProvider.js';
import { type OAuthManager } from '../../auth/precedence.js';
import { type IModel } from '../IModel.js';
import { type IProvider } from '../IProvider.js';
/**
 * Vercel OpenAI-based provider using AI SDK v5.
 *
 * NOTE:
 * - No dependency on the official `openai` SDK.
 * - Uses `openai.chat(modelId)` to talk to the Chat Completions API.
 * - Tools are configured via AI SDK `tool()` with JSON schema input.
 */
export declare class OpenAIVercelProvider extends BaseProvider implements IProvider {
    private getLogger;
    /**
     * @plan:PLAN-20251023-STATELESS-HARDENING.P08
     * @requirement:REQ-SP4-003
     * Constructor reduced to minimal initialization - no state captured.
     */
    constructor(apiKey: string | undefined, baseURL?: string, config?: IProviderConfig, oauthManager?: OAuthManager);
    protected supportsOAuth(): boolean;
    /**
     * Create an OpenAI provider instance for this call using AI SDK v5.
     *
     * Uses the resolved runtime auth token and baseURL, and still allows
     * local endpoints without authentication (for Ollama-style servers).
     *
     * @param options - Normalized generate chat options
     * @param customFetch - Optional custom fetch function for intercepting responses
     */
    private createOpenAIClient;
    /**
     * Extract model parameters from normalized options instead of the settings service.
     * This mirrors OpenAIProvider but feeds AI SDK call options instead.
     */
    private extractModelParamsFromOptions;
    private getAiJsonSchema;
    private getAiTool;
    /**
     * Normalize tool IDs from various formats to OpenAI-style format.
     * Kept for compatibility with existing history/tool logic.
     */
    /**
     * Convert internal history IContent[] to AI SDK ModelMessage[].
     *
     * This implementation uses textual tool replay for past tool calls/results.
     * New tool calls in the current response still use structured ToolCallBlocks.
     *
     * For Kimi K2 models, uses ToolIdStrategy to generate proper tool IDs
     * in the format functions.{name}:{index} instead of call_xxx.
     */
    private convertToModelMessages;
    /**
     * Build an AI SDK ToolSet from already-normalized OpenAI-style tool definitions.
     *
     * Input is the array produced by convertToolsToOpenAIVercel().
     */
    private buildVercelTools;
    private mapUsageToMetadata;
    /**
     * Extract thinking content from inline reasoning/thinking tags and return it
     * as a ThinkingBlock. Returns null if no thinking tags are found.
     *
     * This must be called BEFORE sanitizeText, which strips these tags.
     *
     * Handles two formats:
     * 1. Standard: a full thinking paragraph wrapped in a single tag pair.
     * 2. Fragmented (Synthetic API): individual words each wrapped in their own tag pair.
     *
     * For the fragmented format, joins with spaces. For the standard format, joins with newlines.
     */
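    /*
     * Illustrative sketch only (not this provider's implementation): roughly how a
     * normalized OpenAI-style tool definition could be mapped onto an AI SDK v5
     * tool with JSON-schema input, as the class-level NOTE and buildVercelTools()
     * describe. The interface shape, helper name, and the inputSchema/jsonSchema
     * pairing below are assumptions about AI SDK v5 usage, not this class's API.
     *
     *   import { jsonSchema, tool, type ToolSet } from 'ai';
     *   import type { JSONSchema7 } from 'json-schema';
     *
     *   // Hypothetical, already-normalized OpenAI-style function tool definition.
     *   interface OpenAIFunctionTool {
     *     type: 'function';
     *     function: { name: string; description?: string; parameters: JSONSchema7 };
     *   }
     *
     *   // One AI SDK tool() per definition, passing the OpenAI JSON schema
     *   // straight through as the tool's input schema.
     *   function buildToolSetSketch(defs: OpenAIFunctionTool[]): ToolSet {
     *     const tools: ToolSet = {};
     *     for (const def of defs) {
     *       tools[def.function.name] = tool({
     *         description: def.function.description ?? '',
     *         inputSchema: jsonSchema(def.function.parameters),
     *         // No execute() callback: tool calls are streamed back to the caller
     *         // rather than executed inside the AI SDK.
     *       });
     *     }
     *     return tools;
     *   }
     */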
    /**
     * Core chat completion implementation using AI SDK v5.
     *
     * This replaces the original OpenAI SDK v5 client usage with:
     * - createOpenAI({ apiKey, baseURL })
     * - openai.chat(modelId)
     * - generateText / streamText
     */
    protected generateChatCompletionWithOptions(options: NormalizedGenerateChatOptions): AsyncIterableIterator<IContent>;
    /**
     * Models listing – uses HTTP GET /models via fetch instead of the OpenAI SDK.
     * Falls back to a small static list if the request fails.
     */
    getModels(): Promise<IModel[]>;
    private getFallbackModels;
    getDefaultModel(): string;
    getCurrentModel(): string;
    clearClientCache(runtimeKey?: string): void;
    clearState(): void;
    getServerTools(): string[];
    invokeServerTool(toolName: string, _params: unknown, _config?: unknown, _signal?: AbortSignal): Promise<unknown>;
    getToolFormat(): string;
    /**
     * Detects the tool call format based on the model being used.
     * Mirrors OpenAIProvider behavior so existing ToolFormatter logic works.
     */
    parseToolResponse(response: unknown): unknown;
    /**
     * Disallow memoization of model params to preserve stateless behavior.
     */
    setModelParams(_params: Record<string, unknown> | undefined): void;
    /**
     * Gets model parameters from SettingsService per call (stateless).
     * @plan PLAN-20260126-SETTINGS-SEPARATION.P09
     * Now uses invocation.modelParams instead of filtering SettingsService.
     */
    getModelParams(): Record<string, unknown> | undefined;
    /**
     * Determines whether a response should be retried based on error codes.
     *
     * This is retained for compatibility with existing retryWithBackoff
     * callers, even though AI SDK's generateText/streamText have their
     * own built-in retry logic.
     */
    shouldRetryResponse(error: unknown): boolean;
}
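/*
 * Illustrative sketch only (not the provider's implementation): the AI SDK v5
 * call path that generateChatCompletionWithOptions() is documented to use,
 * i.e. createOpenAI({ apiKey, baseURL }) -> openai.chat(modelId) -> streamText.
 * The option names and the plain-string yield type below are assumptions; the
 * real provider yields IContent chunks and also handles tool calls and usage.
 *
 *   import { createOpenAI } from '@ai-sdk/openai';
 *   import { streamText, type ModelMessage, type ToolSet } from 'ai';
 *
 *   async function* streamChatSketch(opts: {
 *     apiKey?: string;
 *     baseURL?: string;
 *     modelId: string;
 *     messages: ModelMessage[];
 *     tools?: ToolSet;
 *   }): AsyncIterableIterator<string> {
 *     // createOpenAI({ apiKey, baseURL }) replaces the official `openai` SDK client.
 *     const openai = createOpenAI({ apiKey: opts.apiKey, baseURL: opts.baseURL });
 *
 *     // openai.chat(modelId) targets the Chat Completions API; streamText drives it.
 *     const result = streamText({
 *       model: openai.chat(opts.modelId),
 *       messages: opts.messages,
 *       tools: opts.tools,
 *     });
 *
 *     // Yield text deltas as they arrive.
 *     for await (const delta of result.textStream) {
 *       yield delta;
 *     }
 *   }
 */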
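/*
 * Illustrative sketch only: the getModels() strategy described above - a plain
 * HTTP GET on `${baseURL}/models` with a bearer token when one is available,
 * falling back to a small static list on any failure. The OpenAI-compatible
 * `{ data: [...] }` response shape and the fallback parameter are assumptions;
 * the real method maps entries onto IModel.
 *
 *   async function listModelsSketch(
 *     baseURL: string,
 *     apiKey?: string,
 *     fallback: Array<{ id: string }> = [],
 *   ): Promise<Array<{ id: string }>> {
 *     try {
 *       const res = await fetch(`${baseURL.replace(/\/$/, '')}/models`, {
 *         headers: apiKey ? { Authorization: `Bearer ${apiKey}` } : undefined,
 *       });
 *       if (!res.ok) throw new Error(`GET /models failed: ${res.status}`);
 *       const body = (await res.json()) as { data?: Array<{ id: string }> };
 *       return body.data ?? fallback;
 *     } catch {
 *       // Mirrors getFallbackModels(): never fail model listing outright.
 *       return fallback;
 *     }
 *   }
 */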