/**
 * Copyright 2025 Vybestack LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { type IModel } from '../IModel.js';
import { type IContent } from '../../services/history/IContent.js';
import { type IProviderConfig } from '../types/IProviderConfig.js';
import {
  BaseProvider,
  type NormalizedGenerateChatOptions,
} from '../BaseProvider.js';
import type { ToolFormat } from '../../tools/IToolFormatter.js';
import type { OAuthManager } from '../../auth/precedence.js';

/**
 * Ambient declaration for the OpenAI Responses API provider.
 *
 * Supports both plain API-key usage and "Codex mode" (OAuth against a Codex
 * baseURL); see the per-member notes below.
 */
export declare class OpenAIResponsesProvider extends BaseProvider {
  private logger;
  private _isCodexMode;
  constructor(
    apiKey: string | undefined,
    baseURL?: string,
    config?: IProviderConfig,
    oauthManager?: OAuthManager,
  );
  /**
   * OAuth is supported in Codex mode.
   * Check baseURL directly to avoid timing issues with instance properties.
   * @plan PLAN-20251213-ISSUE160.P03
   */
  protected supportsOAuth(): boolean;
  /**
   * Detect if provider is in Codex mode based on baseURL.
   * @plan PLAN-20251213-ISSUE160.P03
   */
  private isCodexMode;
  /**
   * @plan PLAN-20251215-issue813
   * @requirement REQ-RETRY-001: OpenAIResponsesProvider must use retryWithBackoff for all fetch calls
   *
   * Determines if an error should trigger a retry.
   * - 429 (rate limit) errors are retried
   * - 5xx server errors are retried
   * - 400 (bad request) errors are NOT retried
   * - Network transient errors are retried
   */
  private shouldRetryOnError;
  /**
   * Get account_id from Codex OAuth token.
   * @plan PLAN-20251213-ISSUE160.P03
   */
  private getCodexAccountId;
  getToolFormat(): ToolFormat;
  // NOTE(review): the generic argument was missing here ("Promise;" does not
  // compile). IModel is imported and otherwise unused in this file, so
  // Promise<IModel[]> is the evident original signature — confirm against the
  // implementation source.
  getModels(): Promise<IModel[]>;
  /**
   * Get Codex models.
   *
   * Note: The Codex /models endpoint is protected by Cloudflare bot detection
   * which blocks automated requests (even with proper auth headers).
   * The /responses endpoint works fine, but /models returns a Cloudflare
   * challenge. Therefore, we use a hardcoded list based on
   * codex-rs/core/tests/suite/list_models.rs
   *
   * @plan PLAN-20251214-ISSUE160.P06
   */
  private getCodexModels;
  getCurrentModel(): string;
  getDefaultModel(): string;
  setConfig(config: IProviderConfig): void;
  /**
   * OpenAI Responses API always requires payment (API key).
   */
  isPaidMode(): boolean;
  clearState(): void;
  /**
   * Generate a unique synthetic call ID to avoid collisions.
   * @issue #966
   */
  private generateSyntheticCallId;
  /**
   * Inject a synthetic tool call/result pair that makes GPT think it already
   * read AGENTS.md.
   *
   * The CODEX_SYSTEM_PROMPT instructs GPT to read AGENTS.md for project
   * instructions. However, the user may have configured LLXPRT.md instead
   * (or both), and sometimes AGENTS.md is deliberately reserved for a
   * different agent (like Codex itself).
   *
   * This method:
   * 1. Always claims to have read "AGENTS.md" in the synthetic function call
   * 2. Returns the actual userMemory content (from LLXPRT.md, AGENTS.md, or both)
   * 3. Prevents GPT from wasting a tool call trying to read AGENTS.md
   *
   * @issue #966
   */
  private injectSyntheticConfigFileRead;
  /**
   * Get the list of server tools supported by this provider.
   */
  getServerTools(): string[];
  /**
   * Invoke a server tool (native provider tool).
   */
  // NOTE(review): bare "Promise" restored as Promise<unknown> — the concrete
  // result type is not recoverable from this file; verify against the
  // implementation.
  invokeServerTool(
    _toolName: string,
    _params: unknown,
    _config?: unknown,
    _signal?: AbortSignal,
  ): Promise<unknown>;
  /**
   * Get current model parameters.
   */
  // NOTE(review): bare "Record" restored as Record<string, unknown> — confirm
  // the value type against the implementation.
  getModelParams(): Record<string, unknown> | undefined;
  /**
   * Check if the provider is authenticated using any available method.
   */
  // NOTE(review): bare "Promise" restored as Promise<boolean>, matching the
  // predicate-style name — confirm against the implementation.
  isAuthenticated(): Promise<boolean>;
  /**
   * @plan PLAN-20251023-STATELESS-HARDENING.P08
   * @requirement REQ-SP4-002/REQ-SP4-003
   * Refactored to remove constructor-captured config and global state,
   * sourcing all per-call data from normalized options.
   */
  // IContent is imported and otherwise unused, so AsyncIterableIterator<IContent>
  // is the evident original element type for the streamed chat chunks.
  protected generateChatCompletionWithOptions(
    options: NormalizedGenerateChatOptions,
  ): AsyncIterableIterator<IContent>;
}