import { APIResource } from "../../core/resource.js";
import * as Shared from "../shared.js";
import * as CompletionsAPI from "./completions.js";
import { CompletionCreateParams, Completions } from "./completions.js";
export declare class Chat extends APIResource {
    completions: CompletionsAPI.Completions;
}
export interface ChatMessage {
    /**
     * The content of the message.
     */
    content: string;
    /**
     * The role of the author of this message. One of system, user, tool, or
     * assistant.
     */
    role: string;
    /**
     * The name of the tool.
     */
    name?: string;
    /**
     * The ID of the tool call that this message is responding to.
     */
    tool_call_id?: string;
    /**
     * Tool calls, if any.
     */
    tool_calls?: Array<ChatMessage.ToolCall>;
}
export declare namespace ChatMessage {
    interface ToolCall {
        id?: string;
        function?: ToolCall.Function;
        type?: 'function';
    }
    namespace ToolCall {
        interface Function {
            arguments?: string;
            name?: string;
        }
    }
}
export interface ChatRequest {
    frequency_penalty?: number;
    /**
     * Keys of logit_bias must be token IDs from the tokenizer (given as strings),
     * not words. Incorrect: `"logit_bias":{"You": 6}`; correct:
     * `"logit_bias":{"1639": 6}`. Refs:
     * https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
     */
    logit_bias?: {
        [key: string]: number;
    };
    /**
     * Whether to return log probabilities of the output tokens. If true, returns
     * the log probabilities of each output token returned in the content of
     * message. This option is currently not available on the gpt-4-vision-preview
     * model.
     */
    logprobs?: boolean;
    max_tokens?: number;
    messages?: Array<ChatMessage>;
    model?: string;
    n?: number;
    /**
     * Disable the default behavior of parallel tool calls by setting it to false.
     */
    parallel_tool_calls?: boolean;
    presence_penalty?: number;
    response_format?: ChatRequest.ResponseFormat;
    seed?: number;
    stop?: Array<string>;
    stream?: boolean;
    /**
     * Options for streaming responses. Only set this when you set stream: true.
     */
    stream_options?: ChatRequest.StreamOptions;
    temperature?: number;
    /**
     * This can be either a string or a ToolChoice object.
     */
    tool_choice?: unknown;
    tools?: unknown;
    /**
     * An integer between 0 and 5 specifying the number of most likely tokens to
     * return at each token position, each with an associated log probability.
     * logprobs must be set to true if this parameter is used.
     */
    top_logprobs?: number;
    top_p?: number;
    user?: string;
}
export declare namespace ChatRequest {
    interface ResponseFormat {
        type?: 'json_object' | 'text';
    }
    /**
     * Options for streaming responses. Only set this when you set stream: true.
     */
    interface StreamOptions {
        /**
         * If set, an additional chunk will be streamed before the data: [DONE]
         * message. The usage field on this chunk shows the token usage statistics
         * for the entire request, and the choices field will always be an empty
         * array. All other chunks will also include a usage field, but with a null
         * value.
         */
        include_usage?: boolean;
    }
}
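/*
 * Illustrative sketch only (not part of the generated declarations): how a
 * ChatRequest carrying a tool round trip might be assembled, and how the
 * token usage on the matching ChatResponse might be read. The model name,
 * message contents, and tool call ID below are placeholders, not values
 * defined by this module.
 *
 *   import type { ChatMessage, ChatRequest, ChatResponse } from "./chat";
 *
 *   const messages: Array<ChatMessage> = [
 *     { role: "system", content: "You are a helpful assistant." },
 *     { role: "user", content: "What is the weather in Berlin?" },
 *     {
 *       role: "assistant",
 *       content: "",
 *       tool_calls: [{
 *         id: "call_123",                          // placeholder tool call ID
 *         type: "function",
 *         function: { name: "get_weather", arguments: '{"city":"Berlin"}' },
 *       }],
 *     },
 *     // The tool result is sent back as a message with the tool role.
 *     { role: "tool", name: "get_weather", tool_call_id: "call_123", content: '{"temp_c":21}' },
 *   ];
 *
 *   const request: ChatRequest = {
 *     model: "example-model",                      // placeholder model name
 *     messages,
 *     logit_bias: { "1639": 6 },                   // keyed by token ID, not by word
 *     stream: true,
 *     stream_options: { include_usage: true },     // only meaningful with stream: true
 *   };
 *
 *   const report = (response: ChatResponse): string =>
 *     `${response.choices?.[0]?.message?.content ?? ""} ` +
 *     `(total tokens: ${response.usage?.total_tokens ?? 0})`;
 */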
export interface ChatResponse {
    id?: string;
    choices?: Array<Choice>;
    created?: number;
    model?: string;
    object?: string;
    system_fingerprint?: string;
    usage?: Usage;
}
export interface Choice {
    finish_reason?: string;
    index?: number;
    logprobs?: unknown;
    message?: ChatMessage;
    tool_authorizations?: Array<unknown>;
    tool_messages?: Array<unknown>;
}
export interface Usage {
    completion_tokens?: number;
    prompt_tokens?: number;
    total_tokens?: number;
}
export declare namespace Chat {
    export { type ChatMessage as ChatMessage, type ChatRequest as ChatRequest, type ChatResponse as ChatResponse, type Choice as Choice, type Usage as Usage, };
    export { Completions as Completions, type CompletionCreateParams as CompletionCreateParams };
}
//# sourceMappingURL=chat.d.ts.map