import { APIResource } from "../../core/resource.js";
import * as ChatAPI from "./chat.js";
import { APIPromise } from "../../core/api-promise.js";
import { RequestOptions } from "../../internal/request-options.js";

export declare class Completions extends APIResource {
    /**
     * Interact with language models via OpenAI's chat completions API.
     *
     * @param body - Request parameters for the chat completion.
     * @param options - Optional per-request overrides.
     * @returns A lazily-dispatched promise for the API response.
     */
    // NOTE(review): the generic argument of APIPromise was stripped from this
    // declaration (bare `APIPromise` does not compile when the type is generic).
    // `unknown` is the safe placeholder; the original presumably named a
    // response type from ChatAPI — confirm against chat.d.ts.
    create(body: CompletionCreateParams, options?: RequestOptions): APIPromise<unknown>;
}

export interface CompletionCreateParams {
    frequency_penalty?: number;
    /**
     * LogitBias must be keyed by token id string (specified by the token ID in
     * the tokenizer), not a word string. incorrect: `"logit_bias":{"You": 6}`,
     * correct: `"logit_bias":{"1639": 6}` refs:
     * https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
     */
    logit_bias?: {
        [key: string]: number;
    };
    /**
     * LogProbs indicates whether to return log probabilities of the output tokens or
     * not. If true, returns the log probabilities of each output token returned in the
     * content of message. This option is currently not available on the
     * gpt-4-vision-preview model.
     */
    logprobs?: boolean;
    max_tokens?: number;
    // NOTE(review): the element type of this Array was stripped (bare `Array`
    // does not compile). `unknown` is the safe placeholder; given the otherwise
    // unused `ChatAPI` import, the original presumably referenced a message
    // type from chat.d.ts — confirm and tighten.
    messages?: Array<unknown>;
    model?: string;
    n?: number;
    /**
     * Disable the default behavior of parallel tool calls by setting it: false.
     */
    parallel_tool_calls?: boolean;
    presence_penalty?: number;
    response_format?: CompletionCreateParams.ResponseFormat;
    seed?: number;
    // NOTE(review): element type also stripped here; the chat API accepts stop
    // sequences as strings, but `unknown` is kept as the non-committal
    // placeholder — confirm against the upstream schema before narrowing.
    stop?: Array<unknown>;
    stream?: boolean;
    /**
     * Options for streaming response. Only set this when you set stream: true.
     */
    stream_options?: CompletionCreateParams.StreamOptions;
    temperature?: number;
    /**
     * This can be either a string or an ToolChoice object.
     */
    tool_choice?: unknown;
    tools?: unknown;
    /**
     * TopLogProbs is an integer between 0 and 5 specifying the number of most likely
     * tokens to return at each token position, each with an associated log
     * probability. logprobs must be set to true if this parameter is used.
     */
    top_logprobs?: number;
    top_p?: number;
    user?: string;
}

export declare namespace CompletionCreateParams {
    /** Desired output format for the completion response. */
    interface ResponseFormat {
        type?: 'json_object' | 'text';
    }
    /**
     * Options for streaming response. Only set this when you set stream: true.
     */
    interface StreamOptions {
        /**
         * If set, an additional chunk will be streamed before the data: [DONE] message.
         * The usage field on this chunk shows the token usage statistics for the entire
         * request, and the choices field will always be an empty array. All other chunks
         * will also include a usage field, but with a null value.
         */
        include_usage?: boolean;
    }
}

export declare namespace Completions {
    export { type CompletionCreateParams as CompletionCreateParams };
}
//# sourceMappingURL=completions.d.ts.map