import Cerebras from "@cerebras/cerebras_cloud_sdk";
import { AIMessageChunk, BaseMessage } from "@langchain/core/messages";
import { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams, BindToolsInput, LangSmithParams, ToolChoice } from "@langchain/core/language_models/chat_models";
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
import { Runnable } from "@langchain/core/runnables";
import { InteropZodType } from "@langchain/core/utils/types";
import { SerializableSchema } from "@langchain/core/utils/standard_schema";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";

//#region src/chat_models.d.ts

/**
 * Input to chat model class.
 */
interface ChatCerebrasInput extends BaseChatModelParams {
  model: Cerebras.ChatCompletionCreateParams["model"];
  apiKey?: string;
  streaming?: boolean;
  maxTokens?: number;
  maxCompletionTokens?: number;
  temperature?: number;
  topP?: number;
  seed?: number;
  timeout?: number;
  fetch?: (...args: any) => any;
}
interface ChatCerebrasCallOptions extends BaseChatModelCallOptions, Pick {
  tools?: BindToolsInput[];
  tool_choice?: ToolChoice;
  user?: string;
  response_format?: Cerebras.ChatCompletionCreateParams["response_format"];
}
/**
 * Cerebras chat model integration.
 *
 * Setup:
 * Install `@langchain/cerebras` and set an environment variable named `CEREBRAS_API_KEY`.
 *
 * ```bash
 * npm install @langchain/cerebras
 * export CEREBRAS_API_KEY="your-api-key"
 * ```
 *
 * ## [Constructor args](https://api.js.langchain.com/classes/langchain_cerebras.ChatCerebras.html#constructor)
 *
 * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_cerebras.ChatCerebrasCallOptions.html)
 *
 * Runtime args can be passed as the second argument to any of the base runnable methods: `.invoke`, `.stream`, `.batch`, etc.
 * They can also be passed via `.withConfig`, or as the second argument to `.bindTools`, as shown in the examples below:
 *
 * ```typescript
 * // When calling `.withConfig`, call options should be passed via the first argument
 * const llmWithArgsBound = llm.withConfig({
 *   stop: ["\n"],
 *   tools: [...],
 * });
 *
 * // When calling `.bindTools`, call options should be passed via the second argument
 * const llmWithTools = llm.bindTools(
 *   [...],
 *   {
 *     tool_choice: "auto",
 *   }
 * );
 * ```
 *
 * ## Examples
 *
 * ### Instantiate
 *
 * ```typescript
 * import { ChatCerebras } from '@langchain/cerebras';
 *
 * const llm = new ChatCerebras({
 *   model: "llama-3.3-70b",
 *   temperature: 0,
 *   // other params...
 * });
 * ```
 *
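 * The constructor fields mirror `ChatCerebrasInput`, so the API key and generation limits
 * can be passed explicitly instead of being read from the `CEREBRAS_API_KEY` environment
 * variable. A minimal sketch using the fields declared above (the values are placeholders,
 * and the `timeout` unit is assumed to be milliseconds):
 *
 * ```typescript
 * import { ChatCerebras } from '@langchain/cerebras';
 *
 * const configuredLlm = new ChatCerebras({
 *   model: "llama-3.3-70b",
 *   apiKey: process.env.CEREBRAS_API_KEY, // explicit key instead of the env default
 *   maxCompletionTokens: 256,
 *   timeout: 10_000, // assumed to be milliseconds
 * });
 *
 * // The constructor is also overloaded to accept the model name first:
 * // new ChatCerebras("llama-3.3-70b", { temperature: 0 });
 * ```
 *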
 * ### Invoking
 *
 * ```typescript
 * const input = `Translate "I love programming" into French.`;
 *
 * // Models also accept a list of chat messages or a formatted prompt
 * const result = await llm.invoke(input);
 * console.log(result);
 * ```
 *
 * ```txt
 * AIMessage {
 *   "id": "run-9281952d-d4c5-424c-9c18-c6ad62dd6684",
 *   "content": "J'adore la programmation.",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "id": "chatcmpl-bb411272-aac5-44a5-b793-ae70bd94fd3d",
 *     "created": 1735784442,
 *     "model": "llama-3.3-70b",
 *     "system_fingerprint": "fp_2e2a2a083c",
 *     "object": "chat.completion",
 *     "time_info": {
 *       "queue_time": 0.000096069,
 *       "prompt_time": 0.002166527,
 *       "completion_time": 0.012331633,
 *       "total_time": 0.01629185676574707,
 *       "created": 1735784442
 *     }
 *   },
 *   "tool_calls": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {
 *     "input_tokens": 55,
 *     "output_tokens": 9,
 *     "total_tokens": 64
 *   }
 * }
 * ```
 *
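 * As noted above, the model also accepts a list of chat messages. A short sketch using the
 * standard message classes from `@langchain/core` (the system prompt here is illustrative):
 *
 * ```typescript
 * import { HumanMessage, SystemMessage } from '@langchain/core/messages';
 *
 * const messages = [
 *   new SystemMessage("You are a translator that answers with only the translation."),
 *   new HumanMessage(`Translate "I love programming" into French.`),
 * ];
 *
 * const messageResult = await llm.invoke(messages);
 * console.log(messageResult.content);
 * ```
 *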
 * ### Streaming Chunks
 *
 * ```typescript
 * for await (const chunk of await llm.stream(input)) {
 *   console.log(chunk);
 * }
 * ```
 *
 * ```txt
 * AIMessageChunk {
 *   "id": "run-1756a5b2-2ce0-47a9-81e0-2195bf893bd4",
 *   "content": "",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "created": 1735785346,
 *     "object": "chat.completion.chunk"
 *   },
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {}
 * }
 * AIMessageChunk {
 *   "id": "run-1756a5b2-2ce0-47a9-81e0-2195bf893bd4",
 *   "content": "J",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "created": 1735785346,
 *     "object": "chat.completion.chunk"
 *   },
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {}
 * }
 * AIMessageChunk {
 *   "id": "run-1756a5b2-2ce0-47a9-81e0-2195bf893bd4",
 *   "content": "'",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "created": 1735785346,
 *     "object": "chat.completion.chunk"
 *   },
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {}
 * }
 * AIMessageChunk {
 *   "id": "run-1756a5b2-2ce0-47a9-81e0-2195bf893bd4",
 *   "content": "ad",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "created": 1735785346,
 *     "object": "chat.completion.chunk"
 *   },
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {}
 * }
 * AIMessageChunk {
 *   "id": "run-1756a5b2-2ce0-47a9-81e0-2195bf893bd4",
 *   "content": "ore",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "created": 1735785346,
 *     "object": "chat.completion.chunk"
 *   },
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {}
 * }
 * AIMessageChunk {
 *   "id": "run-1756a5b2-2ce0-47a9-81e0-2195bf893bd4",
 *   "content": " la",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "created": 1735785346,
 *     "object": "chat.completion.chunk"
 *   },
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {}
 * }
 * ...
 * AIMessageChunk {
 *   "id": "run-1756a5b2-2ce0-47a9-81e0-2195bf893bd4",
 *   "content": "",
 *   "additional_kwargs": {},
 *   "response_metadata": {
 *     "finish_reason": "stop",
 *     "id": "chatcmpl-15c80082-4475-423c-b140-7b0a556311ca",
 *     "system_fingerprint": "fp_2e2a2a083c",
 *     "model": "llama-3.3-70b",
 *     "created": 1735785346,
 *     "object": "chat.completion.chunk",
 *     "time_info": {
 *       "queue_time": 0.000100589,
 *       "prompt_time": 0.002167348,
 *       "completion_time": 0.012320277,
 *       "total_time": 0.0169985294342041,
 *       "created": 1735785346
 *     }
 *   },
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": [],
 *   "usage_metadata": {
 *     "input_tokens": 55,
 *     "output_tokens": 9,
 *     "total_tokens": 64
 *   }
 * }
 * ```
 *
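 * Because `ChatCerebrasCallOptions` extends `BaseChatModelCallOptions`, standard call
 * options such as `signal` and `stop` can be passed at call time. A minimal sketch of
 * cancelling a stream with an `AbortSignal` (whether the in-flight request itself is
 * aborted depends on the underlying SDK; the timeout value is arbitrary):
 *
 * ```typescript
 * const controller = new AbortController();
 * setTimeout(() => controller.abort(), 5_000); // stop waiting after 5 seconds
 *
 * try {
 *   for await (const chunk of await llm.stream(input, { signal: controller.signal })) {
 *     console.log(chunk.content);
 *   }
 * } catch (e) {
 *   console.error("Stream aborted or failed:", e);
 * }
 * ```
 *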
 * ### Aggregate Streamed Chunks
 *
 * ```typescript
 * import { AIMessageChunk } from '@langchain/core/messages';
 * import { concat } from '@langchain/core/utils/stream';
 *
 * const stream = await llm.stream(input);
 * let full: AIMessageChunk | undefined;
 * for await (const chunk of stream) {
 *   full = !full ? chunk : concat(full, chunk);
 * }
 * console.log(full);
 * ```
 *
 * ```txt
 * AIMessageChunk {
 *   "content": "J'adore la programmation.",
 *   "additional_kwargs": {},
 *   "tool_calls": [],
 *   "tool_call_chunks": [],
 *   "invalid_tool_calls": []
 * }
 * ```
 *
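 * Token usage is reported on the `usage_metadata` field of the returned message (see the
 * invoke output above). A small sketch; when aggregating a stream with `concat`, the usage
 * counts are assumed to arrive on the final chunk and to be merged into the aggregate:
 *
 * ```typescript
 * const usageResult = await llm.invoke(input);
 * console.log(usageResult.usage_metadata);
 * // e.g. { input_tokens: 55, output_tokens: 9, total_tokens: 64 }
 * ```
 *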
 * ### Bind tools
 *
 * ```typescript
 * import { z } from 'zod';
 *
 * const llmForToolCalling = new ChatCerebras({
 *   model: "llama-3.3-70b",
 *   temperature: 0,
 *   // other params...
 * });
 *
 * const GetWeather = {
 *   name: "GetWeather",
 *   description: "Get the current weather in a given location",
 *   schema: z.object({
 *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
 *   }),
 * }
 *
 * const GetPopulation = {
 *   name: "GetPopulation",
 *   description: "Get the current population in a given location",
 *   schema: z.object({
 *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
 *   }),
 * }
 *
 * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);
 * const aiMsg = await llmWithTools.invoke(
 *   "Which city is hotter today and which is bigger: LA or NY?"
 * );
 * console.log(aiMsg.tool_calls);
 * ```
 *
 * ```txt
 * [
 *   {
 *     name: 'GetWeather',
 *     args: { location: 'Los Angeles, CA' },
 *     type: 'tool_call',
 *     id: 'call_cd34'
 *   },
 *   {
 *     name: 'GetWeather',
 *     args: { location: 'New York, NY' },
 *     type: 'tool_call',
 *     id: 'call_68rf'
 *   },
 *   {
 *     name: 'GetPopulation',
 *     args: { location: 'Los Angeles, CA' },
 *     type: 'tool_call',
 *     id: 'call_f81z'
 *   },
 *   {
 *     name: 'GetPopulation',
 *     args: { location: 'New York, NY' },
 *     type: 'tool_call',
 *     id: 'call_8byt'
 *   }
 * ]
 * ```
 *
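 * The returned `tool_calls` can then be routed to your own handlers. A minimal sketch; the
 * `handlers` map and its implementations are illustrative and not part of this package:
 *
 * ```typescript
 * const handlers: Record<string, (args: { location: string }) => Promise<string>> = {
 *   GetWeather: async ({ location }) => `It is sunny in ${location}.`,
 *   GetPopulation: async ({ location }) => `The population of ${location} is unknown.`,
 * };
 *
 * for (const toolCall of aiMsg.tool_calls ?? []) {
 *   const handler = handlers[toolCall.name];
 *   if (handler) {
 *     console.log(await handler(toolCall.args as { location: string }));
 *   }
 * }
 * ```
 *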
 * ### Structured Output
 *
 * ```typescript
 * import { z } from 'zod';
 *
 * const Joke = z.object({
 *   setup: z.string().describe("The setup of the joke"),
 *   punchline: z.string().describe("The punchline to the joke"),
 *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
 * }).describe('Joke to tell user.');
 *
 * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: "Joke" });
 * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
 * console.log(jokeResult);
 * ```
 *
 * ```txt
 * {
 *   setup: "Why don't cats play poker in the wild?",
 *   punchline: 'Because there are too many cheetahs.'
 * }
 * ```
 *
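 * A minimal sketch assuming the standard LangChain behavior where passing
 * `includeRaw: true` to `withStructuredOutput` returns both the raw model message and the
 * parsed object (the exact raw message contents will vary):
 *
 * ```typescript
 * const structuredLlmWithRaw = llmForToolCalling.withStructuredOutput(Joke, {
 *   name: "Joke",
 *   includeRaw: true,
 * });
 * const rawAndParsed = await structuredLlmWithRaw.invoke("Tell me a joke about cats");
 * console.log(rawAndParsed.parsed);
 * // rawAndParsed.raw is the underlying model message, including any tool call metadata
 * ```
 *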
 */
declare class ChatCerebras extends BaseChatModel<ChatCerebrasCallOptions, AIMessageChunk> implements ChatCerebrasInput {
  static lc_name(): string;
  lc_serializable: boolean;
  get lc_secrets(): {
    [key: string]: string;
  } | undefined;
  get lc_aliases(): {
    [key: string]: string;
  } | undefined;
  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
  client: Cerebras;
  model: string;
  maxCompletionTokens?: number;
  temperature?: number;
  topP?: number;
  seed?: number;
  streaming?: boolean;
  constructor(model: Cerebras.ChatCompletionCreateParams["model"], fields?: Omit<ChatCerebrasInput, "model">);
  constructor(fields?: ChatCerebrasInput);
  _llmType(): string;
  bindTools(tools: BindToolsInput[], kwargs?: Partial<ChatCerebrasCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, ChatCerebrasCallOptions>;
  /**
   * A method that returns the parameters for a Cerebras API call. It
   * includes model and options parameters.
   * @param options Optional parsed call options.
   * @returns An object containing the parameters for a Cerebras API call.
   */
  invocationParams(options?: this["ParsedCallOptions"]): Omit<Cerebras.ChatCompletionCreateParams, "messages">;
  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
  /**
   * Implement to support streaming.
   * Should yield chunks iteratively.
   */
  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | SerializableSchema<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
  withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | SerializableSchema<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;
}
//#endregion
export { ChatCerebras, ChatCerebrasCallOptions, ChatCerebrasInput };
//# sourceMappingURL=chat_models.d.ts.map