import { z, JSONSchema7, ActionContext, Action, ActionRunOptions, DetachedAction, StreamingCallback, GenkitError, Operation } from '@genkit-ai/core';
import { Registry } from '@genkit-ai/core/registry';
import { b as DocumentData, l as ToolRequestPart, m as ToolResponsePart } from './document-Batw8a-E.js';
import { GenerateResponseChunk } from './generate/chunk.js';
import { GenerateResponse } from './generate/response.js';
import { M as ModelArgument, c as ModelMiddleware } from './model-B2GL_8eB.js';
import { DynamicResourceAction } from './resource.js';
import { Part, MessageData, GenerateRequestSchema, GenerateResponseSchema, GenerateResponseChunkSchema, ToolDefinition, GenerationCommonConfigSchema, GenerateRequest, GenerateResponseData, GenerateActionOptions } from './model-types.js';

/**
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * Prompt action.
 */
type PromptAction<I extends z.ZodTypeAny = z.ZodTypeAny> = Action<I, typeof GenerateRequestSchema> & {
    __action: {
        metadata: {
            type: 'prompt';
        };
    };
    __executablePrompt: ExecutablePrompt<z.infer<I>>;
};
declare function isPromptAction(action: Action): action is PromptAction;
/**
 * Executable prompt action.
 */
type ExecutablePromptAction<I extends z.ZodTypeAny = z.ZodTypeAny> = Action<I, typeof GenerateResponseSchema, typeof GenerateResponseChunkSchema> & {
    __action: {
        metadata: {
            type: 'executablePrompt';
        };
    };
    __executablePrompt: ExecutablePrompt<z.infer<I>>;
};
/**
 * Configuration for a prompt action.
 */
interface PromptConfig<I extends z.ZodTypeAny = z.ZodTypeAny, O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny> {
    name: string;
    variant?: string;
    model?: ModelArgument<CustomOptions>;
    config?: z.infer<CustomOptions>;
    description?: string;
    input?: {
        schema?: I;
        jsonSchema?: JSONSchema7;
    };
    system?: string | Part | Part[] | PartsResolver<z.infer<I>>;
    prompt?: string | Part | Part[] | PartsResolver<z.infer<I>>;
    messages?: string | MessageData[] | MessagesResolver<z.infer<I>>;
    docs?: DocumentData[] | DocsResolver<z.infer<I>>;
    output?: OutputOptions<O>;
    maxTurns?: number;
    returnToolRequests?: boolean;
    metadata?: Record<string, any>;
    tools?: ToolArgument[];
    toolChoice?: ToolChoice;
    use?: ModelMiddleware[];
    context?: ActionContext;
}
/**
 * Generate options of a prompt.
 */
type PromptGenerateOptions<O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny> = Omit<GenerateOptions<O, CustomOptions>, 'prompt' | 'system'>;
/**
 * A prompt that can be executed as a function.
 */
interface ExecutablePrompt<I = undefined, O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny> {
    /**
     * Generates a response by rendering the prompt template with given user input and then calling the model.
     *
     * @param input Prompt inputs.
     * @param opts Options for the prompt template, including user input variables and custom model configuration options.
     * @returns the model response as a promise of `GenerateResponse`.
     */
    (input?: I, opts?: PromptGenerateOptions<O, CustomOptions>): Promise<GenerateResponse<z.infer<O>>>;
    /**
     * Generates a streaming response by rendering the prompt template with given user input and then calling the model.
     * @param input Prompt inputs.
     * @param opts Options for the prompt template, including user input variables and custom model configuration options.
     * @returns the model response as a `GenerateStreamResponse`.
     */
    stream(input?: I, opts?: PromptGenerateOptions<O, CustomOptions>): GenerateStreamResponse<z.infer<O>>;
    /**
     * Renders the prompt template based on user input.
     *
     * @param input Prompt inputs.
     * @param opts Options for the prompt template, including user input variables and custom model configuration options.
     * @returns a `GenerateOptions` object to be used with the `generate()` function from @genkit-ai/ai.
     */
    render(input?: I, opts?: PromptGenerateOptions<O, CustomOptions>): Promise<GenerateOptions<O, CustomOptions>>;
    /**
     * Returns the prompt usable as a tool.
     */
    asTool(): Promise<ToolAction>;
}
type PartsResolver<I, S = any> = (input: I, options: {
    state?: S;
    context: ActionContext;
}) => Part[] | Promise<Part[]>;
type MessagesResolver<I, S = any> = (input: I, options: {
    history?: MessageData[];
    state?: S;
    context: ActionContext;
}) => MessageData[] | Promise<MessageData[]>;
type DocsResolver<I, S = any> = (input: I, options: {
    context: ActionContext;
    state?: S;
}) => DocumentData[] | Promise<DocumentData[]>;
/**
 * Defines a prompt which can be used to generate content or render a request.
 *
 * @returns The new `ExecutablePrompt`.
 */
declare function definePrompt<I extends z.ZodTypeAny = z.ZodTypeAny, O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny>(registry: Registry, options: PromptConfig<I, O, CustomOptions>): ExecutablePrompt<z.infer<I>, O, CustomOptions>;
/**
 * Checks whether the provided object is an executable prompt.
 */
declare function isExecutablePrompt(obj: any): obj is ExecutablePrompt;
declare function loadPromptFolder(registry: Registry, dir: string | undefined, ns: string): void;
declare function loadPromptFolderRecursively(registry: Registry, dir: string, ns: string, subDir: string): void;
declare function definePartial(registry: Registry, name: string, source: string): void;
declare function defineHelper(registry: Registry, name: string, fn: Handlebars.HelperDelegate): void;
declare function prompt<I = undefined, O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny>(registry: Registry, name: string, options?: {
    variant?: string;
    dir?: string;
}): Promise<ExecutablePrompt<I, O, CustomOptions>>;
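/*
 * Illustrative sketch (editorial example, not part of the generated declarations): defining
 * and calling a prompt with `definePrompt`. The `registry` instance, the model identifier,
 * and the Handlebars template are assumptions for the example; in the high-level Genkit SDK
 * the registry is normally managed for you.
 *
 * ```ts
 * const helloPrompt = definePrompt(registry, {
 *   name: 'helloPrompt',
 *   model: 'googleai/gemini-1.5-flash', // assumed model identifier
 *   input: { schema: z.object({ name: z.string() }) },
 *   prompt: 'Say hello to {{name}}.',
 * });
 *
 * // Call the ExecutablePrompt directly to render and generate in one step,
 * // or use .render() to obtain GenerateOptions without calling the model.
 * const response = await helloPrompt({ name: 'Ada' });
 * const options = await helloPrompt.render({ name: 'Ada' });
 * ```
 */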
/**
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
interface Resumable<I extends z.ZodTypeAny = z.ZodTypeAny, O extends z.ZodTypeAny = z.ZodTypeAny> {
    /**
     * respond constructs a tool response corresponding to the provided interrupt tool request
     * using the provided reply data, validating it against the output schema of the tool if
     * it exists.
     *
     * @beta
     */
    respond(
    /** The interrupt tool request to which you want to respond. */
    interrupt: ToolRequestPart,
    /**
     * The data with which you want to respond. Must conform to a tool's output schema or an
     * interrupt's input schema.
     **/
    outputData: z.infer<O>, options?: {
        metadata?: Record<string, any>;
    }): ToolResponsePart;
    /**
     * restart constructs a tool request corresponding to the provided interrupt tool request
     * that will then re-trigger the tool after e.g. a user confirms. The `resumedMetadata`
     * supplied to this method will be passed to the tool to allow for custom handling of
     * restart logic.
     *
     * @param interrupt The interrupt tool request you want to restart.
     * @param resumedMetadata The metadata you want to provide to the tool to aid in reprocessing. Defaults to `true` if none is supplied.
     * @param options Additional options for restarting the tool.
     *
     * @beta
     */
    restart(interrupt: ToolRequestPart, resumedMetadata?: any, options?: {
        /**
         * Replace the existing input arguments to the tool with different ones, for example
         * if the user revised an action before confirming. When input is replaced, the existing
         * tool request will be amended in the message history.
         **/
        replaceInput?: z.infer<I>;
    }): ToolRequestPart;
}
/**
 * An action with a `tool` type.
 */
type ToolAction<I extends z.ZodTypeAny = z.ZodTypeAny, O extends z.ZodTypeAny = z.ZodTypeAny> = Action<I, O> & Resumable<I, O> & {
    __action: {
        metadata: {
            type: 'tool';
        };
    };
};
/**
 * A dynamic action with a `tool` type. Dynamic tools are detached actions -- not associated with any registry.
 */
type DynamicToolAction<I extends z.ZodTypeAny = z.ZodTypeAny, O extends z.ZodTypeAny = z.ZodTypeAny> = DetachedAction<I, O> & Resumable<I, O> & {
    __action: {
        metadata: {
            type: 'tool';
        };
    };
};
interface ToolRunOptions extends ActionRunOptions {
    /**
     * If resumed is supplied to a tool at runtime, that means that it was previously interrupted and this is a second attempt at executing it.
     * @beta
     **/
    resumed?: boolean | Record<string, any>;
    /** The metadata from the tool request that triggered this run. */
    metadata?: Record<string, any>;
}
/**
 * Configuration for a tool.
 */
interface ToolConfig<I extends z.ZodTypeAny, O extends z.ZodTypeAny> {
    /** Unique name of the tool to use as a key in the registry. */
    name: string;
    /** Description of the tool. This is passed to the model to help understand what the tool is used for. */
    description: string;
    /** Input Zod schema. Mutually exclusive with `inputJsonSchema`. */
    inputSchema?: I;
    /** Input JSON schema. Mutually exclusive with `inputSchema`. */
    inputJsonSchema?: JSONSchema7;
    /** Output Zod schema. Mutually exclusive with `outputJsonSchema`. */
    outputSchema?: O;
    /** Output JSON schema. Mutually exclusive with `outputSchema`. */
    outputJsonSchema?: JSONSchema7;
    /** Metadata to be passed to the tool. */
    metadata?: Record<string, any>;
}
/**
 * A reference to a tool in the form of a name, definition, or the action itself.
 */
type ToolArgument = string | ToolAction | DynamicToolAction | Action | ExecutablePrompt;
/**
 * Converts an action to a tool action by setting the appropriate metadata.
 */
declare function asTool<I extends z.ZodTypeAny, O extends z.ZodTypeAny>(registry: Registry, action: Action<I, O>): ToolAction<I, O>;
/**
 * Resolves a mix of various formats of tool references to a list of tool actions by looking them up in the registry.
 */
declare function resolveTools(registry: Registry, tools?: (ToolArgument | ToolDefinition)[]): Promise<ToolAction[]>;
declare function lookupToolByName(registry: Registry, name: string): Promise<ToolAction>;
/**
 * Converts a tool action to a definition of the tool to be passed to a model.
 */
declare function toToolDefinition(tool: Action): ToolDefinition;
interface ToolFnOptions {
    /**
     * A function that can be called during tool execution that will result in the tool
     * getting interrupted (immediately) and tool request returned to the upstream caller.
     */
    interrupt: (metadata?: Record<string, any>) => never;
    context: ActionContext;
}
type ToolFn<I extends z.ZodTypeAny, O extends z.ZodTypeAny> = (input: z.infer<I>, ctx: ToolFnOptions & ToolRunOptions) => Promise<z.infer<O>>;
/**
 * Defines a tool.
 *
 * A tool is an action that can be passed to a model to be called automatically if it so chooses.
 */
declare function defineTool<I extends z.ZodTypeAny, O extends z.ZodTypeAny>(registry: Registry, config: ToolConfig<I, O>, fn: ToolFn<I, O>): ToolAction<I, O>;
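/*
 * Illustrative sketch (editorial example, not part of the generated declarations): registering
 * a tool with `defineTool`. The weather lookup is a stand-in; only the `ToolConfig` shape and
 * the `(input, ctx)` tool function signature come from the declarations above.
 *
 * ```ts
 * const getWeather = defineTool(
 *   registry,
 *   {
 *     name: 'getWeather',
 *     description: 'Returns the current weather for a city.',
 *     inputSchema: z.object({ city: z.string() }),
 *     outputSchema: z.string(),
 *   },
 *   async (input, ctx) => {
 *     // A real implementation would call a weather API here.
 *     return `It is sunny in ${input.city}.`;
 *   }
 * );
 * ```
 */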
/** InterruptConfig defines the options for configuring an interrupt. */
type InterruptConfig<I extends z.ZodTypeAny = z.ZodTypeAny, O extends z.ZodTypeAny = z.ZodTypeAny> = ToolConfig<I, O> & {
    /** requestMetadata adds additional `interrupt` metadata to the `toolRequest` generated by the interrupt */
    requestMetadata?: Record<string, any> | ((input: z.infer<I>) => Record<string, any> | Promise<Record<string, any>>);
};
declare function isToolRequest(part: Part): part is ToolRequestPart;
declare function isToolResponse(part: Part): part is ToolResponsePart;
declare function isDynamicTool(t: unknown): t is DynamicToolAction;
declare function defineInterrupt<I extends z.ZodTypeAny, O extends z.ZodTypeAny>(registry: Registry, config: InterruptConfig<I, O>): ToolAction<I, O>;
/**
 * Thrown when tool execution is interrupted. It's meant to be caught by the framework, not public API.
 */
declare class ToolInterruptError extends Error {
    readonly metadata?: Record<string, any> | undefined;
    constructor(metadata?: Record<string, any> | undefined);
}
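/*
 * Illustrative sketch (editorial example, not part of the generated declarations): declaring
 * an interrupt with `defineInterrupt` and answering it with the `.respond()` helper from
 * `Resumable`. The schemas, the `interruptToolRequest` variable, and the reply data are
 * assumptions for the example.
 *
 * ```ts
 * const askUser = defineInterrupt(registry, {
 *   name: 'askUser',
 *   description: 'Pauses generation to ask the user a question.',
 *   inputSchema: z.object({ question: z.string() }),
 *   outputSchema: z.object({ answer: z.string() }),
 * });
 *
 * // After a generate() call is interrupted, build a matching toolResponse part for the
 * // interrupt and pass it back via the `resume` option (see ResumeOptions below).
 * const reply = askUser.respond(interruptToolRequest, { answer: 'yes' });
 * ```
 */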
/**
 * Defines a dynamic tool. Dynamic tools are just like regular tools but will not be registered in the
 * Genkit registry and can be defined dynamically at runtime.
 */
declare function dynamicTool<I extends z.ZodTypeAny, O extends z.ZodTypeAny>(config: ToolConfig<I, O>, fn?: ToolFn<I, O>): DynamicToolAction<I, O>;

/**
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/** Specifies how tools should be called by the model. */
type ToolChoice = 'auto' | 'required' | 'none';
interface OutputOptions<O extends z.ZodTypeAny = z.ZodTypeAny> {
    format?: string;
    contentType?: string;
    instructions?: boolean | string;
    schema?: O;
    jsonSchema?: any;
    constrained?: boolean;
}
/** ResumeOptions configure how to resume generation after an interrupt. */
interface ResumeOptions {
    /**
     * respond should contain a single or list of `toolResponse` parts corresponding
     * to interrupt `toolRequest` parts from the most recent model message. Each
     * entry must have a matching `name` and `ref` (if supplied) for its `toolRequest`
     * counterpart.
     *
     * Tools have a `.respond` helper method to construct a reply ToolResponse and validate
     * the data against its schema. Call `myTool.respond(interruptToolRequest, yourReplyData)`.
     */
    respond?: ToolResponsePart | ToolResponsePart[];
    /**
     * restart will run a tool again with additionally supplied metadata passed through as
     * a `resumed` option in the second argument. This allows for scenarios like conditionally
     * requesting confirmation of an LLM's tool request.
     *
     * Tools have a `.restart` helper method to construct a restart ToolRequest. Call
     * `myTool.restart(interruptToolRequest, resumeMetadata)`.
     */
    restart?: ToolRequestPart | ToolRequestPart[];
    /** Additional metadata to annotate the created tool message with in the "resume" key. */
    metadata?: Record<string, any>;
}
interface GenerateOptions<O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny> {
    /** A model name (e.g. `vertexai/gemini-1.0-pro`) or reference. */
    model?: ModelArgument<CustomOptions>;
    /** The system prompt to be included in the generate request. Can be a string for a simple text prompt or one or more parts for multi-modal prompts (subject to model support). */
    system?: string | Part | Part[];
    /** The prompt for which to generate a response. Can be a string for a simple text prompt or one or more parts for multi-modal prompts. */
    prompt?: string | Part | Part[];
    /** Retrieved documents to be used as context for this generation. */
    docs?: DocumentData[];
    /** Conversation messages (history) for multi-turn prompting when supported by the underlying model. */
    messages?: (MessageData & {
        content: Part[] | string | (string | Part)[];
    })[];
    /** List of registered tool names or actions to treat as a tool for this generation if supported by the underlying model. */
    tools?: ToolArgument[];
    /** List of dynamic resources to be made available to this generate request. */
    resources?: DynamicResourceAction[];
    /** Specifies how tools should be called by the model. */
    toolChoice?: ToolChoice;
    /** Configuration for the generation request. */
    config?: z.infer<CustomOptions>;
    /** Configuration for the desired output of the request. Defaults to the model's default output if unspecified. */
    output?: OutputOptions<O>;
    /**
     * resume provides convenient capabilities for continuing generation
     * after an interrupt is triggered. Example:
     *
     * ```ts
     * const myInterrupt = ai.defineInterrupt({...});
     *
     * const response = await ai.generate({
     *   tools: [myInterrupt],
     *   prompt: "Call myInterrupt",
     * });
     *
     * const interrupt = response.interrupts[0];
     *
     * const resumedResponse = await ai.generate({
     *   messages: response.messages,
     *   resume: myInterrupt.respond(interrupt, {note: "this is the reply data"}),
     * });
     * ```
     *
     * @beta
     */
    resume?: ResumeOptions;
    /** When true, return tool calls for manual processing instead of automatically resolving them. */
    returnToolRequests?: boolean;
    /** Maximum number of tool call iterations that can be performed in a single generate call (default 5). */
    maxTurns?: number;
    /** When provided, models supporting streaming will call the provided callback with chunks as generation progresses. */
    onChunk?: StreamingCallback<GenerateResponseChunk>;
    /**
     * When provided, models supporting streaming will call the provided callback with chunks as generation progresses.
     *
     * @deprecated use {@link onChunk} instead.
     */
    streamingCallback?: StreamingCallback<GenerateResponseChunk>;
    /** Middleware to be used with this model call. */
    use?: ModelMiddleware[];
    /** Additional context (data, like e.g. auth) to be passed down to tools, prompts and other sub actions. */
    context?: ActionContext;
    /** Abort signal for the generate request. */
    abortSignal?: AbortSignal;
}
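/*
 * Illustrative sketch (editorial example, not part of the generated declarations): requesting
 * structured output through `GenerateOptions.output` with the `generate()` function declared
 * below. The model identifier and the `.output` accessor on the response are assumptions here.
 *
 * ```ts
 * const response = await generate(registry, {
 *   model: 'googleai/gemini-1.5-flash', // assumed model identifier
 *   prompt: 'Extract the city and country from: "Tokyo, Japan".',
 *   output: {
 *     schema: z.object({ city: z.string(), country: z.string() }),
 *   },
 * });
 *
 * // response.output is expected to hold the parsed, schema-validated object.
 * ```
 */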
declare function toGenerateRequest(registry: Registry, options: GenerateOptions): Promise<GenerateRequest>;
declare class GenerationResponseError extends GenkitError {
    detail: {
        response: GenerateResponse;
        [otherDetails: string]: any;
    };
    constructor(response: GenerateResponse, message: string, status?: GenkitError['status'], detail?: Record<string, any>);
}
/** A GenerationBlockedError is thrown when a generation is blocked. */
declare class GenerationBlockedError extends GenerationResponseError {
}
/**
 * Generate calls a generative model based on the provided prompt and configuration. If
 * `messages` (history) is provided, the generation will include a conversation history in its
 * request. If `tools` are provided, the generate method will automatically resolve
 * tool calls returned from the model unless `returnToolRequests` is set to `true`.
 *
 * See `GenerateOptions` for detailed information about available options.
 *
 * @param options The options for this generation request.
 * @returns The generated response based on the provided parameters.
 */
declare function generate<O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny>(registry: Registry, options: GenerateOptions<O, CustomOptions> | PromiseLike<GenerateOptions<O, CustomOptions>>): Promise<GenerateResponse<z.infer<O>>>;
declare function generateOperation<O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny>(registry: Registry, options: GenerateOptions<O, CustomOptions> | PromiseLike<GenerateOptions<O, CustomOptions>>): Promise<Operation<GenerateResponseData>>;
declare function toGenerateActionOptions(registry: Registry, options: GenerateOptions): Promise<GenerateActionOptions>;
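/*
 * Illustrative sketch (editorial example, not part of the generated declarations): letting
 * `generate()` run a multi-turn tool loop. `getWeather` refers to the hypothetical tool
 * sketched earlier; the model identifier is likewise an assumption.
 *
 * ```ts
 * const response = await generate(registry, {
 *   model: 'googleai/gemini-1.5-flash', // assumed model identifier
 *   prompt: 'What should I wear in Paris today?',
 *   tools: [getWeather],
 *   maxTurns: 3, // cap automatic tool-call iterations
 * });
 *
 * // Set returnToolRequests: true instead to receive the tool calls without executing them.
 * ```
 */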
type GenerateStreamOptions<O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny> = Omit<GenerateOptions<O, CustomOptions>, 'streamingCallback'>;
interface GenerateStreamResponse<O = unknown> {
    get stream(): AsyncIterable<GenerateResponseChunk>;
    get response(): Promise<GenerateResponse<O>>;
}
declare function generateStream<O extends z.ZodTypeAny = z.ZodTypeAny, CustomOptions extends z.ZodTypeAny = z.ZodTypeAny>(registry: Registry, options: GenerateOptions<O, CustomOptions> | PromiseLike<GenerateOptions<O, CustomOptions>>): GenerateStreamResponse<z.infer<O>>;
declare function tagAsPreamble(msgs?: MessageData[]): MessageData[] | undefined;

export { type ExecutablePromptAction as A, type PartsResolver as B, loadPromptFolderRecursively as C, type DocsResolver as D, type ExecutablePrompt as E, type Resumable as F, GenerationBlockedError as G, type DynamicToolAction as H, type InterruptConfig as I, resolveTools as J, lookupToolByName as K, toToolDefinition as L, type MessagesResolver as M, type ToolFnOptions as N, type OutputOptions as O, type PromptAction as P, type ToolFn as Q, type ResumeOptions as R, isToolRequest as S, type ToolChoice as T, isToolResponse as U, isDynamicTool as V, dynamicTool as W, toGenerateActionOptions as X, GenerationResponseError as a, generateOperation as b, generateStream as c, toGenerateRequest as d, type GenerateOptions as e, type GenerateStreamOptions as f, generate as g, type GenerateStreamResponse as h, defineHelper as i, definePartial as j, definePrompt as k, isExecutablePrompt as l, loadPromptFolder as m, type PromptConfig as n, type PromptGenerateOptions as o, prompt as p, ToolInterruptError as q, asTool as r, defineInterrupt as s, tagAsPreamble as t, defineTool as u, type ToolAction as v, type ToolArgument as w, type ToolConfig as x, type ToolRunOptions as y, isPromptAction as z };
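/*
 * Illustrative sketch (editorial example, not part of the generated declarations): consuming
 * `generateStream`. The chunk `.text` accessor and the model identifier are assumptions for
 * the example; the stream/response pair itself comes from `GenerateStreamResponse` above.
 *
 * ```ts
 * const { stream, response } = generateStream(registry, {
 *   model: 'googleai/gemini-1.5-flash', // assumed model identifier
 *   prompt: 'Tell me a short story about a lighthouse.',
 * });
 *
 * for await (const chunk of stream) {
 *   process.stdout.write(chunk.text); // stream partial text as it arrives
 * }
 *
 * const finalResponse = await response; // full GenerateResponse once streaming completes
 * ```
 */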